From 6d733eaed35b29f10c09d38113c5ab5e49b5be0a Mon Sep 17 00:00:00 2001
From: Erpim
Date: Sun, 8 Jun 2025 12:09:25 +0800
Subject: [PATCH 01/76] update ci

---
 .jenkins/test/config/dependent_packages.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.jenkins/test/config/dependent_packages.yaml b/.jenkins/test/config/dependent_packages.yaml
index f037eeb10..531bee81d 100644
--- a/.jenkins/test/config/dependent_packages.yaml
+++ b/.jenkins/test/config/dependent_packages.yaml
@@ -1,5 +1,5 @@
 mindspore:
-  'https://repo.mindspore.cn/mindspore/mindspore/version/202506/20250605/master_20250605212230_aac98ab9732926f6abd4c3d73be47d5be6c93ead_newest/'
+  'https://repo.mindspore.cn/mindspore/mindspore/version/202506/20250608/br_infer_iter_20250608031509_f31d63401e48787a7677f6e5c61745dd44304240_newest/'
 mindspore_gs:
   'https://repo.mindspore.cn/mindspore/golden-stick/version/202506/20250604/master_20250604160014_35fcbec4406d3b18faf02ef99fcbe2741e80348e_newest/'
--
Gitee

From 9b7b173efaa8a372fe4d1af06204fe7fdc5e0f1a Mon Sep 17 00:00:00 2001
From: twc
Date: Mon, 9 Jun 2025 15:40:21 +0800
Subject: [PATCH 02/76] support TH compute layout for native qwen

---
 vllm_mindspore/attention/layer.py             | 8 +-------
 vllm_mindspore/model_executor/models/qwen2.py | 8 +-------
 2 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/vllm_mindspore/attention/layer.py b/vllm_mindspore/attention/layer.py
index 89914e97d..caa6353e3 100644
--- a/vllm_mindspore/attention/layer.py
+++ b/vllm_mindspore/attention/layer.py
@@ -130,9 +130,8 @@ class Attention(nn.Cell):
         self.head_size = head_size
         self.hidden_size_per_partition = num_heads*head_size
         self.kv_hidden_size_per_partition = num_kv_heads*head_size
-        self.flatten = True
-        input_layout = "TH" if self.flatten else "BSH"  # The flatten ("TH") layout is not supported in PyNative mode.
+        input_layout = "TH"
         scale = float(scale)
         pre_tokens = 2147483647
         next_tokens = 2147483647
@@ -173,7 +172,6 @@
             batch_valid_length: shape = [batch_size, ]
             block_tables: shape = [block_size, num_block]
         """
-        output = query
         cache_out = self.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping)
         query = ops.depend(query, cache_out)
         if is_prefill:
@@ -202,9 +200,6 @@
             actual_seq_kvlen: shape = [batch_size, ]
             NOTE: Currently `PyNative` mode does not support operations in "TH" form, so it will be converted to "BSH" form.
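            ("TH" here means the packed [num_tokens, hidden_size] layout, in
            which the sequences are delimited by actual_seq_qlen and
            actual_seq_kvlen, while "BSH" means the padded
            [batch_size, seq_len, hidden_size] layout.)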
""" - query = query.view(-1, self.hidden_size_per_partition) - key = key.view(-1, self.kv_hidden_size_per_partition) - value = value.view(-1, self.kv_hidden_size_per_partition) _, _, _, output = self.flash_attention( query, key, @@ -217,7 +212,6 @@ class Attention(nn.Cell): actual_seq_qlen, actual_seq_kvlen ) - output = output.view(1, -1, self.hidden_size_per_partition) return output def _run_decode_forward( diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 444ddc5a0..03f927240 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -507,7 +507,7 @@ class Qwen2ForCausalLM(MsModelBase): compilation_config.static_forward_context[str(i)] = self.kv_caches[i] def set_model_inputs(self, is_prefill): - dyn_input_ids = Tensor(shape=[None, None], dtype=mstype.int64) + dyn_input_ids = Tensor(shape=[None], dtype=mstype.int64) dyn_position_ids = Tensor(shape=[None], dtype=mstype.int64) block_size = self.cache_config.block_size @@ -596,12 +596,10 @@ class Qwen2ForCausalLM(MsModelBase): attn_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np) positions = positions.to(ms.int64) if is_prefill: - input_ids = ops.expand_dims(input_ids, 0) if not self.prefill: self.prefill = True self.set_model_inputs(self.prefill) else: - input_ids = ops.expand_dims(input_ids, 1) if self.prefill: self.prefill = False self.set_model_inputs(self.prefill) @@ -617,10 +615,6 @@ class Qwen2ForCausalLM(MsModelBase): block_tables, intermediate_tensors, inputs_embeds) - if is_prefill: - model_output = ops.squeeze(model_output, 0) - else: - model_output = ops.squeeze(model_output, 1) return model_output def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor) -> FlashAttentionMetadata: -- Gitee From 9c24c1ea97041580f135de790fb038c4d4f9f3d0 Mon Sep 17 00:00:00 2001 From: cs123abc Date: Wed, 7 May 2025 19:44:30 +0800 Subject: [PATCH 03/76] delete unnecessary patch --- tests/st/python/test_sampler.py | 2 +- vllm_mindspore/__init__.py | 24 +- .../model_executor/layers/logits_processor.py | 2 +- .../model_executor/layers/sampler.py | 9 +- vllm_mindspore/model_executor/models/llama.py | 2 +- vllm_mindspore/model_executor/models/qwen2.py | 2 +- .../model_executor/sampling_metadata.py | 483 +----------------- vllm_mindspore/utils.py | 55 -- 8 files changed, 14 insertions(+), 565 deletions(-) diff --git a/tests/st/python/test_sampler.py b/tests/st/python/test_sampler.py index b554717c7..8066748f4 100644 --- a/tests/st/python/test_sampler.py +++ b/tests/st/python/test_sampler.py @@ -29,7 +29,7 @@ from transformers import GenerationConfig, GenerationMixin import vllm.envs as envs from vllm_mindspore.model_executor.layers.sampler import Sampler -from vllm_mindspore.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.model_executor.utils import set_random_seed from vllm_mindspore.sequence import SamplingParams, SequenceData, SequenceGroupMetadata from vllm.utils import Counter, is_pin_memory_available diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py index 5892937a8..963316a79 100644 --- a/vllm_mindspore/__init__.py +++ b/vllm_mindspore/__init__.py @@ -60,34 +60,24 @@ from vllm_mindspore.v1.engine.core import shutdown vllm.v1.engine.core.DPEngineCoreProc.shutdown = shutdown from vllm_mindspore.utils import ( - direct_register_custom_op, make_tensor_with_pad, async_tensor_h2d, - 
get_dtype_size, - ascend_device_count_stateless, ascend_is_initialized, ms_memory_profiling, ) -vllm.utils.direct_register_custom_op = direct_register_custom_op vllm.utils.make_tensor_with_pad = make_tensor_with_pad vllm.utils.async_tensor_h2d = async_tensor_h2d -vllm.utils.get_dtype_size = get_dtype_size -vllm.utils.cuda_device_count_stateless = ascend_device_count_stateless vllm.utils.cuda_is_initialized = ascend_is_initialized vllm.utils.memory_profiling = ms_memory_profiling -vllm.config.cuda_device_count_stateless = ascend_device_count_stateless import vllm.executor -vllm.executor.cuda_device_count_stateless = ascend_device_count_stateless - from vllm_mindspore.model_executor.models.registry import ( MindSporeModelRegistry, _SUBPROCESS_COMMAND, ) - vllm.config.ModelRegistry = MindSporeModelRegistry import vllm.model_executor @@ -108,18 +98,10 @@ vllm.model_executor.model_loader.loader.get_model_architecture = ( get_ms_model_architecture ) -from vllm_mindspore.model_executor.sampling_metadata import ( - SequenceGroupToSample, - SamplingMetadataCache, - SamplingMetadata, -) - -vllm.model_executor.SamplingMetadataCache = SamplingMetadataCache -vllm.model_executor.SamplingMetadata = SamplingMetadata -vllm.model_executor.sampling_metadata.SequenceGroupToSample = SequenceGroupToSample -vllm.model_executor.sampling_metadata.SamplingMetadataCache = SamplingMetadataCache -vllm.model_executor.sampling_metadata.SamplingMetadata = SamplingMetadata +from vllm_mindspore.model_executor.sampling_metadata import SamplingTensors +vllm.model_executor.sampling_metadata.async_tensor_h2d = async_tensor_h2d +vllm.model_executor.sampling_metadata.SamplingTensors.from_lists = SamplingTensors.from_lists from vllm_mindspore.worker.cache_engine import ( ms_allocate_kv_cache, ms_swap_in, diff --git a/vllm_mindspore/model_executor/layers/logits_processor.py b/vllm_mindspore/model_executor/layers/logits_processor.py index 32b02fb7e..5d6036943 100644 --- a/vllm_mindspore/model_executor/layers/logits_processor.py +++ b/vllm_mindspore/model_executor/layers/logits_processor.py @@ -32,7 +32,7 @@ from vllm.distributed import ( from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding, ) -from vllm_mindspore.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.platforms import current_platform diff --git a/vllm_mindspore/model_executor/layers/sampler.py b/vllm_mindspore/model_executor/layers/sampler.py index edfe62526..db0ede030 100644 --- a/vllm_mindspore/model_executor/layers/sampler.py +++ b/vllm_mindspore/model_executor/layers/sampler.py @@ -37,11 +37,11 @@ from vllm.sequence import (VLLM_INVALID_TOKEN_ID, PromptLogprobs, SampleLogprobs, SequenceOutput) from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics from vllm_mindspore.model_executor.layers.utils import apply_penalties -from vllm_mindspore.model_executor.sampling_metadata import ( +from vllm.model_executor.sampling_metadata import ( SamplingMetadata, - SamplingTensors, - SequenceGroupToSample, + SamplingTensors ) +from vllm.model_executor.sampling_metadata import SequenceGroupToSample if envs.VLLM_USE_FLASHINFER_SAMPLER and find_spec("flashinfer"): raise RuntimeError("Donot support for mindspore now.") @@ -447,7 +447,8 @@ def _apply_min_p( """ probs = torch.softmax(logits, dim=-1) top_probs, _ = probs.max(dim=-1, keepdim=True) - scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs + # For MindSpore: unsqueeze_ will cause error, use 
unsqueeze instead + scaled_min_p = min_p.unsqueeze(dim=1) * top_probs tokens_to_remove = probs < scaled_min_p logits = logits.masked_fill_(tokens_to_remove, -float("inf")) diff --git a/vllm_mindspore/model_executor/models/llama.py b/vllm_mindspore/model_executor/models/llama.py index 3a18956b9..354bfb37a 100644 --- a/vllm_mindspore/model_executor/models/llama.py +++ b/vllm_mindspore/model_executor/models/llama.py @@ -48,7 +48,7 @@ from vllm_mindspore.model_executor.models.utils import ( from vllm_mindspore.model_executor.layers.sampler import get_sampler, SamplerOutput from vllm_mindspore.model_executor.layers.layernorm import RMSNorm from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope -from vllm_mindspore.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm_mindspore.model_executor.models.model_base import MsModelBase diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 444ddc5a0..7ee643d35 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -46,7 +46,7 @@ from vllm_mindspore.model_executor.model_loader.weight_utils import \ from vllm_mindspore.model_executor.models.utils import ( PPMissingLayer, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) -from vllm_mindspore.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm_mindspore.model_executor.models.model_base import MsModelBase, Fake_Attention, Fake_Attention_V1 from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE diff --git a/vllm_mindspore/model_executor/sampling_metadata.py b/vllm_mindspore/model_executor/sampling_metadata.py index a016dd8e1..416f4b7fe 100644 --- a/vllm_mindspore/model_executor/sampling_metadata.py +++ b/vllm_mindspore/model_executor/sampling_metadata.py @@ -18,14 +18,9 @@ from array import array from dataclasses import dataclass -from typing import Dict, List, Optional, Tuple +from typing import List - -from vllm.sampling_params import SamplingParams, SamplingType -from vllm.sequence import VLLM_TOKEN_ID_ARRAY_TYPE, SequenceData, SequenceGroupMetadata from vllm.utils import ( - PyObjectCache, - async_tensor_h2d, is_pin_memory_available, make_tensor_with_pad, ) @@ -36,368 +31,6 @@ from mindspore import Tensor import mindspore as ms -@dataclass -class SequenceGroupToSample: - # |---------- N-1 iteration --------| - # |---------------- N iteration ---------------------| - # |- tokenA -|......................|-- newTokens ---| - # |---------- context_len ----------| - # |-------------------- seq_len ----------------------| - # |-- query_len ---| - - # Sequence ids for the sequence group in a previous step. - seq_ids: List[int] - sampling_params: SamplingParams - # seq_id -> sequence data. - seq_data: Dict[int, SequenceData] - # The length of the sequence (all tokens seen in the past + new token to - # compute attention) of the sequence group. None if it is in a decode - # stage. - seq_len: Optional[int] - # The length of new query tokens to compute in the current step. None if it - # is in a decode stage. The length of query_len <= seq_len if chunked - # prefill is enabled. - query_len: Optional[int] - # A random number generator for sampling. 
- generator: Optional[ms.Generator] - # True if the sequence group is in prefill stage. False if it is in a - # decode stage. - is_prompt: bool - # Query token indices from logits. to compute prompt logprob. Empty if - # prompt logprob is not required. - prompt_logprob_indices: List[int] - # Sample token indices from logits. Empty if sampling is not required. - sample_indices: List[int] - - @property - def do_sample(self): - return len(self.sample_indices) > 0 - - def __post_init__(self): - if len(self.prompt_logprob_indices) > 0: - assert self.sampling_params.prompt_logprobs is not None - if self.is_prompt: - assert self.seq_len is not None - assert self.query_len is not None - - -def gen_seq_group_to_sample_builder(num_seqs: int): - return lambda: SequenceGroupToSample( - seq_ids=[0] * num_seqs, - sampling_params=None, - seq_data=None, # type: ignore - seq_len=0, - query_len=0, - generator=None, - is_prompt=True, - prompt_logprob_indices=[], - sample_indices=[], - ) - - -class SamplingMetadataCache: - """Used to cache SamplingMetadata objects between scheduler iterations""" - - def __init__(self): - self._seq_group_to_sample_cache: Dict[int, PyObjectCache] = {} - - def get_cached_seq_group_to_sample(self, num_seqs): - if num_seqs not in self._seq_group_to_sample_cache: - self._seq_group_to_sample_cache[num_seqs] = PyObjectCache( - gen_seq_group_to_sample_builder(num_seqs) - ) - - obj = self._seq_group_to_sample_cache[num_seqs].get_object() - return obj - - def reset(self): - for cache in self._seq_group_to_sample_cache.values(): - cache.reset() - - -class SamplingMetadata: - """Metadata for input sequences. Used in sampler. - - The usage is as follow; - ``` - hidden_states = execute_model(...) - logits = hidden_states[sampling_metadata.selected_token_indices] - sample(logits) - - def sample(logits): - # Use categorized_sample_indices for sampling.... - ``` - - Args: - seq_groups: List of batched sequence groups. - selected_token_indices: (num_query_tokens_to_logprob). Indices to find - logits from the initial model output hidden states. - categorized_sample_indices: SamplingType -> token indices to sample. - Each token indices is 2D tensor of (num_indices, num_indices) where - the first item means the sample index within the returned logit - (before pruning padding), and the second item means the sample - index after pruning using selected_token_indices. - For example, if the returned logit is [1, 2, 3], and we select - [1, 2] for sampling, the pruned logit will be [2, 3]. In this case, - The first tuple is [1, 2] (sampled index within original logit), - and the second tuple is [0, 1] (sampled index within pruned logit). - num_prompts: Number of prompt sequence groups in seq_groups. - skip_sampler_cpu_output: Indicates if we want to skip the GPU=>CPU - serialization of token outputs. - reuse_sampling_tensors: Indicates if we want to reuse sampling - tensors that are part of the sampler forward pass. Currently, - it is mainly used for multi-step decode. 
- - """ - - def __init__( - self, - seq_groups: List[SequenceGroupToSample], - selected_token_indices: Tensor, - categorized_sample_indices: Dict[SamplingType, Tensor], - num_prompts: int, - skip_sampler_cpu_output: bool = False, - reuse_sampling_tensors: bool = False, - ) -> None: - self.seq_groups = seq_groups - self.selected_token_indices = selected_token_indices - self.categorized_sample_indices = categorized_sample_indices - self.num_prompts = num_prompts - self.skip_sampler_cpu_output = skip_sampler_cpu_output - self.reuse_sampling_tensors = reuse_sampling_tensors - - @staticmethod - def prepare( - seq_group_metadata_list: List[SequenceGroupMetadata], - seq_lens: List[int], - query_lens: List[int], - device: str, - pin_memory: bool, - generators: Optional[Dict[str, ms.Generator]] = None, - cache: Optional[SamplingMetadataCache] = None, - ) -> "SamplingMetadata": - ( - seq_groups, - selected_token_indices, - categorized_sample_indices, - num_prompts, - ) = _prepare_seq_groups( - seq_group_metadata_list, seq_lens, query_lens, device, generators, cache - ) - selected_token_indices = async_tensor_h2d( - selected_token_indices, - dtype=ms.int64, - target_device=device, - pin_memory=pin_memory, - ) - categorized_sample_indices = { - t: async_tensor_h2d( - seq_ids, - dtype=ms.int64, - target_device=device, - pin_memory=pin_memory, - ) - for t, seq_ids in categorized_sample_indices.items() - } - - sampling_metadata = SamplingMetadata( - seq_groups=seq_groups, - selected_token_indices=selected_token_indices, - categorized_sample_indices=categorized_sample_indices, - num_prompts=num_prompts, - ) - return sampling_metadata - - def __repr__(self) -> str: - return ( - "SamplingMetadata(" - f"seq_groups={self.seq_groups}, " - f"selected_token_indices={self.selected_token_indices}, " - f"categorized_sample_indices={self.categorized_sample_indices}), " - ) - - -def _prepare_seq_groups( - seq_group_metadata_list, #: List[SequenceGroupMetadata], - seq_lens: List[int], - query_lens: List[int], - device: str, - generators: Optional[Dict[str, ms.Generator]] = None, - cache: Optional[SamplingMetadataCache] = None, -) -> Tuple[ - List[SequenceGroupToSample], - List[int], - Dict[SamplingType, List[int]], - int, -]: - """Prepare sequence groups and indices for sampling. - - Args: - seq_group_metadata_list: A list of sequence group to batch. - seq_lens: A list of sequence lens per sequence group. - Index of prompt len should match with seq_group_metadata_list. - query_lens: A list of query lengths. Prompt lens include the length - of entire prompt tokens, and it could be shorter. - device: A device to use for random number generators, - `SequenceGroupToSample.generator`. - generators: A store of per-request random number generators used - for seeded requests. - - Returns: - seq_groups: A list of sequence group to sample. - selected_token_indices: See the definition from `SamplingMetadata`. - categorized_sample_indices: See the definition from `SamplingMetadata`. - num_prompts: Total number of prompts from `seq_group_metadata_list`. - """ - # Batched sequence groups for the current model forward stsep. - seq_groups: List[SequenceGroupToSample] = [] - # A list of token indices to sample/compute logprob. It is used to - # prune the outcome logits from the model for the performance. - selected_token_indices: List[int] = [] - # Used for selected_token_indices. 
- model_output_idx = 0 - - # Sampling type -> ( - # indices to sample/prompt logprob within pruned output logits, - # indices to sample within pruned logits) - categorized_sample_indices: Dict[SamplingType, List[int]] = { - t: [] for t in SamplingType - } - # Index of logits to compute logprob. Logits include both prompt logprob - # and sample logprob indices. - logit_idx = 0 - # Total number of prompts from given sequence groups. - num_prompts = 0 - - for i, seq_group_metadata in enumerate(seq_group_metadata_list): - seq_ids = seq_group_metadata.seq_data.keys() - - if cache is not None: - sample_obj = cache.get_cached_seq_group_to_sample(len(seq_ids)) - - for j, seq_id in enumerate(seq_ids): - sample_obj.seq_ids[j] = seq_id - - sample_obj.prompt_logprob_indices.clear() - sample_obj.sample_indices.clear() - - sampling_params = seq_group_metadata.sampling_params - is_prompt = seq_group_metadata.is_prompt - generator: Optional[ms.Generator] = None - # If the current seq group is in decode stage, it is None. - seq_len: Optional[int] = None - query_len: Optional[int] = None - prompt_logprob_indices: List[int] = ( - sample_obj.prompt_logprob_indices if cache is not None else [] - ) - sample_indices: List[int] = ( - sample_obj.sample_indices if cache is not None else [] - ) - do_sample = seq_group_metadata.do_sample - - if seq_group_metadata.is_prompt: - if sampling_params.seed is not None: - generator = ms.Generator().manual_seed( - sampling_params.seed - ) - if generators is not None: - generators[seq_group_metadata.request_id] = generator - - num_prompts += 1 - num_prefill_sample = len(seq_ids) - assert num_prefill_sample == 1 - assert query_lens is not None and seq_lens is not None - query_len, seq_len = query_lens[i], seq_lens[i] - # If we need sampling, exclude num_prefill_sample tokens from - # prompt logprob. - prompt_logprob_len = ( - query_len - num_prefill_sample if do_sample else query_len - ) - sample_len = num_prefill_sample if do_sample else 0 - else: - # Decode - prompt_logprob_len = 0 - query_len = ( - query_lens[i] if query_lens is not None and len(query_lens) > 0 else 1 - ) - sample_len = len(seq_ids) * query_len if do_sample else 0 - - if sampling_params.seed is not None and generators is not None: - generator = generators.get(seq_group_metadata.request_id) - - # Update indices to select from the model output. - """ - This blocks computes selected_token_indices which is used in the - following way. - - hidden_states = model(...) - logits = hidden_states[selected_token_indices] - """ - - if sampling_params.prompt_logprobs is not None: - selected_token_indices.extend( - range(model_output_idx, model_output_idx + prompt_logprob_len) - ) - model_output_idx += prompt_logprob_len - if do_sample: - selected_token_indices.extend( - range(model_output_idx, model_output_idx + sample_len) - ) - model_output_idx += sample_len - - # We now find indices for logprob computation and sampling. - """ - This block computes categorized_sample_indices which is used in the - following way. - - hidden_states = model(...) - logits = hidden_states[selected_token_indices] - def sample(logits): - # Use categorized_sample_indices for sampling. - # prompt_logprob_indices to find prompt logprob indices. - # sample_indices to find sample indices. 
- """ - - if sampling_params.prompt_logprobs is not None: - prompt_logprob_indices.extend( - range(logit_idx, logit_idx + prompt_logprob_len) - ) - logit_idx += prompt_logprob_len - if do_sample: - sample_indices.extend(range(logit_idx, logit_idx + sample_len)) - categorized_sample_indices[sampling_params.sampling_type].extend( - list(range(logit_idx, logit_idx + sample_len)) - ) - logit_idx += sample_len - - if cache is not None: - sample_obj.sampling_params = sampling_params - sample_obj.seq_data = seq_group_metadata.seq_data - sample_obj.seq_len = seq_len - sample_obj.query_len = query_len - sample_obj.generator = generator - sample_obj.is_prompt = is_prompt - else: - sample_obj = SequenceGroupToSample( - seq_ids=list(seq_ids), - sampling_params=sampling_params, - seq_data=seq_group_metadata.seq_data, - seq_len=seq_len, - query_len=query_len, - generator=generator, - is_prompt=is_prompt, - prompt_logprob_indices=list(prompt_logprob_indices), - sample_indices=list(sample_indices), - ) - - seq_groups.append(sample_obj) - - if cache is not None: - cache.reset() - - return (seq_groups, selected_token_indices, categorized_sample_indices, num_prompts) - - @dataclass class SamplingTensors: """Tensors for sampling.""" @@ -412,119 +45,6 @@ class SamplingTensors: prompt_tokens: Tensor output_tokens: Tensor - @classmethod - def from_sampling_metadata( - cls, - sampling_metadata: "SamplingMetadata", - vocab_size: int, - device, #: torch.device, - dtype, #: torch.dtype, - ) -> Tuple["SamplingTensors", bool, bool, bool]: - prompt_tokens: List[array] = [] - output_tokens: List[array] = [] - top_ks: List[int] = [] - temperatures: List[float] = [] - top_ps: List[float] = [] - min_ps: List[float] = [] - presence_penalties: List[float] = [] - frequency_penalties: List[float] = [] - repetition_penalties: List[float] = [] - do_penalties = False - do_top_p_top_k = False - do_min_p = False - - assert sampling_metadata.seq_groups is not None - for seq_group in sampling_metadata.seq_groups: - seq_ids = seq_group.seq_ids - sampling_params = seq_group.sampling_params - temperature = sampling_params.temperature - p = sampling_params.presence_penalty - f = sampling_params.frequency_penalty - r = sampling_params.repetition_penalty - top_p = sampling_params.top_p - min_p = sampling_params.min_p - - # k should not be greater than the vocab size. - top_k = min(sampling_params.top_k, vocab_size) - top_k = vocab_size if top_k == -1 else top_k - if temperature < _SAMPLING_EPS: - # NOTE: Zero temperature means deterministic sampling - # (i.e., greedy sampling or beam search). - # Set the temperature to 1 to avoid division by zero. 
- temperature = 1.0 - if not do_top_p_top_k and ( - top_p < 1.0 - _SAMPLING_EPS or top_k != vocab_size - ): - do_top_p_top_k = True - if not do_min_p and min_p > _SAMPLING_EPS: - do_min_p = True - if not do_penalties and ( - abs(p) >= _SAMPLING_EPS - or abs(f) >= _SAMPLING_EPS - or abs(r - 1.0) >= _SAMPLING_EPS - ): - do_penalties = True - - is_prompt = seq_group.is_prompt - if is_prompt and sampling_params.prompt_logprobs is not None: - # For tokens in the prompt that we only need to get - # their logprobs - query_len = seq_group.query_len - assert query_len is not None - prefill_len = len(seq_group.prompt_logprob_indices) - temperatures += [temperature] * prefill_len - top_ps += [top_p] * prefill_len - top_ks += [top_k] * prefill_len - min_ps += [min_p] * prefill_len - presence_penalties += [0] * prefill_len - frequency_penalties += [0] * prefill_len - repetition_penalties += [1] * prefill_len - - if seq_group.do_sample: - sample_lens = len(seq_group.sample_indices) - assert sample_lens >= len(seq_ids) - temperatures += [temperature] * sample_lens - top_ps += [top_p] * sample_lens - top_ks += [top_k] * sample_lens - min_ps += [min_p] * sample_lens - presence_penalties += [p] * sample_lens - frequency_penalties += [f] * sample_lens - repetition_penalties += [r] * sample_lens - - if do_penalties: - for seq_group in sampling_metadata.seq_groups: - seq_ids = seq_group.seq_ids - sampling_params = seq_group.sampling_params - if seq_group.is_prompt and sampling_params.prompt_logprobs is not None: - prefill_len = len(seq_group.prompt_logprob_indices) - prompt_tokens.extend( - array(VLLM_TOKEN_ID_ARRAY_TYPE) for _ in range(prefill_len) - ) - output_tokens.extend( - array(VLLM_TOKEN_ID_ARRAY_TYPE) for _ in range(prefill_len) - ) - if seq_group.do_sample: - for seq_id in seq_ids: - seq_data = seq_group.seq_data[seq_id] - prompt_tokens.append(seq_data.prompt_token_ids_array) - output_tokens.append(seq_data.output_token_ids_array) - - sampling_tensors = SamplingTensors.from_lists( - temperatures, - top_ps, - top_ks, - min_ps, - presence_penalties, - frequency_penalties, - repetition_penalties, - prompt_tokens, - output_tokens, - vocab_size, - device, - dtype, - ) - return (sampling_tensors, do_penalties, do_top_p_top_k, do_min_p) - @classmethod def from_lists( cls, @@ -600,6 +120,7 @@ class SamplingTensors: # Because the memory is pinned, we can do non-blocking # transfer to device. + # For MindSpore: MindSpore does not support to device now return cls( temperatures=temperatures_t, top_ps=top_ps_t, diff --git a/vllm_mindspore/utils.py b/vllm_mindspore/utils.py index 60cd4af04..153589ed6 100644 --- a/vllm_mindspore/utils.py +++ b/vllm_mindspore/utils.py @@ -61,17 +61,6 @@ def get_valid_dtype(dtype): return dtype -def direct_register_custom_op( - op_name: str, - op_func: Callable, - mutates_args: List[str], - fake_impl: Optional[Callable] = None, - target_lib: Optional[Library] = None, - dispatch_key: str = "CUDA", -): - ... 
- - def _create_empty_tensor(ms_type): init_func = Zero() init_func.__enable_zero_dim__ = True @@ -153,50 +142,6 @@ STR_DTYPE_TO_MS_DTYPE = { } -def get_dtype_size(dtype: torch.dtype) -> int: - """Get the size of the data type in bytes.""" - if isinstance(dtype, str): - dtype = STR_DTYPE_TO_TENSOR_DTYPE[dtype] - return torch.tensor([1], dtype=dtype).itemsize - - -def ascend_device_count_stateless() -> int: - visible_device_str = os.environ.get("ASCEND_RT_VISIBLE_DEVICES", None) - if visible_device_str: - try: - res = visible_device_str.split(",") - except Exception as e: - logger.error('Cannot parse "ASCEND_RT_VISIBLE_DEVICES" for: %s!', - str(e)) - raise ValueError( - f'Error argument({visible_device_str}) of environ "ASCEND_RT_VISIBLE_DEVICES"!' - ) from e - - return len(res) - - import re - import subprocess - - output = subprocess.check_output(["npu-smi", "info"], encoding="utf-8") - res = re.findall( - r"\|\s+\d+\s+\w+\s+\|\s+(\w+)\s+\|\s+(?:[0-9\.]+|-)\s+[0-9\.]+\s+\d+\s+\/\s+\d+\s+\|", - output, - ) - - avl_devices = [] - for i, stat in enumerate(res): - if stat != "OK": - logger.warning("Device %d is not ok, status is %s!", i, stat) - else: - avl_devices.append(str(i)) - visible_device_str = ",".join(avl_devices) - os.environ["ASCEND_RT_VISIBLE_DEVICES"] = visible_device_str - logger.info('Set environ "ASCEND_RT_VISIBLE_DEVICES" as %s', - visible_device_str) - - return len(avl_devices) - - def ascend_is_initialized(): # Just return true for check. return True -- Gitee From 75b4eb23769cae3bfc592330b312ea038d00123e Mon Sep 17 00:00:00 2001 From: yyyyrf Date: Mon, 9 Jun 2025 10:04:00 +0800 Subject: [PATCH 04/76] add gptq a16w4 st --- .../python/test_vllm_deepseek_gptq_a16w4.py | 92 +++++++++++++++++++ tests/st/python/test_vllm_deepseek_osl.py | 42 ++++++++- .../python/test_vllm_deepseek_smoothquant.py | 2 +- .../test_vllm_deepseek_smoothquant_mss.py | 2 +- 4 files changed, 135 insertions(+), 3 deletions(-) create mode 100644 tests/st/python/test_vllm_deepseek_gptq_a16w4.py diff --git a/tests/st/python/test_vllm_deepseek_gptq_a16w4.py b/tests/st/python/test_vllm_deepseek_gptq_a16w4.py new file mode 100644 index 000000000..9bacd0749 --- /dev/null +++ b/tests/st/python/test_vllm_deepseek_gptq_a16w4.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# isort: skip_file +# encoding: utf-8 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""test mf deepseek r1 gptq int4 quantization.""" +import os +import yaml +import pytest +from . 
import set_env + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "MINDFORMERS_MODEL_CONFIG": "./config/predict_deepseek_r1_671b_a16w4.yaml", + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "vLLM_MODEL_BACKEND": "MindFormers", + "MS_ENABLE_LCCL": "off", + "HCCL_OP_EXPANSION_MODE": "AIV", + "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", + "VLLM_USE_V1": "0", + "HCCL_IF_BASE_PORT": "60000", + "LCAL_COMM_ID": "127.0.0.1:10068" +} +# set env +env_manager.setup_ai_environment(env_vars) +import vllm_mindspore # noqa: F401, E402 +from vllm import LLM, SamplingParams # noqa: E402 + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.allcards +def test_deepseek_r1_gptq_a16w4(): + """ + test case deepseek r1 a16w4 + """ + yaml_path = "./config/predict_deepseek_r1_671b.yaml" + a16w4_yaml = "./config/predict_deepseek_r1_671b_a16w4.yaml" + with open(yaml_path, 'r', encoding='utf-8') as file: + content = yaml.safe_load(file) + model_config = content["model"]["model_config"] + model_config["quantization_config"] = {"quant_method": "gptq-pergroup"} + content["model"]["model_config"] = model_config + + with open(a16w4_yaml, 'w', encoding='utf-8') as file: + yaml.dump(content, file, allow_unicode=True, sort_keys=False) + + # Sample prompts. + prompts = [ + "介绍下北京故宫", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=1024, top_k=1) + + # Create an LLM. + llm = LLM( + model= + "/home/workspace/mindspore_dataset/weight/DeepSeekR1_gptq-pergroup_safetensors", + trust_remote_code=True, + gpu_memory_utilization=0.9, + tensor_parallel_size=4) + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert "博物院christianాలు sic辨" in generated_text + + # unset env + env_manager.unset_all() diff --git a/tests/st/python/test_vllm_deepseek_osl.py b/tests/st/python/test_vllm_deepseek_osl.py index 28b7a8173..05542d97d 100644 --- a/tests/st/python/test_vllm_deepseek_osl.py +++ b/tests/st/python/test_vllm_deepseek_osl.py @@ -47,7 +47,7 @@ import vllm_mindspore # noqa: F401, E402 from vllm import LLM, SamplingParams # noqa: E402 -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_arm_ascend910b_training @pytest.mark.env_single def test_deepseek_r1(): @@ -85,3 +85,43 @@ def test_deepseek_r1(): # unset env env_manager.unset_all() + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_deepseek_r1_mss(): + """ + test case deepseek r1 w8a8 mss + """ + + # Sample prompts. + prompts = [ + "介绍下北京故宫", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, + max_tokens=10, + top_k=1) + + # Create an LLM. + llm = LLM( + model= + "/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-osl", + trust_remote_code=True, + gpu_memory_utilization=0.9, + tensor_parallel_size=8, + num_scheduler_steps=8) + # Generate texts from the prompts. 
The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert "博物院" in generated_text + + # unset env + env_manager.unset_all() diff --git a/tests/st/python/test_vllm_deepseek_smoothquant.py b/tests/st/python/test_vllm_deepseek_smoothquant.py index 6937245f7..eb0ef8921 100644 --- a/tests/st/python/test_vllm_deepseek_smoothquant.py +++ b/tests/st/python/test_vllm_deepseek_smoothquant.py @@ -43,7 +43,7 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_arm_ascend910b_training @pytest.mark.env_single def test_deepseek_r1(): diff --git a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py b/tests/st/python/test_vllm_deepseek_smoothquant_mss.py index 3788bcb7d..5476b1ae3 100644 --- a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py +++ b/tests/st/python/test_vllm_deepseek_smoothquant_mss.py @@ -43,7 +43,7 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_arm_ascend910b_training @pytest.mark.env_single def test_deepseek_r1_mss(): -- Gitee From 74839a0d2853bc8cf43903fd1fa7d00114260245 Mon Sep 17 00:00:00 2001 From: moran Date: Tue, 10 Jun 2025 09:51:43 +0800 Subject: [PATCH 05/76] recover codecheck --- codecheck_toolkits/vllm_codecheck.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codecheck_toolkits/vllm_codecheck.sh b/codecheck_toolkits/vllm_codecheck.sh index c91087fe1..8d35771ef 100644 --- a/codecheck_toolkits/vllm_codecheck.sh +++ b/codecheck_toolkits/vllm_codecheck.sh @@ -6,7 +6,7 @@ RET_FLAG=0 # yapf check -MERGEBASE="$(git merge-base origin/master HEAD)" +MERGEBASE="$(git merge-base origin/develop HEAD)" if ! 
git diff --cached --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &> /dev/null; then git diff --cached --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs -P 5 \ yapf --diff --recursive --parallel --exclude tests/ @@ -73,4 +73,4 @@ fi rm -f pyproject.toml -exit 0 +exit ${RET_FLAG} -- Gitee From 1a49896947e30abcfec8b87bf889aac223501cdc Mon Sep 17 00:00:00 2001 From: fary86 Date: Mon, 9 Jun 2025 15:06:26 +0800 Subject: [PATCH 06/76] fix inference service exit when error occurs --- vllm_mindspore/__init__.py | 9 + vllm_mindspore/v1/core/__init__.py | 0 vllm_mindspore/v1/core/sched/__init__.py | 0 vllm_mindspore/v1/core/sched/scheduler.py | 177 +++++++++++++++++++ vllm_mindspore/v1/worker/gpu_model_runner.py | 24 +++ 5 files changed, 210 insertions(+) create mode 100644 vllm_mindspore/v1/core/__init__.py create mode 100644 vllm_mindspore/v1/core/sched/__init__.py create mode 100644 vllm_mindspore/v1/core/sched/scheduler.py diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py index 5892937a8..cd5863eaf 100644 --- a/vllm_mindspore/__init__.py +++ b/vllm_mindspore/__init__.py @@ -279,6 +279,11 @@ vllm.v1.worker.gpu_model_runner.GPUModelRunner._update_states = _update_states from vllm_mindspore.v1.worker.gpu_model_runner import initialize_kv_cache vllm.v1.worker.gpu_model_runner.GPUModelRunner.initialize_kv_cache = initialize_kv_cache +from vllm_mindspore.v1.worker.gpu_model_runner import wrapper_gpu_model_runner_execute_model +from vllm.v1.worker.gpu_model_runner import GPUModelRunner +vllm.v1.worker.gpu_model_runner.GPUModelRunner.execute_model = \ + wrapper_gpu_model_runner_execute_model(GPUModelRunner.execute_model) + import vllm.v1.worker.block_table from vllm_mindspore.v1.worker.block_table import BlockTable vllm.v1.worker.block_table.BlockTable = BlockTable @@ -333,6 +338,10 @@ from vllm_mindspore.v1.worker.gpu_worker import compile_or_warm_up_model from vllm.v1.worker.gpu_worker import Worker Worker.compile_or_warm_up_model = compile_or_warm_up_model +from vllm_mindspore.v1.core.sched.scheduler import update_from_output +from vllm.v1.core.sched.scheduler import Scheduler +Scheduler.update_from_output = update_from_output + from .utils import check_ready from vllm_mindspore.engine.multiprocessing.engine import cleanup diff --git a/vllm_mindspore/v1/core/__init__.py b/vllm_mindspore/v1/core/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/v1/core/sched/__init__.py b/vllm_mindspore/v1/core/sched/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/v1/core/sched/scheduler.py b/vllm_mindspore/v1/core/sched/scheduler.py new file mode 100644 index 000000000..76f7a6b90 --- /dev/null +++ b/vllm_mindspore/v1/core/sched/scheduler.py @@ -0,0 +1,177 @@ +# ruff: noqa: G004: + +from typing import Optional + +from vllm.logger import init_logger +from vllm.v1.core.sched.output import SchedulerOutput +from vllm.v1.core.sched.utils import check_stop +from vllm.v1.engine import EngineCoreOutput, EngineCoreOutputs, FinishReason +from vllm.v1.outputs import ModelRunnerOutput +from vllm.v1.request import Request, RequestStatus +from vllm.v1.spec_decode.metrics import SpecDecodingStats + +logger = init_logger(__name__) + + +def update_from_output( + self, + scheduler_output: SchedulerOutput, + model_runner_output: ModelRunnerOutput, +) -> EngineCoreOutputs: + sampled_token_ids = model_runner_output.sampled_token_ids + spec_token_ids = model_runner_output.spec_token_ids + logprobs = 
model_runner_output.logprobs + prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict + num_scheduled_tokens = scheduler_output.num_scheduled_tokens + + new_running: list[Request] = [] + outputs: list[EngineCoreOutput] = [] + spec_decoding_stats: Optional[SpecDecodingStats] = None + + # NOTE(woosuk): As len(self.running) can be up to 1K or more, the below + # loop can be a performance bottleneck. We should do our best to avoid + # expensive operations inside the loop. + + # Add by vllm-mindspore begin: + running_req_ids = [req.request_id for req in self.running] + # abort_req_ids used to keep track of failed requests caused by model execution exception + abort_req_ids: list[str] = [] + # Add by vllm-mindspore end. + + for request in self.running: + req_id = request.request_id + + # Add by vllm-mindspore begin: + # None sampled_token_ids comes from exception model execution, set them to abort list + # to keep main scheduler task running right. + if sampled_token_ids is None: + self.scheduled_req_ids.remove(req_id) + logger.warning( + f'Process aborted request {req_id} from running requests {running_req_ids}' + ) + outputs.append( + EngineCoreOutput(request_id=req_id, + new_token_ids=[], + finish_reason=FinishReason.ABORT, + new_logprobs=None, + new_prompt_logprobs_tensors=None, + stop_reason=request.stop_reason, + events=request.take_events())) + abort_req_ids.append(req_id) + continue + # Add by vllm-mindspore end. + + num_tokens_scheduled = num_scheduled_tokens.get(req_id, 0) + if num_tokens_scheduled == 0: + # The request was not scheduled in this step. + new_running.append(request) + continue + + req_index = model_runner_output.req_id_to_index[req_id] + generated_token_ids = sampled_token_ids[req_index] + + scheduled_spec_token_ids = ( + scheduler_output.scheduled_spec_decode_tokens.get(req_id)) + if scheduled_spec_token_ids: + # num_computed_tokens represents the number of tokens + # processed in the current step, considering scheduled + # tokens and rejections. If some tokens are rejected, + # num_computed_tokens is decreased by the number of rejected + # tokens, where is given by: + # len(scheduled_spec_token_ids) + 1 - len(generated_token_ids). + num_tokens_rejected = (len(scheduled_spec_token_ids) + 1 - + len(generated_token_ids)) + request.num_computed_tokens -= num_tokens_rejected + spec_decoding_stats = self.make_spec_decoding_stats( + spec_decoding_stats, + num_draft_tokens=len(scheduled_spec_token_ids), + num_accepted_tokens=len(generated_token_ids) - 1) + + cached_encoder_input_ids = ( + self.encoder_cache_manager.get_cached_input_ids(request)) + # OPTIMIZATION: Avoid list(set) if the set is empty. + if cached_encoder_input_ids: + for input_id in list(cached_encoder_input_ids): + mm_positions = request.mm_positions[input_id] + start_pos = mm_positions["offset"] + num_tokens = mm_positions["length"] + if start_pos + num_tokens <= request.num_computed_tokens: + # The encoder output is already processed and stored + # in the decoder's KV cache. + self.encoder_cache_manager.free_encoder_input( + request, input_id) + + # Add newly generated spec token ids to the request. + if spec_token_ids is not None: + request.spec_token_ids = spec_token_ids[req_index] + + stopped = False + new_logprobs = None + new_token_ids = generated_token_ids + + # Append generated tokens and check for stop. Note that if + # a request is still being prefilled, we expect the model runner + # to return empty token ids for the request. 
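+        # `num_new` is a 1-based count of the accepted tokens, so the
+        # `del new_token_ids[num_new:]` below trims any tokens generated
+        # after the stop condition was hit.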
+ for num_new, output_token_id in enumerate(new_token_ids, 1): + request.append_output_token_ids(output_token_id) + + # Check for stop and update request state. + # This must be called before we make the EngineCoreOutput. + stopped = check_stop(request, self.max_model_len) + if stopped: + self._free_request(request) + del new_token_ids[num_new:] # Trim new tokens if needed. + break + + # Extract sample logprobs if needed. + if request.sampling_params.logprobs is not None and logprobs: + # NOTE: once we support N tokens per step (spec decode), + # the outer lists can be of length > 1. + new_logprobs = logprobs.slice(req_index, req_index + 1) + + if new_token_ids and request.use_structured_output: + # NOTE: structured_output_request + # should not be None if use_structured_output, we have + # check above, so safe to ignore type warning + request.structured_output_request.grammar.accept_tokens( # type: ignore[union-attr] + req_id, new_token_ids) + + # Get prompt logprobs for this request. + prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id) + if new_token_ids: + # Add EngineCoreOutput for this Request. + outputs.append( + EngineCoreOutput( + request_id=req_id, + new_token_ids=new_token_ids, + finish_reason=request.get_finished_reason(), + new_logprobs=new_logprobs, + new_prompt_logprobs_tensors=prompt_logprobs_tensors, + stop_reason=request.stop_reason, + events=request.take_events())) + else: + # Invariant: EngineCore returns no partial prefill outputs. + assert not prompt_logprobs_tensors + + self.scheduled_req_ids.remove(req_id) + if not stopped: + new_running.append(request) + + # Add by vllm-mindspore begin: + # make failed requests finished to make the server can continue to process new request + if len(abort_req_ids) > 0: + logger.warning(f'Aborted requests are {abort_req_ids}') + self.finish_requests(abort_req_ids, RequestStatus.FINISHED_ABORTED) + # Add by vllm-mindspore end. 
+ + self.running = new_running + engine_core_outputs = EngineCoreOutputs( + outputs=outputs, + scheduler_stats=self.make_stats(spec_decoding_stats), + ) + if self.include_finished_set: + #TODO currently sending duplicates here, improve this + engine_core_outputs.finished_requests = ( + scheduler_output.finished_req_ids | self.finished_req_ids) + + return engine_core_outputs diff --git a/vllm_mindspore/v1/worker/gpu_model_runner.py b/vllm_mindspore/v1/worker/gpu_model_runner.py index a21a2f73e..7854e2942 100644 --- a/vllm_mindspore/v1/worker/gpu_model_runner.py +++ b/vllm_mindspore/v1/worker/gpu_model_runner.py @@ -12,6 +12,7 @@ from vllm_mindspore.v1.attention.backends.flash_attn import (FlashAttentionMetad from vllm_mindspore.utils import get_valid_dtype from vllm.v1.kv_cache_interface import FullAttentionSpec +from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.utils import bind_kv_cache from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.distributed.parallel_state import get_pp_group @@ -417,3 +418,26 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: if batch_changed: self.input_batch.refresh_sampling_metadata() + + +def wrapper_gpu_model_runner_execute_model(func): + + def new_func(*args, **kwargs): + self = args[0] + try: + output = func(*args, **kwargs) + return output + except Exception as e: + logger.warning( + f"Caught exception {str(e)} when processing req_ids {self.input_batch.req_ids}" + ) + return ModelRunnerOutput( + req_ids=self.input_batch.req_ids, + req_id_to_index=self.input_batch.req_id_to_index, + sampled_token_ids=None, + spec_token_ids=None, + logprobs=None, + prompt_logprobs_dict={}, + ) + + return new_func -- Gitee From 1554ec1c479fc972c39dd934864cac4205dbfa99 Mon Sep 17 00:00:00 2001 From: zhang_xu_hao1230 Date: Sat, 26 Apr 2025 16:41:33 +0800 Subject: [PATCH 07/76] =?UTF-8?q?=E5=8E=9F=E7=94=9Fqwen2=E6=94=AF=E6=8C=81?= =?UTF-8?q?lora?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- install_depend_pkgs.sh | 2 +- tests/st/python/test_multilora_inference.py | 113 ++ vllm_mindspore/__init__.py | 80 +- vllm_mindspore/attention/layer.py | 78 +- vllm_mindspore/lora/__init__.py | 0 vllm_mindspore/lora/layers.py | 1165 +++++++++++++++++ vllm_mindspore/lora/models.py | 227 ++++ vllm_mindspore/lora/ops/__init__.py | 0 vllm_mindspore/lora/ops/torch_ops/__init__.py | 0 vllm_mindspore/lora/ops/torch_ops/lora_ops.py | 171 +++ .../lora/punica_wrapper/__init__.py | 0 .../lora/punica_wrapper/punica_npu.py | 357 +++++ vllm_mindspore/lora/utils.py | 47 + .../model_executor/layers/rotary_embedding.py | 26 +- .../layers/vocab_parallel_embedding.py | 130 +- .../model_executor/models/model_base.py | 118 +- vllm_mindspore/model_executor/models/qwen2.py | 299 +++-- vllm_mindspore/model_executor/models/utils.py | 62 +- vllm_mindspore/platforms/ascend.py | 27 +- 19 files changed, 2525 insertions(+), 377 deletions(-) create mode 100644 tests/st/python/test_multilora_inference.py create mode 100644 vllm_mindspore/lora/__init__.py create mode 100644 vllm_mindspore/lora/layers.py create mode 100644 vllm_mindspore/lora/models.py create mode 100644 vllm_mindspore/lora/ops/__init__.py create mode 100644 vllm_mindspore/lora/ops/torch_ops/__init__.py create mode 100644 vllm_mindspore/lora/ops/torch_ops/lora_ops.py create mode 100644 vllm_mindspore/lora/punica_wrapper/__init__.py create mode 100644 vllm_mindspore/lora/punica_wrapper/punica_npu.py create mode 100644 vllm_mindspore/lora/utils.py diff 
--git a/install_depend_pkgs.sh b/install_depend_pkgs.sh index ba0f79886..b3d8306e2 100644 --- a/install_depend_pkgs.sh +++ b/install_depend_pkgs.sh @@ -100,4 +100,4 @@ cd "$msadapter_dir" || { echo "Failed to git clone msadapter!"; exit 1; } pip uninstall msadapter -y && pip install . || { echo "Failed to install msadapter"; exit 1; } cd .. -echo "========= All dependencies installed successfully!" +echo "========= All dependencies installed successfully!" \ No newline at end of file diff --git a/tests/st/python/test_multilora_inference.py b/tests/st/python/test_multilora_inference.py new file mode 100644 index 000000000..d5e86441c --- /dev/null +++ b/tests/st/python/test_multilora_inference.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +This example shows how to use the multi-LoRA functionality +for offline inference. + +""" +import pytest +import os +from . import set_env + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "MS_ENABLE_LCCL": "off", + "HCCL_OP_EXPANSION_MODE": "AIV", + "ASCEND_RT_VISIBLE_DEVICES": "0,1", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", + "VLLM_USE_V1": "1", +} +# set env +env_manager.setup_ai_environment(env_vars) +import vllm_mindspore +from typing import List, Optional, Tuple + +from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams +from vllm.lora.request import LoRARequest + + +def create_test_prompts( + lora_path: str +) -> List[Tuple[str, SamplingParams, Optional[LoRARequest]]]: + """Create a list of test prompts with their sampling parameters. + """ + return [ + ("违章停车与违法停车是否有区别?", + SamplingParams(temperature=0.0, top_p=1, top_k=-1, + max_tokens=10), LoRARequest("sql-lora1", 1, + lora_path)), + ] + + +def process_requests(engine: LLMEngine, + test_prompts: List[Tuple[str, SamplingParams, + Optional[LoRARequest]]]): + """Continuously process a list of prompts and handle the outputs.""" + request_id = 0 + + while test_prompts or engine.has_unfinished_requests(): + if test_prompts: + prompt, sampling_params, lora_request = test_prompts.pop(0) + engine.add_request(str(request_id), + prompt, + sampling_params, + lora_request=lora_request) + request_id += 1 + + request_outputs: List[RequestOutput] = engine.step() + for request_output in request_outputs: + if request_output.finished: + print(f'text is: {request_output.outputs[0].text}', flush=True) + assert " 从法律上来说,违章停车和违法" in request_output.outputs[0].text + + +def initialize_engine() -> LLMEngine: + """Initialize the LLMEngine.""" + # max_loras: controls the number of LoRAs that can be used in the same + # batch. 
Larger numbers will cause higher memory usage, as each LoRA + # slot requires its own preallocated tensor. + # max_lora_rank: controls the maximum supported rank of all LoRAs. Larger + # numbers will cause higher memory usage. If you know that all LoRAs will + # use the same rank, it is recommended to set this as low as possible. + # max_cpu_loras: controls the size of the CPU LoRA cache. + engine_args = EngineArgs( + model="/home/workspace/mindspore_dataset/weight/Qwen2.5-7B-Instruct", + enable_lora=True, + max_loras=1, + max_lora_rank=64, + max_cpu_loras=2, + max_num_seqs=256, + max_model_len=256, + max_num_batched_tokens=400) + return LLMEngine.from_engine_args(engine_args) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_multilora_inference(): + """test function that sets up and runs the prompt processing.""" + engine = initialize_engine() + lora_path = "/home/workspace/mindspore_dataset/weight/Qwen2.5-7B-Lora-Law" + test_prompts = create_test_prompts(lora_path) + process_requests(engine, test_prompts) + env_manager.unset_all() diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py index 963316a79..e4cc27268 100644 --- a/vllm_mindspore/__init__.py +++ b/vllm_mindspore/__init__.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# encoding: utf-8 +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -27,6 +27,7 @@ if "vllm" in sys.modules: # 1. set env before import mindspore. from vllm_mindspore.scripts import env_setup + env_setup() # 2. update the log configuration ahead of other modifications. @@ -49,14 +50,17 @@ import vllm.utils vllm.utils.current_platform = ascend_platform import vllm.attention.selector + vllm.attention.selector.current_platform = ascend_platform import vllm.engine.arg_utils from vllm_mindspore.engine.arg_utils import _is_v1_supported_oracle + vllm.engine.arg_utils.EngineArgs._is_v1_supported_oracle = _is_v1_supported_oracle import vllm.v1.engine.core from vllm_mindspore.v1.engine.core import shutdown + vllm.v1.engine.core.DPEngineCoreProc.shutdown = shutdown from vllm_mindspore.utils import ( @@ -71,6 +75,35 @@ vllm.utils.async_tensor_h2d = async_tensor_h2d vllm.utils.cuda_is_initialized = ascend_is_initialized vllm.utils.memory_profiling = ms_memory_profiling +import vllm.lora.utils + +from vllm_mindspore.model_executor.layers.linear import LinearBase +from vllm_mindspore.lora.utils import _all_lora_classes + +vllm.lora.utils._all_lora_classes = _all_lora_classes +vllm.lora.utils.LinearBase = LinearBase + +import vllm.lora.models +from vllm_mindspore.lora.models import register_module, from_local_checkpoint, from_lora_tensors + +vllm.lora.models.LoRAModelManager.register_module = register_module +vllm.lora.models.LoRAModel.from_local_checkpoint = from_local_checkpoint +vllm.lora.models.LoRAModel.from_lora_tensors = from_lora_tensors + +from vllm_mindspore.lora.layers import (ColumnParallelLinearWithLoRA, + MergedColumnParallelLinearWithLoRA, + MergedQKVParallelLinearWithLoRA, + QKVParallelLinearWithLoRA, + RowParallelLinearWithLoRA) + +import vllm.lora.layers + +vllm.lora.layers.ColumnParallelLinearWithLoRA = ColumnParallelLinearWithLoRA +vllm.lora.layers.MergedColumnParallelLinearWithLoRA = MergedColumnParallelLinearWithLoRA +vllm.lora.layers.MergedQKVParallelLinearWithLoRA = MergedQKVParallelLinearWithLoRA +vllm.lora.layers.QKVParallelLinearWithLoRA = QKVParallelLinearWithLoRA +vllm.lora.layers.RowParallelLinearWithLoRA = RowParallelLinearWithLoRA + 
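+# The assignments above replace vLLM's default LoRA layer classes with the
+# MindSpore-backed implementations defined in vllm_mindspore.lora.layers.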
import vllm.executor from vllm_mindspore.model_executor.models.registry import ( @@ -92,11 +125,9 @@ from vllm.model_executor.model_loader import get_model_architecture vllm.model_executor.model_loader.get_model_architecture = get_ms_model_architecture vllm.model_executor.model_loader.utils.get_model_architecture = ( - get_ms_model_architecture -) + get_ms_model_architecture) vllm.model_executor.model_loader.loader.get_model_architecture = ( - get_ms_model_architecture -) + get_ms_model_architecture) from vllm_mindspore.model_executor.sampling_metadata import SamplingTensors @@ -115,12 +146,10 @@ vllm.worker.cache_engine.CacheEngine.swap_in = ms_swap_in vllm.worker.cache_engine.CacheEngine.swap_out = ms_swap_out from vllm_mindspore.model_executor.model_loader.weight_utils import ( - safetensors_weights_iterator, -) + safetensors_weights_iterator, ) vllm.model_executor.model_loader.loader.safetensors_weights_iterator = ( - safetensors_weights_iterator -) + safetensors_weights_iterator) from vllm_mindspore.worker.worker import _warm_up_model from vllm_mindspore.worker.profile import ( @@ -140,15 +169,13 @@ from vllm_mindspore.worker.model_runner import ( ) vllm.worker.model_runner.ModelInputForGPUBuilder._get_cuda_graph_pad_size = ( - _get_cuda_graph_pad_size -) + _get_cuda_graph_pad_size) vllm.worker.model_runner.GPUModelRunnerBase._dummy_run = _dummy_run import vllm.worker.multi_step_model_runner vllm.worker.multi_step_model_runner._get_supported_attention_backends = ( - _get_supported_attention_backends -) + _get_supported_attention_backends) from vllm_mindspore.executor.multiproc_worker_utils import ( get_mp_context as ms_get_mp_context, @@ -165,8 +192,10 @@ import vllm.executor.multiproc_worker_utils vllm.executor.multiproc_worker_utils.ProcessWorkerWrapper.terminate_worker = ms_terminate_worker import vllm.v1.executor.multiproc_executor + vllm.v1.executor.multiproc_executor.get_mp_context = ms_get_mp_context import vllm.v1.utils + vllm.v1.utils.get_mp_context = ms_get_mp_context from vllm_mindspore.executor.ray_gpu_executor import ( @@ -201,6 +230,7 @@ vllm.config.ParallelConfig.has_unfinished_dp = has_unfinished_dp from .utils import update_modules from vllm_mindspore.attention.backends import ms_attn + update_modules("vllm.attention.backends.flash_attn", ms_attn) from vllm_mindspore.worker.spec_decode_worker import ( @@ -211,20 +241,25 @@ from vllm_mindspore.worker.spec_decode_worker import ( _merge_outputs, ) from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker + SpecDecodeWorker.__init__ = spec_decode_worker_init SpecDecodeWorker._verify_tokens = _verify_tokens SpecDecodeWorker._run_no_spec = _run_no_spec from vllm.model_executor.layers.spec_decode_base_sampler import SpecDecodeBaseSampler + SpecDecodeBaseSampler._create_output = _create_output from vllm.spec_decode.top1_proposer import Top1Proposer + Top1Proposer._merge_outputs = _merge_outputs from vllm_mindspore.model_executor.layers.rejection_sampler import _smallest_positive_value, _multinomial from vllm.model_executor.layers.rejection_sampler import RejectionSampler + RejectionSampler._smallest_positive_value = _smallest_positive_value -RejectionSampler._smallest_positive_value.__set_name__(RejectionSampler, '_smallest_positive_value') +RejectionSampler._smallest_positive_value.__set_name__( + RejectionSampler, '_smallest_positive_value') vllm.model_executor.layers.rejection_sampler._multinomial = _multinomial ######### for multi-model @@ -240,34 +275,42 @@ from 
vllm_mindspore.model_executor.layers.rotary_embedding import InferMRotaryEm vllm.model_executor.layers.rotary_embedding.MRotaryEmbedding = InferMRotaryEmbedding from vllm_mindspore.v1.sample import rejection_sampler + update_modules("vllm.v1.sample.rejection_sampler", rejection_sampler) from vllm_mindspore.v1.spec_decode import eagle + update_modules("vllm.v1.spec_decode.eagle", eagle) from vllm_mindspore.v1.attention.backends import flash_attn import vllm.v1.attention.backends + sys.modules['vllm.v1.attention.backends.flash_attn'] = flash_attn import vllm.v1.attention.backends.flash_attn import vllm.v1.worker.gpu_model_runner from vllm_mindspore.v1.worker.gpu_model_runner import _prepare_inputs + vllm.v1.worker.gpu_model_runner.GPUModelRunner._prepare_inputs = _prepare_inputs from vllm_mindspore.v1.worker.gpu_model_runner import _update_states + vllm.v1.worker.gpu_model_runner.GPUModelRunner._update_states = _update_states from vllm_mindspore.v1.worker.gpu_model_runner import initialize_kv_cache + vllm.v1.worker.gpu_model_runner.GPUModelRunner.initialize_kv_cache = initialize_kv_cache import vllm.v1.worker.block_table from vllm_mindspore.v1.worker.block_table import BlockTable + vllm.v1.worker.block_table.BlockTable = BlockTable vllm.v1.worker.gpu_input_batch.BlockTable = BlockTable import vllm.v1.worker.gpu_input_batch from vllm_mindspore.v1.worker.gpu_input_batch import _make_sampling_metadata, _make_prompt_token_ids_tensor + vllm.v1.worker.gpu_input_batch.InputBatch._make_sampling_metadata = _make_sampling_metadata vllm.v1.worker.gpu_model_runner.InputBatch._make_sampling_metadata = _make_sampling_metadata vllm.v1.worker.gpu_input_batch.InputBatch._make_prompt_token_ids_tensor = _make_prompt_token_ids_tensor @@ -279,17 +322,19 @@ from vllm_mindspore.v1.worker.gpu_worker import init_device Worker.__init__ = wrapper_worker_init(Worker.__init__) Worker.init_device = wrapper_worker_init_device(init_device) - import vllm.v1.utils from vllm_mindspore.v1.utils import copy_slice + vllm.v1.utils.copy_slice = copy_slice vllm.v1.worker.gpu_input_batch.copy_slice = copy_slice from vllm_mindspore.v1.sample.ops.penalties import _convert_to_tensors import vllm.v1.sample.ops.penalties + vllm.v1.sample.ops.penalties._convert_to_tensors = _convert_to_tensors import vllm.model_executor.layers.utils from vllm_mindspore.model_executor.layers.utils import apply_penalties + vllm.model_executor.layers.utils.apply_penalties = apply_penalties vllm.v1.sample.ops.penalties.apply_penalties = apply_penalties @@ -299,26 +344,31 @@ from vllm_mindspore.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p, ra import vllm.v1.sample.ops.topk_topp_sampler from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler + TopKTopPSampler.forward_native = topk_topp_sampler_forward_native vllm.v1.sample.ops.topk_topp_sampler.apply_top_k_top_p = apply_top_k_top_p vllm.v1.sample.ops.topk_topp_sampler.random_sample = random_sample vllm.v1.sample.ops.topk_topp_sampler.apply_top_k_only = apply_top_k_only from vllm_mindspore.v1.sample.sampler import apply_temperature import vllm.v1.sample.sampler + vllm.v1.sample.sampler.Sampler.apply_temperature = apply_temperature from vllm_mindspore.distributed.shm_broadcast import initialize_ShmRingBuffer from vllm.distributed.device_communicators.shm_broadcast import ShmRingBuffer + ShmRingBuffer.__init__ = initialize_ShmRingBuffer from vllm_mindspore.v1.worker.gpu_worker import compile_or_warm_up_model from vllm.v1.worker.gpu_worker import Worker + Worker.compile_or_warm_up_model = 
compile_or_warm_up_model from .utils import check_ready from vllm_mindspore.engine.multiprocessing.engine import cleanup import vllm.engine.multiprocessing.engine + vllm.engine.multiprocessing.engine.MQLLMEngine.cleanup = cleanup check_ready() diff --git a/vllm_mindspore/attention/layer.py b/vllm_mindspore/attention/layer.py index caa6353e3..f4af1afba 100644 --- a/vllm_mindspore/attention/layer.py +++ b/vllm_mindspore/attention/layer.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -18,37 +17,31 @@ """Common layer for LLM.""" from typing import Any, Dict, List, Optional, Tuple -from mindspore import Tensor, mint, nn, ops, jit +from mindspore import Tensor, mint, nn, ops from mindspore.common import dtype as mstype from mindspore.ops.auto_generate import PagedAttention, ReshapeAndCache from mindspore.ops.operations.nn_ops import FlashAttentionScore - -from vllm.config import CacheConfig from vllm.attention.backends.abstract import AttentionType -from vllm.model_executor.layers.quantization.base_config import ( - QuantizationConfig) +from vllm.config import CacheConfig +from vllm.model_executor.layers.quantization.base_config import \ + QuantizationConfig -def _pad_to_max_tensor( - input_: Tensor, - max_len: int, - dim: int = 0, - pad_value: int = -1 -) -> Tensor: +def _pad_to_max_tensor(input_: Tensor, + max_len: int, + dim: int = 0, + pad_value: int = -1) -> Tensor: """Temporary function, will be deprecated in the future.""" if input_.shape[dim] == max_len: return input_ - pad_shape = (input_.shape[0], max_len - input_.shape[dim], *input_.shape[dim + 1:]) + pad_shape = (input_.shape[0], max_len - input_.shape[dim], + *input_.shape[dim + 1:]) pad_tensor = mint.ones(size=pad_shape, dtype=input_.dtype) * pad_value output = mint.cat([input_, pad_tensor], dim=dim) return output -def _generate_attn_mask( - query: Tensor, - value: Tensor, - flatten: bool -) -> Tensor: +def _generate_attn_mask(query: Tensor, value: Tensor, flatten: bool) -> Tensor: """Temporary function, will be deprecated in the future.""" if flatten: return mint.triu(mint.ones(size=(128, 128), dtype=query.dtype), 1) @@ -59,16 +52,14 @@ def _generate_attn_mask( return mask -def _hidden_states_th2bsh( - input_: Tensor, - batch_valid_length: Tensor -) -> Tensor: +def _hidden_states_th2bsh(input_: Tensor, + batch_valid_length: Tensor) -> Tensor: """Temporary function, will be deprecated in the future.""" max_seq_len = batch_valid_length.max().item() start_pos = 0 padding_input_list = [] for valid_length in batch_valid_length: - valid_input = input_[:, start_pos: start_pos + valid_length, :] + valid_input = input_[:, start_pos:start_pos + valid_length, :] padded_input = _pad_to_max_tensor(valid_input, max_seq_len, 1) padding_input_list.append(padded_input) start_pos += valid_length @@ -76,10 +67,8 @@ def _hidden_states_th2bsh( return bsh_output -def _hidden_states_bsh2th( - input_: Tensor, - batch_valid_length: Tensor -) -> Tensor: +def _hidden_states_bsh2th(input_: Tensor, + batch_valid_length: Tensor) -> Tensor: """Temporary function, will be deprecated in the future.""" unpadded_input_list = [] for batch_index, valid_length in enumerate(batch_valid_length): @@ -128,8 +117,8 @@ class Attention(nn.Cell): self.num_heads = num_heads self.num_kv_heads = num_kv_heads self.head_size = head_size - self.hidden_size_per_partition = num_heads*head_size - self.kv_hidden_size_per_partition = num_kv_heads*head_size + self.hidden_size_per_partition = 
num_heads * head_size
+        self.kv_hidden_size_per_partition = num_kv_heads * head_size
 
         input_layout = "TH"
         scale = float(scale)
@@ -146,7 +135,6 @@ class Attention(nn.Cell):
             scale_value=scale,
             kv_head_num=num_kv_heads)
 
-    @jit
     def construct(
         self,
         query: Tensor,
@@ -161,7 +149,7 @@ class Attention(nn.Cell):
         q_seq_lens: Tensor,
         block_tables: Tensor,
     ) -> Tensor:
-        """Attention foward, support MHA and GQA.
+        """Attention forward, support MHA and GQA.
 
         Args:
             query: shape = [1, num_tokens, hidden_size]
@@ -172,12 +160,20 @@ class Attention(nn.Cell):
             batch_valid_length: shape = [batch_size, ]
             block_tables: shape = [block_size, num_block]
         """
-        cache_out = self.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping)
+        output = query
+        # ensure that the input tensors of reshape_and_cache are contiguous
+        key = key.contiguous()
+        value = value.contiguous()
+        cache_out = self.reshape_and_cache(key, value, key_cache, value_cache,
+                                           slot_mapping)
         query = ops.depend(query, cache_out)
         if is_prefill:
-            output = self._run_prefill_forward(query, key, value, attn_mask, batch_valid_length, batch_valid_length)
+            output = self._run_prefill_forward(query, key, value, attn_mask,
+                                               batch_valid_length,
+                                               batch_valid_length)
         else:
-            output = self._run_decode_forward(query, key_cache, value_cache, block_tables, batch_valid_length,
+            output = self._run_decode_forward(query, key_cache, value_cache,
+                                              block_tables, batch_valid_length,
                                               attn_mask, q_seq_lens)
         return output
 
@@ -233,15 +229,7 @@
             block_tables: shape = [block_size, num_block]
             context_lens: shape = [batch_size, ]
         """
-        output = self.paged_attention(
-            query,
-            key_cache,
-            value_cache,
-            block_tables,
-            batch_valid_length,
-            None,
-            None,
-            attn_mask,
-            q_seq_lens
-        )
+        output = self.paged_attention(query, key_cache, value_cache,
+                                      block_tables, batch_valid_length, None,
+                                      None, attn_mask, q_seq_lens)
         return output
diff --git a/vllm_mindspore/lora/__init__.py b/vllm_mindspore/lora/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/vllm_mindspore/lora/layers.py b/vllm_mindspore/lora/layers.py
new file mode 100644
index 000000000..19a132c03
--- /dev/null
+++ b/vllm_mindspore/lora/layers.py
@@ -0,0 +1,1165 @@
+#!/usr/bin/env python3
+# Copyright 2025 Huawei Technologies Co., Ltd
+# Copyright 2024 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
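# Background (illustrative sketch, not from the file below): every LoRA-wrapped
# linear layer in this module ultimately computes the frozen base projection
# plus a scaled low-rank update. Reference semantics, assuming weight
# [out, in], lora_a [r, in], lora_b [out, r]; works the same for torch or
# NumPy arrays:

def lora_linear_reference(x, weight, lora_a, lora_b, scaling=1.0):
    base = x @ weight.T                  # frozen base projection
    update = (x @ lora_a.T) @ lora_b.T   # rank-r update, r << min(in, out)
    return base + scaling * update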
+# ============================================================================ + +import math +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast + +import mindspore as ms +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers import PretrainedConfig +from vllm.adapter_commons.layers import AdapterMapping +from vllm.config import LoRAConfig +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + split_tensor_along_last_dim, + tensor_model_parallel_all_gather, + tensor_model_parallel_all_reduce) +from vllm.distributed.utils import divide +# yapf: enable +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.rotary_embedding import ( + LinearScalingRotaryEmbedding, RotaryEmbedding) +from vllm.model_executor.layers.vocab_parallel_embedding import \ + VocabParallelEmbedding + +# yapf: disable +from vllm_mindspore.model_executor.layers.linear import ( + ColumnParallelLinear, LinearBase, MergedColumnParallelLinear, + QKVParallelLinear, RowParallelLinear) + +if TYPE_CHECKING: + from vllm.lora.punica_wrapper import PunicaWrapperBase + + +def _get_lora_device(base_layer: nn.Module) -> torch.device: + # code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34 + """Returns the device for where to place the LoRA tensors.""" + # unquantizedLinear + if hasattr(base_layer, "weight"): + return base_layer.weight.device + # Compressed Tensor + elif hasattr(base_layer, "weight_packed"): + return base_layer.weight_packed.device + # GPTQ/AWQ + elif hasattr(base_layer, "qweight"): + return base_layer.qweight.device + # marlin + elif hasattr(base_layer, "B"): + return base_layer.B.device + # HQQ marlin + elif hasattr(base_layer, "W_q"): + return base_layer.W_q.device + else: + raise ValueError(f"Unsupported base layer: {base_layer}") + + +def _not_fully_sharded_can_replace(can_replace): + """ + decorator which adds the condition of not using fully sharded loras + intended to wrap can_replace_layer() + """ + + def dec(*args, **kwargs): + decorate = kwargs.pop("decorate") if "decorate" in kwargs else True + condition = (not kwargs["lora_config"].fully_sharded_loras + if decorate else True) + return can_replace(*args, **kwargs) and condition + + return dec + + +@dataclass +class LoRAMapping(AdapterMapping): + is_prefill: bool = False + +# vllm-mindspore Inherits ms.nn.Cell +class BaseLayerWithLoRA(ms.nn.Cell): + + def slice_lora_a( + self, lora_a: Union[torch.Tensor, List[Union[torch.Tensor, None]]] + ) -> Union[torch.Tensor, List[Union[torch.Tensor, None]]]: + """Slice lora a if splitting for tensor parallelism.""" + ... + + def slice_lora_b( + self, lora_b: Union[torch.Tensor, List[Union[torch.Tensor, None]]] + ) -> Union[torch.Tensor, List[Union[torch.Tensor, None]]]: + """Slice lora b if splitting with tensor parallelism.""" + ... + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """Initializes lora matrices.""" + ... + + def reset_lora(self, index: int): + """Resets the lora weights at index back to 0.""" + ... + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + bias: Optional[torch.Tensor] = None, + ): + """Overwrites lora tensors at index.""" + ... 
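# Usage sketch for the interface above (hypothetical wrapper and config
# objects): adapter slots are preallocated once, then filled or cleared by
# index at runtime.
#
#     wrapper.create_lora_weights(max_loras=4, lora_config=lora_config)
#     wrapper.set_lora(0, lora_a=a, lora_b=b, embeddings_tensor=None)
#     wrapper.reset_lora(0)   # zero out slot 0 so it can be reused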
+ + def set_mapping( + self, + punica_wrapper, + ): + self.punica_wrapper: PunicaWrapperBase = punica_wrapper + + @classmethod + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + raise NotImplementedError + + +class VocabParallelEmbeddingWithLoRA(BaseLayerWithLoRA): + + def __init__(self, base_layer: VocabParallelEmbedding) -> None: + super().__init__() + self.base_layer = base_layer + self.embeddings_slice: Optional[Tuple[int, int]] + self.embeddings_weights: Optional[torch.Tensor] + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + + if self.base_layer.num_added_embeddings_per_partition > 0: + # We can start adding lora weights + self.embeddings_weights = self.base_layer.weight.data[ + self.base_layer.num_org_embeddings_per_partition:self. + base_layer.num_org_embeddings_per_partition + + self.base_layer.num_added_embeddings_per_partition] + self.embeddings_slice = ( + self.base_layer.shard_indices.added_vocab_start_index - + self.base_layer.org_vocab_size, + self.base_layer.shard_indices.added_vocab_end_index - + self.base_layer.org_vocab_size) + self.base_layer.weight.data[ + self.base_layer.num_org_embeddings_per_partition:].fill_(0) + else: + self.embeddings_slice = None + self.embeddings_weights = None + + self.embeddings_tensors = torch.zeros( + ( + max_loras, + lora_config.lora_extra_vocab_size, + self.base_layer.embedding_dim, + ), + dtype=self.base_layer.weight.dtype, + device=self.base_layer.weight.device, + ) + self.lora_a_stacked = torch.zeros( + ( + max_loras, + self.base_layer.org_vocab_size + + lora_config.lora_extra_vocab_size, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.base_layer.weight.device, + ) + self.lora_b_stacked = torch.zeros( + ( + max_loras, + 1, + self.base_layer.embedding_dim, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.base_layer.weight.device, + ) + self.lora_a_stacked_2d = self.lora_a_stacked.view( + self.lora_a_stacked.shape[0] * self.lora_a_stacked.shape[1], + self.lora_a_stacked.shape[2], + ) + + def reset_lora(self, index: int): + self.lora_a_stacked[index] = 0 + self.lora_b_stacked[index] = 0 + self.embeddings_tensors[index] = 0 + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + bias: Optional[torch.Tensor] = None, + ): + self.reset_lora(index) + self.lora_a_stacked[index, :lora_a.shape[0], :lora_a.shape[1]].copy_( + lora_a, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if embeddings_tensor is not None: + self.embeddings_tensors[ + index, :embeddings_tensor.shape[0], :embeddings_tensor. 
+ shape[1], ].copy_(embeddings_tensor, non_blocking=True) + if self.embeddings_slice is not None: + # TODO(yard1): Optimize this copy, we don't need to copy + # everything, just the modified part + embeddings = self.embeddings_tensors.view( + self.embeddings_tensors.shape[0] * + self.embeddings_tensors.shape[1], + self.embeddings_tensors.shape[2], + )[self.embeddings_slice[0]:self.embeddings_slice[1]] + assert self.embeddings_weights is not None + self.embeddings_weights[:embeddings.shape[0]].copy_(embeddings) + + def construct(self, x: torch.Tensor) -> torch.Tensor: + added_tokens_mask = x > self.base_layer.org_vocab_size - 1 + embeddings_indices = self.punica_wrapper.embeddings_indices + indices = embeddings_indices[1].view_as(x) + full_lora_a_embeddings = F.embedding( + x + indices, + self.lora_a_stacked_2d, + ) + indices = embeddings_indices[0].view_as(x) + full_output = self.base_layer.forward( + x.add_(indices * added_tokens_mask)) + + full_output_org = full_output + if full_output.ndim == 3: + full_output = full_output.view( + full_output.shape[0] * full_output.shape[1], -1) + if full_lora_a_embeddings.ndim == 3: + full_lora_a_embeddings = full_lora_a_embeddings.view( + full_lora_a_embeddings.shape[0] * + full_lora_a_embeddings.shape[1], + -1, + ) + + full_output = self.punica_wrapper.add_lora_embedding( + full_output, + full_lora_a_embeddings, + self.lora_b_stacked, + add_input=True) + return full_output.view_as(full_output_org) + + @classmethod + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + return type(source_layer) is VocabParallelEmbedding + + +class BaseLinearLayerWithLoRA(BaseLayerWithLoRA): + + def __init__(self, base_layer: LinearBase): + super().__init__() + self.base_layer = base_layer + self.input_size = self.base_layer.input_size + self.device = _get_lora_device(self.base_layer) + self.lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]] = None + + self.output_slices: Tuple[int, ...] 
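# (type annotations only; the concrete values are assigned by the
# column/row-parallel subclasses below)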
+ self.tp_size: int + self.output_size: int + self.n_slices: int + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + self.lora_config = lora_config + + if isinstance(self.base_layer, ColumnParallelLinear): + lora_a_out_size = (lora_config.max_lora_rank if + not lora_config.fully_sharded_loras else divide( + lora_config.max_lora_rank, self.tp_size)) + lora_b_out_size = self.output_size + + elif isinstance(self.base_layer, RowParallelLinear): + lora_a_out_size = lora_config.max_lora_rank + lora_b_out_size = (self.output_size if + not lora_config.fully_sharded_loras else divide( + self.output_size, self.tp_size)) + else: + raise NotImplementedError + + self.lora_a_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_a_out_size, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + self.lora_b_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_b_out_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + if lora_config.bias_enabled: + lora_bias_out_size = lora_b_out_size + self.lora_bias_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_bias_out_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + self.output_slices = (self.lora_b_stacked[0].shape[2], ) + + def reset_lora(self, index: int): + for s_index in range(self.n_slices): + self.lora_a_stacked[s_index][index] = 0 + self.lora_b_stacked[s_index][index] = 0 + if self.lora_config.bias_enabled: + # Make mypy happy + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[s_index][index] = 0 + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + lora_bias: Optional[torch.Tensor] = None, + ): + # Except for QKVParallelLinearWithLora and + # MergedColumnParallelLinearWithLoRA, all other linear LoRA layers + # store weights in a tuple of size 1. These two layers will + # override this function. + assert (len(self.lora_a_stacked) == len(self.lora_b_stacked) == + self.n_slices == 1) + + self.reset_lora(index) + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + self.lora_a_stacked[0][index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[0][index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if lora_bias is not None: + + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + assert len(self.lora_bias_stacked) + self.lora_bias_stacked[0][index, 0, :lora_bias.shape[0]].copy_( + lora_bias.T, non_blocking=True) + + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + output = self.base_layer.quant_method.apply(self.base_layer, x, bias) + self.punica_wrapper.add_lora_linear(output, x, self.lora_a_stacked, + self.lora_b_stacked, + self.lora_bias_stacked, 1.0, + self.output_slices) + return output + + + + +class ColumnParallelLinearWithLoRA(BaseLinearLayerWithLoRA): + """ + LoRA on top of ColumnParallelLinear layer. + LoRA B is sliced for tensor parallelism. + There are two types for the `base_layer`: + 1. ColumnParallelLinear, e.g.`dense_h_to_4h` in `FalconForCausalLM`. + 2. 
MergedColumnParallelLinear, e.g.`gate_up_proj` in `Phi3ForCausalLM`. + """ + + def __init__(self, base_layer: ColumnParallelLinear) -> None: + super().__init__(base_layer) + # The base_layer type is ColumnParallelLinear or + # MergedColumnParallelLinear, their weight sharding logic is + # inconsistent when TP is greater than 1. + self.is_merged_col_linear = type( + base_layer) is MergedColumnParallelLinear + self.tp_size = get_tensor_model_parallel_world_size() + self.output_size = self.base_layer.output_size_per_partition + # There is only one LoRA layer + self.n_slices = 1 + + def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + return lora_a + + def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: + # Applicable to cases where the base_layer is + # MergedColumnParallelLinear. + if self.is_merged_col_linear: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size // 2 + offset = lora_b.shape[-1] // 2 + + left_weight = lora_b[:, tp_rank * shard_size:(tp_rank + 1) * + shard_size] + right_weight = lora_b[:, offset + tp_rank * shard_size:offset + + (tp_rank + 1) * shard_size] + lora_b = torch.cat([left_weight, right_weight], dim=1) + # Applicable to cases where the base_layer is + # ColumnParallelLinear. + else: + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + lora_b = lora_b[:, start_idx:end_idx] + return lora_b + + def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: + # TODO: Fix the slicing logic of bias. + if bias is None: + return bias + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + bias = bias[start_idx:end_idx] + return bias + + def construct( + self, input_: torch.Tensor + ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: + """Forward of ColumnParallelLinear + + Args: + input_: Tensor whose last dimension is `input_size`. + + Returns: + - output + - bias + """ + bias = (self.base_layer.bias + if not self.base_layer.skip_bias_add else None) + + # Matrix multiply. + output_parallel = self.apply(input_, bias) + if self.base_layer.gather_output: + # All-gather across the partitions. + output = tensor_model_parallel_all_gather(output_parallel) + else: + output = output_parallel + output_bias = (self.base_layer.bias + if self.base_layer.skip_bias_add else None) + return output, output_bias + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + return type(source_layer) is ColumnParallelLinear or ( + type(source_layer) is MergedColumnParallelLinear + and len(packed_modules_list) == 1) + + +class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): + """ColumnParallelLinear layer that is composed of 2 sublayers (slices) + packed together (eg. gate_proj + up_proj -> gate_up_proj). + + This means we have 2 LoRAs, each applied to one half of the layer. + + Both slices must have the same size. 
+ """ + + def __init__( + self, base_layer: Union[MergedColumnParallelLinear, + QKVParallelLinear]) -> None: + super().__init__(base_layer) + # There are two LoRA layers + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + # the output_sizes in MergedColumnParallelLinear is not sharded by tp + # we need to divide it by the tp_size to get correct slices size + output_sizes = self.base_layer.output_sizes + self.output_slices = tuple( + divide(output_size, self.tp_size) for output_size in output_sizes) + self.n_slices = len(self.output_slices) + self.output_ids = (self.tp_rank, ) * self.n_slices + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """ + The main reason for overriding this function is to enhance code + maintainability. + """ + self.lora_config = lora_config + + lora_a_output_size_per_partition = ( + lora_config.max_lora_rank if not lora_config.fully_sharded_loras + else divide(lora_config.max_lora_rank, self.tp_size)) + self.lora_a_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + self.lora_b_stacked = tuple( + torch.zeros( + max_loras, + 1, + output_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) for output_size in self.output_slices) + if lora_config.bias_enabled: + self.lora_bias_stacked = tuple( + torch.zeros( + max_loras, + 1, + output_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for output_size in self.output_slices) + + def slice_lora_a( + self, lora_a: List[Union[torch.Tensor, None]] + ) -> List[Union[torch.Tensor, None]]: + return lora_a + + def slice_lora_b( + self, lora_b: List[Union[torch.Tensor, None]] + ) -> List[Union[torch.Tensor, None]]: + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (lora_b_i := lora_b[i]) is not None: + lora_b[i] = lora_b_i[:, shard_size * shard_id:shard_size * + (shard_id + 1)] + return lora_b + + def slice_bias( + self, bias: List[Union[torch.Tensor, + None]]) -> List[Union[torch.Tensor, None]]: + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (bias_i := bias[i]) is not None: + bias[i] = bias_i[shard_size * shard_id:shard_size * + (shard_id + 1)] + return bias + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + lora_bias: Optional[torch.Tensor] = None, + ): + self.reset_lora(index) + + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + for i in range(self.n_slices): + if (lora_a_i := lora_a[i]) is not None: + self.lora_a_stacked[i][ + index, 0, :lora_a_i.shape[1], :lora_a_i.shape[0]].copy_( + lora_a_i.T, non_blocking=True) + if (lora_b_i := lora_b[i]) is not None: + self.lora_b_stacked[i][ + index, 0, :lora_b_i.shape[1], :lora_b_i.shape[0]].copy_( + lora_b_i.T, non_blocking=True) + + if lora_bias is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + for i in range(self.n_slices): + if (lora_bias_i := lora_bias[i]) is not None: + self.lora_bias_stacked[i][index, + 0, :lora_bias_i.shape[0]].copy_( + lora_bias_i.T, + non_blocking=True) + + @classmethod + 
@_not_fully_sharded_can_replace
+    def can_replace_layer(
+        cls,
+        source_layer: nn.Module,
+        lora_config: LoRAConfig,
+        packed_modules_list: List,
+        model_config: Optional[PretrainedConfig],
+    ) -> bool:
+        return (type(source_layer) is MergedColumnParallelLinear
+                and len(packed_modules_list) == 2)
+
+
+class QKVParallelLinearWithLoRA(ColumnParallelLinearWithLoRA):
+    """
+    ColumnParallelLinear layer that is specifically designed for
+    qkv_proj. Certain models, such as chatglm3 and baichuan-7b,
+    only contain a single LoRA within their qkv_proj layer.
+
+    During inference with Tensor Parallel, the weights of lora_b
+    must be accurately partitioned according to the respective ranks.
+
+    Q slice may have a different shape than K and V slices (which both have
+    the same shape).
+    """
+
+    def __init__(self, base_layer: QKVParallelLinear) -> None:
+        super().__init__(base_layer)
+        self.q_proj_total_size = (self.base_layer.total_num_heads *
+                                  self.base_layer.head_size)
+        self.q_proj_shard_size = (self.base_layer.num_heads *
+                                  self.base_layer.head_size)
+        self.kv_proj_shard_size = (self.base_layer.num_kv_heads *
+                                   self.base_layer.head_size)
+        self.kv_proj_total_size = (self.base_layer.total_num_kv_heads *
+                                   self.base_layer.head_size)
+        # There is only one LoRA layer
+        self.n_slices = 1
+
+    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
+        tp_rank = get_tensor_model_parallel_rank()
+        self.q_shard_id = tp_rank
+        self.kv_shard_id = tp_rank // self.base_layer.num_kv_head_replicas
+        lora_b_q = lora_b[:, self.q_proj_shard_size *
+                          self.q_shard_id:self.q_proj_shard_size *
+                          (self.q_shard_id + 1)]
+        k_offset = self.q_proj_total_size
+        lora_b_k = lora_b[:, k_offset +
+                          self.kv_proj_shard_size * self.kv_shard_id:k_offset +
+                          self.kv_proj_shard_size * (self.kv_shard_id + 1)]
+        v_offset = k_offset + self.kv_proj_total_size
+        lora_b_v = lora_b[:, v_offset +
+                          self.kv_proj_shard_size * self.kv_shard_id:v_offset +
+                          self.kv_proj_shard_size * (self.kv_shard_id + 1)]
+        lora_b = torch.cat([lora_b_q, lora_b_k, lora_b_v], dim=1)
+        return lora_b
+
+    def slice_bias(self, bias: torch.Tensor) -> torch.Tensor:
+        bias_q = bias[self.q_proj_shard_size *
+                      self.q_shard_id:self.q_proj_shard_size *
+                      (self.q_shard_id + 1)]
+        k_offset = self.q_proj_total_size
+        bias_k = bias[k_offset +
+                      self.kv_proj_shard_size * self.kv_shard_id:k_offset +
+                      self.kv_proj_shard_size * (self.kv_shard_id + 1)]
+        v_offset = k_offset + self.kv_proj_total_size
+        bias_v = bias[v_offset +
+                      self.kv_proj_shard_size * self.kv_shard_id:v_offset +
+                      self.kv_proj_shard_size * (self.kv_shard_id + 1)]
+        bias = torch.cat([bias_q, bias_k, bias_v], dim=1)
+        return bias
+
+    @classmethod
+    @_not_fully_sharded_can_replace
+    def can_replace_layer(cls, source_layer: nn.Module,
+                          lora_config: LoRAConfig, packed_modules_list: List,
+                          model_config: Optional[PretrainedConfig]) -> bool:
+        return type(source_layer) is QKVParallelLinear and len(
+            packed_modules_list) == 1
+
+
+class MergedQKVParallelLinearWithLoRA(MergedColumnParallelLinearWithLoRA):
+    """MergedColumnParallelLinear layer that is composed of 3 sublayers (slices)
+    packed together in qkv proj fashion
+    (q_proj + k_proj + v_proj -> qkv_proj).
+
+    This means we have 3 LoRAs, each applied to one slice of the layer.
+
+    Q slice may have a different shape than K and V slices (which both have
+    the same shape).
+    """
+
+    def __init__(self, base_layer: QKVParallelLinear) -> None:
+        super().__init__(base_layer)
+        # There are three LoRA layers.
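# (one (lora_a, lora_b) stack per q/k/v slice; K and V share a shard size
# while Q may differ, hence the asymmetric output_slices computed below)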
+ self.n_slices = len(self.base_layer.output_sizes) + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + + self.q_proj_shard_size = (self.base_layer.num_heads * + self.base_layer.head_size) + self.kv_proj_shard_size = (self.base_layer.num_kv_heads * + self.base_layer.head_size) + self.q_shard_id = self.tp_rank + self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas + + self.output_slices = ( + self.q_proj_shard_size, + self.kv_proj_shard_size, + self.kv_proj_shard_size, + ) + self.output_ids = ( + self.q_shard_id, + self.kv_shard_id, + self.kv_shard_id, + ) + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """ + The main reason for overloading this function is to handle inconsistent + weight dimensions in qkv lora. + """ + super().create_lora_weights(max_loras, lora_config, model_config) + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + return (type(source_layer) is QKVParallelLinear + and len(packed_modules_list) == 3) + + +class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA): + + def __init__(self, base_layer: RowParallelLinear) -> None: + super().__init__(base_layer) + + self.tp_size = get_tensor_model_parallel_world_size() + # reset input_size + self.input_size = self.base_layer.input_size_per_partition + self.output_size = self.base_layer.output_size + + self.tp_rank = get_tensor_model_parallel_rank() + # There is only one LoRA layer. + self.n_slices = 1 + + def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + + shard_size = self.input_size + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size + lora_a = lora_a[start_idx:end_idx, :] + return lora_a + + def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: + return lora_b + + def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: + return bias + + def construct( + self, input_: torch.Tensor + ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: + """Forward of RowParallelLinear + + Args: + input_: tensor whose last dimension is `input_size`. If + `input_is_parallel` is set, then the last dimension + is `input_size // tp_size`. + + Returns: + - output + - bias + """ + # Set up backprop all-reduce. + if self.base_layer.input_is_parallel: + input_parallel = input_ + else: + # TODO: simplify code below + splitted_input = split_tensor_along_last_dim( + input_, num_partitions=self.base_layer.tp_size) + input_parallel = splitted_input[self.tp_rank].contiguous() + + # Matrix multiply. 
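# (self.apply() runs the base layer's quant_method and then adds the LoRA
# delta in place via punica_wrapper.add_lora_linear; see
# BaseLinearLayerWithLoRA.apply above)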
+ output_parallel = self.apply(input_parallel) + if self.base_layer.reduce_results and self.base_layer.tp_size > 1: + output_ = tensor_model_parallel_all_reduce(output_parallel) + else: + output_ = output_parallel + + if not self.base_layer.skip_bias_add: + output = (output_ + self.base_layer.bias + if self.base_layer.bias is not None else output_) + output_bias = None + else: + output = output_ + output_bias = self.base_layer.bias + return output, output_bias + + @property + def weight(self): + return (self.base_layer.weight if hasattr(self.base_layer, "weight") + else self.base_layer.qweight) + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + return type(source_layer) is RowParallelLinear + + +class LogitsProcessorWithLoRA(BaseLayerWithLoRA): + """ + LoRA wrapper for LogitsProcessor, with extra logic to handle the + application of the LoRA adapter and added LoRA vocabulary. + + Args: + base_layer: LogitsProcessor layer + hidden_size: hidden size of the model + dtype: data type of the model + device: device of the model + sharded_to_full_mapping: index mapping from sharded vocab to full vocab + received from base_layer.get_sharded_to_full_mapping(). If None, + no reindexing will be done. + """ + + def __init__(self, base_layer: LogitsProcessor, hidden_size: int, + dtype: torch.dtype, device: torch.device, + sharded_to_full_mapping: Optional[List[int]]) -> None: + super().__init__() + self.base_layer = base_layer + self.hidden_size = hidden_size + self.dtype = dtype + self.device = device + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + self.sharded_to_full_mapping = sharded_to_full_mapping + + @property + def logits_as_input(self): + return self.base_layer.logits_as_input + + @property + def vocab_size(self): + return self.base_layer.vocab_size + + @property + def scale(self): + return self.base_layer.scale + + @property + def soft_cap(self): + return self.base_layer.soft_cap + + @property + def use_all_gather(self): + return self.base_layer.use_all_gather + + @property + def org_vocab_size(self): + return self.base_layer.org_vocab_size + + @property + def include_gpu_probs_tensor(self): + return self.base_layer.include_gpu_probs_tensor + + @property + def should_modify_greedy_probs_inplace(self): + return self.base_layer.should_modify_greedy_probs_inplace + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + # TODO: Verify if this condition can be further relaxed + if 32000 < self.base_layer.vocab_size > 257024: + raise ValueError("When using LoRA, vocab size must be " + "32000 >= vocab_size <= 257024") + self.lora_a_stacked = torch.zeros( + ( + max_loras, + 1, + lora_config.max_lora_rank, + self.hidden_size, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.lora_b_stacked = torch.zeros( + ( + max_loras, + 1, + # Pad for kernel compatibility + math.ceil(self.base_layer.vocab_size / + lora_config.lora_vocab_padding_size) * + lora_config.lora_vocab_padding_size, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.embeddings_tensors = torch.full( + (max_loras, lora_config.lora_extra_vocab_size, self.hidden_size), + fill_value=float("-inf"), + dtype=self.dtype, + device=self.device, + ) + if 
self.sharded_to_full_mapping is not None: + self.sharded_to_full_mapping_gpu = torch.tensor( + self.sharded_to_full_mapping, + device=self.device, + dtype=torch.long) + else: + self.sharded_to_full_mapping_gpu = None + + def reset_lora(self, index: int): + self.lora_a_stacked[index] = 0 + self.lora_b_stacked[index] = 0 + self.embeddings_tensors[index] = float("-inf") + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + bias: Optional[torch.Tensor] = None, + ): + self.reset_lora(index) + self.lora_a_stacked[index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if embeddings_tensor is not None: + self.embeddings_tensors[ + index, :embeddings_tensor.shape[0], :embeddings_tensor. + shape[1], ] = embeddings_tensor + + def _get_logits( + self, + hidden_states: torch.Tensor, + lm_head: VocabParallelEmbedding, + embedding_bias: Optional[torch.Tensor] = None, + ) -> Optional[torch.Tensor]: + # Get the logits for the next tokens. + logits = lm_head.quant_method.apply(lm_head, hidden_states) + if embedding_bias is not None: + logits += embedding_bias + + # Gather logits for TP + logits = self.base_layer._gather_logits(logits) + + if logits is None: + return None + + if self.sharded_to_full_mapping_gpu is not None: + # Reindex full logits tensor to ensure 1:1 mapping between + # index and token_id + # Example for: + # org_vocab_size = 4 + # added_vocab_size = 2 + # pad_to_size = 8 + # tp_size = 2 + + # indices: [0, 1, 2, 3, 4, 5, 6, 7] + # token_id: [0, 1, 4, -1, 2, 3, 5, -1] + + # Therefore, the mapping is expected to be: + # [0, 1, 4, 6, 2, 3, 5, 7] so that when we reindex, + # we get: + # indices: [0, 1, 2, 3, 4, 5, 6, 7] + # token_id: [0, 1, 2, 3, 4, 5, -1, -1] + logits = logits[:, self.sharded_to_full_mapping_gpu] + + lora_logits = torch.empty( + self.embeddings_tensors.shape[0] + 1, + self.embeddings_tensors.shape[1], + hidden_states.shape[0], + dtype=self.embeddings_tensors.dtype, + device=self.embeddings_tensors.device, + ) + torch.matmul(self.embeddings_tensors, + hidden_states.T, + out=lora_logits[:-1]) + lora_logits[-1] = float("-inf") + lora_logits = lora_logits.mT + indices_padded = self.punica_wrapper.sampler_indices_padded + lora_logits = (lora_logits.reshape( + lora_logits.shape[0] * lora_logits.shape[1], + lora_logits.shape[2], + ).index_select(0, indices_padded).nan_to_num_(nan=float("-inf"), + posinf=float("inf"), + neginf=float("-inf"))) + + logits[:, + self.base_layer.org_vocab_size:self.base_layer.org_vocab_size + + lora_logits.shape[1]] = lora_logits + + # LogitsProcessorWithLoRA always using bgmv + self.punica_wrapper.add_lora_logits(logits, hidden_states, + self.lora_a_stacked, + self.lora_b_stacked, 1.0) + + # Remove paddings in vocab (if any). + logits = logits[:, :self.base_layer.vocab_size] + return logits + + def construct(self, *args, **kwargs): + return type(self.base_layer).forward(self, *args, **kwargs) + + @classmethod + def can_replace_layer( + cls, + source_layer: nn.Module, + lora_config: LoRAConfig, + packed_modules_list: List, + model_config: Optional[PretrainedConfig], + ) -> bool: + # Special handling for the LogitsProcessor. + return False + + +class LinearScalingRotaryEmbeddingWithLoRA(BaseLayerWithLoRA): + """Implements RoPE-scaled embeddings with linear scaling for + multiple LoRA adapters with a specialized kernel. 
+
+    Replace LinearScalingRotaryEmbedding with MultiLinearScalingRotaryEmbedding
+    which can handle multiple LoRA adapters in a specialized kernel.
+    """
+
+    def __init__(self, base_layer: RotaryEmbedding) -> None:
+        super().__init__()
+        self.base_layer = base_layer
+
+    @property
+    def scaling_factors(self):
+        return self.base_layer.scaling_factors
+
+    @property
+    def rotary_dim(self):
+        return self.base_layer.rotary_dim
+
+    def create_lora_weights(
+        self,
+        max_loras: int,
+        lora_config: LoRAConfig,
+        model_config: Optional[PretrainedConfig] = None,
+    ) -> None:
+        scaling_factors = (list(lora_config.long_lora_scaling_factors)
+                           if lora_config.long_lora_scaling_factors else [])
+        base_scaling_factor = (self.base_layer.scaling_factor if isinstance(
+            self.base_layer, LinearScalingRotaryEmbedding) else 1.0)
+        scaling_factors = sorted(
+            list(set([base_scaling_factor] + scaling_factors)))
+        self.base_layer = LinearScalingRotaryEmbedding(
+            self.base_layer.head_size,
+            self.base_layer.rotary_dim,
+            self.base_layer.max_position_embeddings,
+            self.base_layer.base,
+            self.base_layer.is_neox_style,
+            scaling_factors,
+            self.base_layer.dtype,
+        )
+
+    def reset_lora(self, index: int):
+        ...
+
+    def set_lora(
+        self,
+        index: int,
+        lora_a: torch.Tensor,
+        lora_b: torch.Tensor,
+        embeddings_tensor: Optional[torch.Tensor],
+        bias: Optional[torch.Tensor] = None,
+    ):
+        ...
+
+    def construct(
+        self,
+        positions: torch.Tensor,
+        query: torch.Tensor,
+        key: torch.Tensor,
+    ):
+        return self.base_layer(
+            positions,
+            query,
+            key,
+            offsets=self.punica_wrapper.long_lora_indices,
+        )
+
+    @property
+    def scaling_factor_to_offset(self) -> Dict[float, int]:
+        return self.base_layer.scaling_factor_to_offset
+
+    @classmethod
+    def can_replace_layer(
+        cls,
+        source_layer: nn.Module,
+        lora_config: LoRAConfig,
+        packed_modules_list: List,
+        model_config: Optional[PretrainedConfig],
+    ) -> bool:
+        """Returns True if the layer can be replaced by this LoRA layer."""
+        return (type(source_layer) is LinearScalingRotaryEmbedding
+                or type(source_layer) is RotaryEmbedding)
+
+    def extra_repr(self) -> str:
+        return self.base_layer.extra_repr()
diff --git a/vllm_mindspore/lora/models.py b/vllm_mindspore/lora/models.py
new file mode 100644
index 000000000..921978498
--- /dev/null
+++ b/vllm_mindspore/lora/models.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+# Copyright 2025 Huawei Technologies Co., Ltd
+# Copyright 2024 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
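# Background (illustrative, not part of the file below): the loaders in this
# module consume the standard PEFT adapter layout; the file names are the
# ones the code checks for, and the key naming follows the usual PEFT
# convention:
#
#     adapter_dir/
#         adapter_config.json          # rank, alpha, target_modules, ...
#         adapter_model.safetensors    # or adapter_model.bin
#         new_embeddings.safetensors   # optional extra input embeddings
#
# with tensor keys such as
#     base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight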
+# ============================================================================ + +import os +from typing import Dict, List, Optional, Union + +import safetensors.torch +import torch +from vllm.lora.lora import LoRALayerWeights +from vllm.lora.peft_helper import PEFTHelper +from vllm.lora.utils import is_regex_target_modules, parse_fine_tuned_lora_name +from vllm.model_executor.models.utils import WeightsMapper +from vllm.utils import is_pin_memory_available + +from vllm_mindspore.lora.layers import BaseLayerWithLoRA + +_GLOBAL_LORA_ID = 0 + + +def get_lora_id(): + global _GLOBAL_LORA_ID + _GLOBAL_LORA_ID += 1 + return _GLOBAL_LORA_ID + + +def register_module(self, module_name: str, module: "BaseLayerWithLoRA"): + assert isinstance(module, BaseLayerWithLoRA) + self.modules[module_name] = module + + +@classmethod #type:ignore +def from_lora_tensors( + cls, + lora_model_id: int, + tensors: Dict[str, torch.Tensor], + peft_helper: PEFTHelper, + device: str = "cuda", + dtype: Optional[torch.dtype] = None, + embeddings: Optional[Dict[str, torch.Tensor]] = None, + target_embedding_padding: Optional[int] = None, + embedding_modules: Optional[Dict[str, str]] = None, + embedding_padding_modules: Optional[List[str]] = None, + weights_mapper: Optional[WeightsMapper] = None, +): + """Create a LoRAModel from a dictionary of tensors.""" + pin_memory = str(device) == "cpu" and is_pin_memory_available() + loras: Dict[str, LoRALayerWeights] = {} + for tensor_name, tensor in tensors.items(): + module_name, is_lora_a, is_bias = parse_fine_tuned_lora_name( + tensor_name, weights_mapper) + if module_name not in loras: + lora_embeddings_tensor = None + if embeddings: + assert embedding_modules is not None + embeddings_module = next( + (k for k in embedding_modules if k in module_name), None) + if embeddings_module: + lora_embeddings_tensor = embeddings[ + embedding_modules[embeddings_module]] + if pin_memory: + lora_embeddings_tensor = ( + lora_embeddings_tensor.pin_memory()) + loras[module_name] = LoRALayerWeights.from_config( + module_name, peft_helper, lora_embeddings_tensor) + + if is_bias: + # vllm-mindspore remove tensor device + loras[module_name].bias = tensor.to(dtype=dtype).t() + bias = tensor.to(dtype=dtype).t() + if pin_memory: + bias = bias.pin_memory() + loras[module_name].bias = bias + elif is_lora_a: + loras[module_name].lora_a = tensor.to(dtype=dtype).t() + if pin_memory: + loras[module_name].lora_a = loras[ + module_name].lora_a.pin_memory() + else: + loras[module_name].lora_b = tensor.to(dtype=dtype).t() + assert embedding_padding_modules is not None + if any(name in module_name for name in embedding_padding_modules + ) and target_embedding_padding is not None: + lora_b = loras[module_name].lora_b + assert target_embedding_padding >= lora_b.shape[1] + addition = target_embedding_padding - lora_b.shape[1] + loras[module_name].lora_b = torch.nn.functional.pad( + lora_b, (0, addition)) + if pin_memory: + loras[module_name].lora_b = loras[ + module_name].lora_b.pin_memory() + + for lora in loras.values(): + lora.optimize() + + return cls(lora_model_id, + peft_helper.r, + loras, + scaling_factor=peft_helper.vllm_long_context_scaling_factor) + + +@classmethod #type:ignore +def from_local_checkpoint( + cls, + lora_dir: str, + expected_lora_modules: List[str], + peft_helper: PEFTHelper, + *, + lora_model_id: Optional[int] = None, + device: str = "cuda", + dtype: Optional[torch.dtype] = None, + target_embedding_padding: Optional[int] = None, + embedding_modules: Optional[Dict[str, str]] = None, + 
embedding_padding_modules: Optional[List[str]] = None, + weights_mapper: Optional[WeightsMapper] = None, +): + """Create a LoRAModel from a local checkpoint. + + Args: + lora_dir: The local path that has lora data. + expected_lora_modules: Name of modules that are expected to be + replaced by lora. + peft_helper: Loaded lora configuration information. + lora_model_id: Lora model id. If not given, automatically set by + a global counter. + device: Device where the lora model is loaded. + dtype: dtype of the lora model weights. + + Returns: + Loaded LoRA Model. + """ + lora_tensor_path = os.path.join(lora_dir, "adapter_model.safetensors") + lora_bin_file_path = os.path.join(lora_dir, "adapter_model.bin") + new_embeddings_tensor_path = os.path.join(lora_dir, + "new_embeddings.safetensors") + new_embeddings_bin_file_path = os.path.join(lora_dir, "new_embeddings.bin") + + unexpected_modules: List[Union[list[str], str]] + if os.path.isfile(lora_tensor_path): + tensors: Dict[str, torch.Tensor] = {} + # Find unexpected modules. + # Use safetensor key as a source of truth to find expected modules. + # in peft if you have target_modules A, B, C and C does not exist + # in the model it won’t error and model will be trained with A, B + # loraified. C won’t exist in the safetensor but it will exist in + # the target_modules of the adapter_config.json. + unexpected_modules = [] + # vllm-mindspore safetensors open with np + with safetensors.safe_open(lora_tensor_path, + framework="np") as f: # type: ignore + for lora_module in f.keys(): # noqa + module_name, _, _ = parse_fine_tuned_lora_name( + lora_module, weights_mapper) + part_name = module_name.split(".")[-1] + if part_name not in expected_lora_modules: + unexpected_modules.append(module_name) + if unexpected_modules: + raise ValueError( + f"While loading {lora_dir}, expected" + f" target modules in {expected_lora_modules}" + f" but received {unexpected_modules}." + f" Please verify that the loaded LoRA module is correct") + # Load tensors if there are only expected modules. + for module in f.keys(): # noqa + # vllm-mindspore add numpy to tensor + tensors[module] = torch.Tensor(f.get_tensor(module)) + elif os.path.isfile(lora_bin_file_path): + # When a bin file is provided, we rely on config to find unexpected + # modules. + unexpected_modules = [] + target_modules = peft_helper.target_modules + if not isinstance(target_modules, list): + target_modules = [target_modules] + for module in target_modules: + # Compatible with more modules, + # such as:layers.11.self_attn.k_proj + part_name = module.split(".")[-1] + if part_name not in expected_lora_modules: + unexpected_modules.append(module) + # loaded lora's target modules must be a subset of + # expected_lora_modules. It is not reliable. See + # https://github.com/vllm-project/vllm/pull/5909. But there's no + # other better mechanism. + if unexpected_modules and not is_regex_target_modules( + peft_helper.target_modules, expected_lora_modules): + raise ValueError( + f"While loading {lora_dir}, expected" + f" target modules in {expected_lora_modules}" + f" but received {unexpected_modules}." 
+ f" Please verify that the loaded LoRA module is correct") + tensors = torch.load(lora_bin_file_path, map_location=device) + else: + raise ValueError(f"{lora_dir} doesn't contain tensors") + + embeddings = None + if os.path.isfile(new_embeddings_tensor_path): + embeddings = safetensors.torch.load_file(new_embeddings_tensor_path) + elif os.path.isfile(new_embeddings_bin_file_path): + embeddings = torch.load(new_embeddings_bin_file_path, + map_location=device, + weights_only=True) + + return cls.from_lora_tensors( + lora_model_id=get_lora_id() + if lora_model_id is None else lora_model_id, + tensors=tensors, + peft_helper=peft_helper, + device=device, + dtype=dtype, + embeddings=embeddings, + target_embedding_padding=target_embedding_padding, + embedding_modules=embedding_modules, + embedding_padding_modules=embedding_padding_modules, + weights_mapper=weights_mapper) diff --git a/vllm_mindspore/lora/ops/__init__.py b/vllm_mindspore/lora/ops/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/lora/ops/torch_ops/__init__.py b/vllm_mindspore/lora/ops/torch_ops/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/lora/ops/torch_ops/lora_ops.py b/vllm_mindspore/lora/ops/torch_ops/lora_ops.py new file mode 100644 index 000000000..d085c34e2 --- /dev/null +++ b/vllm_mindspore/lora/ops/torch_ops/lora_ops.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
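# Background (illustrative reference, not part of the file below): the
# bgmv/sgmv ops implemented here all reduce to "gather each token's adapter
# matrix, then do a batched matmul". A NumPy sketch of the core semantics:

import numpy as np

def bgmv_reference(x, stacked_w, lora_indices):
    # x: [num_tokens, in_dim]; stacked_w: [max_loras, out_dim, in_dim]
    selected = stacked_w[lora_indices]   # [num_tokens, out_dim, in_dim]
    return np.einsum("bi,boi->bo", x, selected)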
+# ============================================================================ + +""" +For punica_npu +""" +from mindspore import mint +from mindspore.ops.auto_generate import grouped_matmul_v4 + +def einsum_ms(inputs, selected_loras): + # mint.einsum("bi, boi -> bo", inputs, selected_loras) + selected_loras = mint.transpose(selected_loras, 1, 2) + outputs = mint.matmul(inputs.unsqueeze(1), selected_loras).squeeze(1) + return outputs + +def sort_lora_by_token_count(lora_indices_tensor, seq_len_tensor): + unique_ids = mint.unique(lora_indices_tensor) + token_sums = [] + for uid in unique_ids: + mask = (lora_indices_tensor == uid) + total_tokens = mint.sum(seq_len_tensor[mask]) + token_sums.append(total_tokens) + token_sums_tensor = mint.stack(token_sums) + sorted_counts, sort_indices = mint.sort(token_sums_tensor, descending=True) + sorted_ids = unique_ids[sort_indices] + return sorted_ids, sorted_counts + +def sgmv_expand(inputs, + lora_b_weights, + output_tensor, + b_seq_start_loc, + seq_len_tensor, + lora_indices_tensor, + batches, + max_seq_length, + token_nums, + add_inputs = False): + exploded_indices = mint.repeat_interleave(lora_indices_tensor, + seq_len_tensor) + + return bgmv_expand(inputs, lora_b_weights, output_tensor, exploded_indices, + add_inputs) + + +def bgmv_expand(inputs, + lora_b_weights, + output_tensor, + lora_indices_tensor, + add_inputs = True): + selected_loras = lora_b_weights[lora_indices_tensor].astype(output_tensor.dtype) + inputs = inputs.astype(output_tensor.dtype) + if len(selected_loras.shape) == 4: + selected_loras = selected_loras.squeeze(1) + outputs = einsum_ms(inputs, selected_loras) + limit = output_tensor.shape[0] + if outputs.shape[0] == 1 and output_tensor.shape[0] != 1: + limit = 1 + if add_inputs: + output_tensor[:, :outputs.shape[1]] += outputs[:limit, :] + else: + output_tensor[:, :outputs.shape[1]] = outputs[:limit, :] + return output_tensor + + +def sgmv_shrink( + inputs, + lora_a_weights, + output_tensor, + b_seq_start_loc, + seq_len_tensor, + lora_indices_tensor, + batches, + max_seq_length, + token_nums, + scaling, +): + group_list = seq_len_tensor + if (lora_indices_tensor.unique().shape[0] != lora_indices_tensor.shape[0]): + sorted_ids, sorted_counts = sort_lora_by_token_count(lora_indices_tensor, seq_len_tensor) + group_list = sorted_counts + if lora_a_weights.shape[0] != group_list.shape[0]: + new_tensor = mint.zeros(lora_a_weights.shape[0], dtype=group_list.dtype) + new_tensor[:group_list.size(0)] = group_list + group_list = new_tensor + if len(lora_a_weights.shape) == 4: + lora_a_weights = lora_a_weights.squeeze(1) + lora_a_weights = mint.transpose(lora_a_weights, 1, 2) + outputs = grouped_matmul_v4([inputs], [lora_a_weights], group_list=group_list, split_item=3, group_type=0, group_list_type=1) + outputs = outputs[0] + output_tensor[:, :outputs.shape[1]] = scaling * outputs[:] + return output_tensor + + +def bgmv_shrink(inputs, + lora_b_weights, + output_tensor, + lora_indices_tensor, + scaling = 1.0): + selected_loras = lora_b_weights[lora_indices_tensor].astype(output_tensor.dtype) + inputs = inputs.astype(output_tensor.dtype) + if len(selected_loras.shape) == 4: + selected_loras = selected_loras.squeeze(1) + outputs = einsum_ms(inputs, selected_loras) + output_tensor[:, :outputs.shape[1]] = scaling * outputs[:] + return output_tensor + + +def sgmv_expand_slice(inputs, + lora_b_weights, + output_tensor, + b_seq_start_loc, + seq_len_tensor, + lora_indices_tensor, + batches, + max_seq_length, + token_nums, + slice_offset, + 
slice_size, + add_inputs = False): + group_list = seq_len_tensor + if (lora_indices_tensor.unique().shape[0] != lora_indices_tensor.shape[0]): + sorted_ids, sorted_counts = sort_lora_by_token_count(lora_indices_tensor, seq_len_tensor) + group_list = sorted_counts + if lora_b_weights.shape[0] != group_list.shape[0]: + new_tensor = mint.zeros(lora_b_weights.shape[0], dtype=group_list.dtype) + new_tensor[:group_list.size(0)] = group_list + group_list = new_tensor + if len(lora_b_weights.shape) == 4: + lora_b_weights = lora_b_weights.squeeze(1) + lora_b_weights = mint.transpose(lora_b_weights, 1, 2) + inputs = inputs.astype(output_tensor.dtype) + outputs = grouped_matmul_v4([inputs], [lora_b_weights], group_list=group_list, split_item=3, group_type=0, group_list_type=1) + outputs = outputs[0] + if add_inputs: + output_tensor[:, slice_offset:slice_offset + slice_size] += outputs[:] + else: + output_tensor[:, slice_offset:slice_offset + slice_size] = outputs[:] + return output_tensor + + +def bgmv_expand_slice(inputs, + lora_b_weights, + output_tensor, + lora_indices_tensor, + slice_offset, + slice_size, + add_inputs = True): + selected_loras = lora_b_weights[lora_indices_tensor].astype(output_tensor.dtype) + inputs = inputs.astype(output_tensor.dtype) + if len(selected_loras.shape) == 4: + selected_loras = selected_loras.squeeze(1) + outputs = einsum_ms(inputs, selected_loras) + if add_inputs: + output_tensor[:, slice_offset:slice_offset + slice_size] += outputs[:] + else: + output_tensor[:, slice_offset:slice_offset + slice_size] = outputs[:] + return output_tensor \ No newline at end of file diff --git a/vllm_mindspore/lora/punica_wrapper/__init__.py b/vllm_mindspore/lora/punica_wrapper/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/lora/punica_wrapper/punica_npu.py b/vllm_mindspore/lora/punica_wrapper/punica_npu.py new file mode 100644 index 000000000..51b41b150 --- /dev/null +++ b/vllm_mindspore/lora/punica_wrapper/punica_npu.py @@ -0,0 +1,357 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +""" +refer to https://github.com/vllm-project/vllm-ascend/blob/v0.7.3/vllm_ascend/lora/punica_wrapper/punica_npu.py +""" +from typing import Callable + +from mindspore import mint +from mindspore.common import dtype as mstype +from vllm_mindspore.lora.ops.torch_ops.lora_ops import (bgmv_expand, bgmv_expand_slice, + bgmv_shrink, sgmv_expand, + sgmv_expand_slice, sgmv_shrink) +from vllm.lora.punica_wrapper.punica_base import PunicaWrapperBase + + +# The platforms that are compatible with the PyTorch-native implementation can +# inherit this class +class PunicaWrapperNPU(PunicaWrapperBase): + """ + PunicaWrapperNPU is designed to manage and provide metadata for the punica + kernel. 
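+    (Punica kernels batch the matmuls of many different LoRA adapters into a
+    single launch, so one wrapper instance serves every adapter in a batch.)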
The main function is to maintain the state information for
+    Multi-LoRA, and to provide the interface for the PyTorch punica ops.
+    """
+
+    def __init__(self, max_num_batched_tokens, max_batches, device, **kwargs):
+        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches,
+                                   device)
+
+    def _shrink_prefill(
+        self,
+        y,
+        x,
+        w_t_all,
+        scale,
+    ):
+        sgmv_shrink(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            scale,
+        )
+
+    def _shrink_decode(
+        self,
+        y,
+        x,
+        w_t_all,
+        scale,
+    ):
+        bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale)
+
+    def _expand_prefill(
+        self,
+        y,
+        x,
+        w_t_all,
+        add_inputs,
+    ):
+        sgmv_expand(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            add_inputs,
+        )
+
+    def _expand_decode(
+        self,
+        y,
+        x,
+        w_t_all,
+        add_inputs,
+    ):
+        bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_inputs)
+
+    def _expand_slice_prefill(
+        self,
+        y,
+        x,
+        w_t_all,
+        y_offset,
+        y_slice_size,
+        add_inputs,
+    ):
+        sgmv_expand_slice(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            y_offset,
+            y_slice_size,
+            add_inputs,
+        )
+
+    def _expand_slice_decode(
+        self,
+        y,
+        x,
+        w_t_all,
+        y_offset,
+        y_slice_size,
+        add_inputs,
+    ):
+        bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset,
+                          y_slice_size, add_inputs)
+
+    def _apply_expand(
+        self,
+        y,
+        x,
+        w_t_all,
+        y_offset,
+        y_slice_size,
+        add_inputs,
+    ):
+        """
+        Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all`
+        computation, which is suitable for the
+        GEMM of lora_b.
+        """
+
+        expand_slice_fun: Callable = (self._expand_slice_prefill
+                                      if self.is_prefill else
+                                      self._expand_slice_decode)
+        expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_inputs)
+
+    def _apply_shrink(self, y, x, w_t_all, scale):
+        """
+        Perform the ` y+=x@w_t_all` computation, which is suitable for the
+        GEMM of lora_a.
+        When `is_prefill` is true, it indicates that it is currently the
+        prefill stage, and the `_shrink_prefill` function should be called.
+        Otherwise, it is the decode stage, and the `_shrink_decode` function
+        should be called.
+        """
+        y_org = y
+        y = y.view(-1, y.shape[-1])
+        shrink_fun: Callable = (self._shrink_prefill
+                                if self.is_prefill else self._shrink_decode)
+        shrink_fun(y, x, w_t_all, scale)
+        y.view_as(y_org)
+
+    def add_shrink(self, y, x, lora_a_stacked, scale, **kwargs):
+        """
+        Performs GEMM for multiple slices of lora_a.
+        When `is_prefill` is true, it indicates that it is currently the
+        prefill stage, and the `_shrink_prefill` function should be called.
+        Otherwise, it is the decode stage, and the `_shrink_decode` function
+        should be called.
+
+        Semantics:
+            for i in range(len(lora_a_stacked)):
+                y[i] += (x @ lora_a_stacked[i]) * scale
+
+        Args:
+            y (Union[Tuple[ms.Tensor, ...], ms.Tensor]): Output tensors
+            x (ms.Tensor): Input tensor
+            lora_a_stacked (Tuple[ms.Tensor, ...]): lora_a's weights
+            scale (float): Scaling factor for the operation
+        """
+
+        x = x.view(-1, x.shape[-1])
+        # TODO fuse these kernels
+        for slice_idx in range(len(lora_a_stacked)):
+            self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx],
+                               scale)
+
+    def add_expand(self,
+                   y,
+                   x,
+                   lora_b_stacked,
+                   lora_bias_stacked,
+                   output_slices,
+                   offset_start=0,
+                   add_inputs=True,
+                   **kwargs) -> None:
+        """
+        Performs GEMM and bias addition for multiple slices of lora_b.
+
+        Semantics:
+            for i in range(len(lora_b_stacked)):
+                slice = output_slices[i]
+                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
+                                             + lora_bias_stacked[i]
+                offset += slice
+
+        Args:
+            y (ms.Tensor): Output tensor.
+ x (Union[Tuple[ms.Tensor, ...], ms.Tensor]): Input tensors + lora_b_stacked (Tuple[ms.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[ms.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + add_inputs (bool): Defaults to True. + """ + y_org = y + y = y.view(-1, y.shape[-1]) + offset_left = offset_start + if lora_bias_stacked is not None: + self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + for slice_idx in range(len(lora_b_stacked)): + self._apply_expand( + y, + x[slice_idx], + lora_b_stacked[slice_idx], + offset_left, + output_slices[slice_idx], + add_inputs=add_inputs, + ) + offset_left += output_slices[slice_idx] + y.view_as(y_org) + + def add_lora_embedding(self, + y, + x, + lora_b_stacked, + add_inputs=True, + **kwargs) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + + Semantics: + y += x @ lora_b_stacked + + Args: + y (ms.Tensor): Output tensor. + x (ms.Tensor): Input tensor. + lora_b_stacked (ms.Tensor): lora_b's weights. + add_inputs (bool): Default to True. + """ + #No LoRA request, so return directly + if self.no_lora: + return + # Embedding layer only need expand op + expand_fun: Callable = (self._expand_prefill + if self.is_prefill else self._expand_decode) + expand_fun(y, x, lora_b_stacked, add_inputs) + + def add_lora_linear(self, + y, + x, + lora_a_stacked, + lora_b_stacked, + lora_bias_stacked, + scale, + output_slices, + *, + buffer=None, + **kwargs) -> None: + """ + Applicable to linear-related lora. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + + Args: + y (ms.Tensor): Output tensor. Will be changed in-place. + x (ms.Tensor): Input tensor + lora_a_stacked (Tuple[ms.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[ms.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[ms.Tensor, ...]]): lora's bias. + scale (float): Scaling factor. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[ms.Tensor, ...]]): Defaults to None. + """ + #No LoRA request, so return directly + if self.no_lora: + return + x = x.reshape(-1, x.shape[-1]) + assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) + if lora_bias_stacked is not None: + assert len(lora_bias_stacked) == len(output_slices) + y = self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + + if buffer is None: + r = lora_b_stacked[0].shape[-1] + # We set the buffer to be float32 by default, consistent with the + # triton op + buffer = tuple( + mint.zeros((x.shape[0], r), dtype=mstype.float32) + for _ in range(len(output_slices))) + self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs) + self.add_expand(y, + buffer, + lora_b_stacked, + None, + output_slices, + add_inputs=True, + **kwargs) + + def add_lora_logits(self, + y, + x, + lora_a_stacked, + lora_b_stacked, + scale, + *, + buffer=None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (ms.Tensor): Output tensor. + x (ms.Tensor): Input tensor. + lora_a_stacked (ms.Tensor): lora_a's weights. + lora_b_stacked (ms.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[ms.Tensor]):Default to None. 
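+
+        Example (illustrative shapes only; `A`/`B` stand for the selected
+        rank-r adapter's weights and are not names from this module):
+            # x:              [num_tokens, hidden_size]
+            # lora_a_stacked: [max_loras, 1, r, hidden_size]
+            # lora_b_stacked: [max_loras, 1, vocab_size, r]
+            # buffer = (x @ A.T) * scale   -> [num_tokens, r]
+            # y     += buffer @ B.T        -> [num_tokens, vocab_size]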
+ """ + #No LoRA request, so return directly + if self.no_lora: + return + y_org = y + y = y.view(-1, y.shape[-1]) + x = x.view(-1, x.shape[-1]) + r = lora_b_stacked.shape[-1] + if buffer is None: + # We set the buffer to be float32 by default, consistent with the + # triton op + buffer = mint.zeros((x.shape[0], r), dtype=mstype.float32) + # LogitsProcessorWithLoRA always using bgmv. + bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) + bgmv_expand(buffer, + lora_b_stacked, + y, + self.sampler_indices, + add_inputs=True) + y.view_as(y_org) diff --git a/vllm_mindspore/lora/utils.py b/vllm_mindspore/lora/utils.py new file mode 100644 index 000000000..0084e607b --- /dev/null +++ b/vllm_mindspore/lora/utils.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from typing import Set, Type + +from vllm.lora.fully_sharded_layers import ( + ColumnParallelLinearWithShardedLoRA, + MergedColumnParallelLinearWithShardedLoRA, + MergedQKVParallelLinearWithShardedLoRA, QKVParallelLinearWithShardedLoRA, + RowParallelLinearWithShardedLoRA) + +from vllm_mindspore.lora.layers import ( + BaseLayerWithLoRA, ColumnParallelLinearWithLoRA, + LinearScalingRotaryEmbeddingWithLoRA, LogitsProcessorWithLoRA, + MergedColumnParallelLinearWithLoRA, MergedQKVParallelLinearWithLoRA, + QKVParallelLinearWithLoRA, RowParallelLinearWithLoRA, + VocabParallelEmbeddingWithLoRA) + +_all_lora_classes: Set[Type[BaseLayerWithLoRA]] = { + VocabParallelEmbeddingWithLoRA, + ColumnParallelLinearWithLoRA, + MergedColumnParallelLinearWithLoRA, + QKVParallelLinearWithLoRA, + MergedQKVParallelLinearWithLoRA, + RowParallelLinearWithLoRA, + LogitsProcessorWithLoRA, + ColumnParallelLinearWithShardedLoRA, + QKVParallelLinearWithShardedLoRA, + MergedColumnParallelLinearWithShardedLoRA, + MergedQKVParallelLinearWithShardedLoRA, + RowParallelLinearWithShardedLoRA, + LinearScalingRotaryEmbeddingWithLoRA, +} diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py index c9dfe254d..0cf464e39 100644 --- a/vllm_mindspore/model_executor/layers/rotary_embedding.py +++ b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -58,6 +57,7 @@ def _apply_rotary_emb( class RotaryEmbedding(CustomOp): + def __init__( self, head_size: int, @@ -86,10 +86,8 @@ class RotaryEmbedding(CustomOp): # use CPU to compute the cache and then move it to GPU. However, we # create the cache on GPU for faster initialization. This may cause # a slight numerical difference between the HF implementation and ours. 
- inv_freq = 1.0 / ( - base - ** (mint.arange(0, self.rotary_dim, 2, dtype=mstype.float32) / self.rotary_dim) - ) + inv_freq = 1.0 / (base**(mint.arange( + 0, self.rotary_dim, 2, dtype=mstype.float32) / self.rotary_dim)) return inv_freq def _compute_cos_sin_cache(self) -> Tensor: @@ -121,14 +119,14 @@ class RotaryEmbedding(CustomOp): query_shape = query.shape query = query.view(num_tokens, -1, self.head_size) - query_rot = query[..., : self.rotary_dim] + query_rot = query[..., :self.rotary_dim] query_pass = query[..., self.rotary_dim:] query_rot = _apply_rotary_emb(query_rot, cos, sin, self.is_neox_style) query = mint.cat((query_rot, query_pass), dim=-1).reshape(query_shape) key_shape = key.shape key = key.view(num_tokens, -1, self.head_size) - key_rot = key[..., : self.rotary_dim] + key_rot = key[..., :self.rotary_dim] key_pass = key[..., self.rotary_dim:] key_rot = _apply_rotary_emb(key_rot, cos, sin, self.is_neox_style) key = mint.cat((key_rot, key_pass), dim=-1).reshape(key_shape) @@ -136,6 +134,7 @@ class RotaryEmbedding(CustomOp): class InferRotaryEmbedding(CustomOp): + def __init__( self, head_size: int, @@ -146,8 +145,9 @@ class InferRotaryEmbedding(CustomOp): dtype, ) -> None: super().__init__() - freqs_base = np.arange(0, rotary_dim, 2)[: (rotary_dim // 2)].astype(np.float32) # (head_dim // 2, ) - freqs = 1.0 / (base ** (freqs_base / rotary_dim)) # (head_dim // 2, ) + freqs_base = np.arange(0, rotary_dim, 2)[:(rotary_dim // 2)].astype( + np.float32) # (head_dim // 2, ) + freqs = 1.0 / (base**(freqs_base / rotary_dim)) # (head_dim // 2, ) mscale = 1.0 t = np.arange(0, max_position_embeddings, 1).astype(np.float32) @@ -170,12 +170,16 @@ class InferRotaryEmbedding(CustomOp): is_prefill: bool, offsets: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor]: + query = query.contiguous() + key = key.contiguous() if is_prefill: - return self.rotary_embedding_op(query, key, self.freqs_cos, self.freqs_sin, batch_valid_length) + return self.rotary_embedding_op(query, key, self.freqs_cos, + self.freqs_sin, batch_valid_length) freqs_cos = self.gather(self.freqs_cos, positions, 0) freqs_sin = self.gather(self.freqs_sin, positions, 0) - return self.rotary_embedding_op(query, key, freqs_cos, freqs_sin, batch_valid_length) + return self.rotary_embedding_op(query, key, freqs_cos, freqs_sin, + batch_valid_length) class MRotaryEmbedding(RotaryEmbedding): diff --git a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py index e3407f516..b694075df 100644 --- a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. 
# @@ -23,16 +22,15 @@ from mindspore import Parameter, Tensor, mint, nn, ops from mindspore.common import dtype as mstype from mindspore.common.dtype import typing from vllm.distributed import (divide, get_tensor_model_parallel_rank, - get_tensor_model_parallel_world_size, - tensor_model_parallel_all_reduce,) -from vllm.model_executor.layers.quantization.base_config import ( - QuantizationConfig) + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.quantization.base_config import \ + QuantizationConfig +from vllm_mindspore.distributed.communication_op import \ + ReduceFromModelParallelRegion from vllm_mindspore.model_executor.layers.quantization.base_config import ( QuantizeMethodBase, method_has_implemented_embedding) from vllm_mindspore.model_executor.utils import set_weight_attrs -from vllm_mindspore.distributed.communication_op import ReduceFromModelParallelRegion -from mindspore import jit DEFAULT_VOCAB_PADDING_SIZE = 64 @@ -40,15 +38,13 @@ DEFAULT_VOCAB_PADDING_SIZE = 64 class UnquantizedEmbeddingMethod(QuantizeMethodBase): """Unquantized method for embeddings.""" - def create_weights(self, layer: nn.Cell, - input_size_per_partition: int, + def create_weights(self, layer: nn.Cell, input_size_per_partition: int, output_partition_sizes: List[int], input_size: int, - output_size: int, params_dtype, - **extra_weight_attrs): + output_size: int, params_dtype, **extra_weight_attrs): """Create weights for embedding layer.""" - weight = Parameter(mint.zeros((sum(output_partition_sizes), - input_size_per_partition), - dtype=params_dtype), + weight = Parameter(mint.zeros( + (sum(output_partition_sizes), input_size_per_partition), + dtype=params_dtype), requires_grad=False) set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0}) layer.insert_param_to_cell("weight", weight) @@ -64,7 +60,7 @@ class UnquantizedEmbeddingMethod(QuantizeMethodBase): layer: nn.Cell, x: Tensor, bias: Optional[Tensor] = None) -> Tensor: - output_shape = x.shape[:-1] + (self.output_size_per_partition,) + output_shape = x.shape[:-1] + (self.output_size_per_partition, ) x = x.reshape(-1, self.input_size_per_partition) x = self.matmul(x, layer.weight) if bias is not None: @@ -72,8 +68,7 @@ class UnquantizedEmbeddingMethod(QuantizeMethodBase): x = x.reshape(output_shape) return x - def embedding(self, layer: nn.Cell, - input_: Tensor) -> Tensor: + def embedding(self, layer: nn.Cell, input_: Tensor) -> Tensor: return self.gather(layer.weight, input_, 0) @@ -87,12 +82,15 @@ def get_masked_input_and_mask( ) -> Tuple[Tensor, Tensor]: displaced_x = mint.sub(input_, org_vocab_start_index) down_truncated_x = mint.nn.functional.relu(displaced_x) - truncated_x = mint.minimum(down_truncated_x, (org_vocab_end_index - org_vocab_start_index - 1)) + truncated_x = mint.minimum( + down_truncated_x, (org_vocab_end_index - org_vocab_start_index - 1)) org_vocab_mask = mint.eq(displaced_x, truncated_x) displaced_x = mint.sub(input_, added_vocab_start_index) down_truncated_x = mint.nn.functional.relu(displaced_x) - truncated_x = mint.minimum(down_truncated_x, (added_vocab_end_index - added_vocab_start_index - 1)) + truncated_x = mint.minimum( + down_truncated_x, + (added_vocab_end_index - added_vocab_start_index - 1)) added_vocab_mask = mint.eq(displaced_x, truncated_x) added_offset = added_vocab_start_index - ( org_vocab_end_index - org_vocab_start_index) - num_org_vocab_padding @@ -103,26 +101,29 @@ def get_masked_input_and_mask( return input_, vocab_mask.expand_dims(-1) -def pad_vocab_size(vocab_size: int, pad_to: int = 
DEFAULT_VOCAB_PADDING_SIZE) -> int: +def pad_vocab_size(vocab_size: int, + pad_to: int = DEFAULT_VOCAB_PADDING_SIZE) -> int: """Pad the vocab size to the given value.""" return ((vocab_size + pad_to - 1) // pad_to) * pad_to def vocab_range_from_per_partition_vocab_size( - per_partition_vocab_size: int, rank: int, offset: int = 0 -) -> Sequence[int]: + per_partition_vocab_size: int, + rank: int, + offset: int = 0) -> Sequence[int]: index_f = rank * per_partition_vocab_size index_l = index_f + per_partition_vocab_size return index_f + offset, index_l + offset -def vocab_range_from_global_vocab_size( - global_vocab_size: int, rank: int, world_size: int, offset: int = 0 -) -> Sequence[int]: +def vocab_range_from_global_vocab_size(global_vocab_size: int, + rank: int, + world_size: int, + offset: int = 0) -> Sequence[int]: per_partition_vocab_size = divide(global_vocab_size, world_size) - return vocab_range_from_per_partition_vocab_size( - per_partition_vocab_size, rank, offset=offset - ) + return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, + rank, + offset=offset) @dataclass @@ -185,6 +186,7 @@ class VocabParallelEmbeddingShardIndices: class VocabParallelEmbedding(nn.Cell): + def __init__( self, num_embeddings: int, @@ -203,12 +205,11 @@ class VocabParallelEmbedding(nn.Cell): self.padding_size = padding_size self.org_vocab_size = org_num_embeddings or num_embeddings num_added_embeddings = num_embeddings - self.org_vocab_size - self.org_vocab_size_padded = pad_vocab_size( - self.org_vocab_size, self.padding_size - ) + self.org_vocab_size_padded = pad_vocab_size(self.org_vocab_size, + self.padding_size) self.num_embeddings_padded = pad_vocab_size( - self.org_vocab_size_padded + num_added_embeddings, self.padding_size - ) + self.org_vocab_size_padded + num_added_embeddings, + self.padding_size) assert self.org_vocab_size_padded <= self.num_embeddings_padded self.shard_indices = self._get_indices( @@ -233,13 +234,11 @@ class VocabParallelEmbedding(nn.Cell): # layer type like ParallelLMHead, this is not important. is_embedding_layer = type(self) is VocabParallelEmbedding quant_method_implements_embedding = method_has_implemented_embedding( - type(quant_method) - ) + type(quant_method)) if is_embedding_layer and not quant_method_implements_embedding: raise NotImplementedError( f"The class {type(quant_method).__name__} must implement " - "the 'embedding' method, see UnquantizedEmbeddingMethod." - ) + "the 'embedding' method, see UnquantizedEmbeddingMethod.") self.quant_method: QuantizeMethodBase = quant_method @@ -247,20 +246,16 @@ class VocabParallelEmbedding(nn.Cell): params_dtype = mstype.float16 # Divide the weight matrix along the vocaburaly dimension. 
self.num_added_embeddings = self.num_embeddings - self.org_vocab_size - self.num_embeddings_per_partition = divide( - self.num_embeddings_padded, self.tp_size - ) - assert ( - self.shard_indices.num_elements_padded == self.num_embeddings_per_partition - ) + self.num_embeddings_per_partition = divide(self.num_embeddings_padded, + self.tp_size) + assert (self.shard_indices.num_elements_padded == + self.num_embeddings_per_partition) self.num_org_embeddings_per_partition = ( - self.shard_indices.org_vocab_end_index - - self.shard_indices.org_vocab_start_index - ) + self.shard_indices.org_vocab_end_index - + self.shard_indices.org_vocab_start_index) self.num_added_embeddings_per_partition = ( - self.shard_indices.added_vocab_end_index - - self.shard_indices.added_vocab_start_index - ) + self.shard_indices.added_vocab_end_index - + self.shard_indices.added_vocab_start_index) self.quant_method.create_weights( self, @@ -288,17 +283,19 @@ class VocabParallelEmbedding(nn.Cell): tp_size.""" num_added_embeddings_padded = vocab_size_padded - org_vocab_size_padded padded_org_vocab_start_index, padded_org_vocab_end_index = ( - vocab_range_from_global_vocab_size(org_vocab_size_padded, tp_rank, tp_size) - ) + vocab_range_from_global_vocab_size(org_vocab_size_padded, tp_rank, + tp_size)) padded_added_vocab_start_index, padded_added_vocab_end_index = ( - vocab_range_from_global_vocab_size( - num_added_embeddings_padded, tp_rank, tp_size, offset=org_vocab_size - ) - ) + vocab_range_from_global_vocab_size(num_added_embeddings_padded, + tp_rank, + tp_size, + offset=org_vocab_size)) # remove padding - org_vocab_start_index = min(padded_org_vocab_start_index, org_vocab_size) + org_vocab_start_index = min(padded_org_vocab_start_index, + org_vocab_size) org_vocab_end_index = min(padded_org_vocab_end_index, org_vocab_size) - added_vocab_start_index = min(padded_added_vocab_start_index, vocab_size) + added_vocab_start_index = min(padded_added_vocab_start_index, + vocab_size) added_vocab_end_index = min(padded_added_vocab_end_index, vocab_size) return VocabParallelEmbeddingShardIndices( padded_org_vocab_start_index, @@ -311,18 +308,15 @@ class VocabParallelEmbedding(nn.Cell): added_vocab_end_index, ) - @jit def construct(self, input_): if self.tp_size > 1: # Build the mask. masked_input, input_mask = get_masked_input_and_mask( - input_, - self.shard_indices.org_vocab_start_index, + input_, self.shard_indices.org_vocab_start_index, self.shard_indices.org_vocab_end_index, self.shard_indices.num_org_vocab_padding, self.shard_indices.added_vocab_start_index, - self.shard_indices.added_vocab_end_index - ) + self.shard_indices.added_vocab_end_index) else: masked_input, input_mask = input_, None # Get the embeddings. @@ -354,11 +348,13 @@ class VocabParallelEmbedding(nn.Cell): if loaded_weight.shape[output_dim] != self.org_vocab_size: raise ValueError( f"'loaded_weight.shape[output_dim]' should be equal to 'org_vocab_size'," - f" but got {loaded_weight.shape[output_dim]} and {self.org_vocab_size}") + f" but got {loaded_weight.shape[output_dim]} and {self.org_vocab_size}" + ) # Copy the data. 
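        # Each TP rank keeps only its own vocab shard, i.e. the rows
        # [start_idx, start_idx + shard_size) of the full weight; the padded
        # tail of the partition is zero-filled below.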
- loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size).contiguous() - param[: loaded_weight.shape[0]] = loaded_weight + loaded_weight = loaded_weight.narrow(output_dim, start_idx, + shard_size).contiguous() + param[:loaded_weight.shape[0]] = loaded_weight param[loaded_weight.shape[0]:] = 0 @@ -401,8 +397,8 @@ class ParallelLMHead(VocabParallelEmbedding): self.quant_config = quant_config if bias: self.bias = Parameter( - mint.zeros(self.num_embeddings_per_partition, dtype=params_dtype) - ) + mint.zeros(self.num_embeddings_per_partition, + dtype=params_dtype)) set_weight_attrs( self.bias, { diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py index 0d933a2db..c980050e2 100644 --- a/vllm_mindspore/model_executor/models/model_base.py +++ b/vllm_mindspore/model_executor/models/model_base.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -18,68 +17,62 @@ import os from abc import abstractmethod -from typing import Iterable, List, Optional, Set, Tuple, Union, Dict +from typing import Dict, Iterable, Optional, Set, Tuple, Union +import torch +from mindspore import Tensor, mutable, nn +from vllm.attention.backends.abstract import AttentionType +from vllm.attention.layer import Attention from vllm.config import VllmConfig, get_current_vllm_config +from vllm.forward_context import get_forward_context from vllm.model_executor.layers.sampler import SamplerOutput from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from vllm.attention.backends.abstract import AttentionType -from vllm.forward_context import get_forward_context -from vllm.attention.layer import Attention - -import torch - -from mindspore import Tensor, nn, mutable class Fake_Attention: + def __init__(self): vllm_config = get_current_vllm_config() block_size = vllm_config.cache_config.block_size num_kv_heads = vllm_config.model_config.get_num_kv_heads( - vllm_config.parallel_config - ) + vllm_config.parallel_config) head_size = vllm_config.model_config.get_head_size() num_block = 0 self.kv_shape = [num_block, block_size, num_kv_heads, head_size] - self.kv_cache = [ - ( - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - ) - for _ in range(vllm_config.parallel_config.pipeline_parallel_size) - ] + self.kv_cache = [( + torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), + torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), + ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size)] self.attn_type = AttentionType.DECODER class Fake_MLA(Fake_Attention): + def __init__(self): super().__init__() vllm_config = get_current_vllm_config() self.kv_cache = [ - (torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"),) + (torch.zeros(self.kv_shape, dtype=torch.bfloat16, + device="Ascend"), ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size) ] class Fake_Attention_V1(Attention): + def __init__(self): vllm_config = get_current_vllm_config() block_size = vllm_config.cache_config.block_size num_kv_heads = vllm_config.model_config.get_num_kv_heads( - vllm_config.parallel_config - ) + vllm_config.parallel_config) head_size = vllm_config.model_config.get_head_size() num_block = 0 self.kv_shape = [num_block, block_size, num_kv_heads, head_size] - self.kv_cache = [ - ( - 
torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - ) - for _ in range(vllm_config.parallel_config.pipeline_parallel_size) - ] + self.kv_cache = [( + torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), + torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), + ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size)] self.attn_type = AttentionType.DECODER self.num_block = num_block self.num_kv_heads = num_kv_heads @@ -90,18 +83,21 @@ class Fake_Attention_V1(Attention): class Fake_MLA_V1(Fake_Attention_V1): + def __init__(self): super().__init__() vllm_config = get_current_vllm_config() self.kv_cache = [ - (torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"),) + (torch.zeros(self.kv_shape, dtype=torch.bfloat16, + device="Ascend"), ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size) ] -class MsModelBase(): +class MsModelBase: + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: - super(MsModelBase, self).__init__() + super().__init__() config = vllm_config.model_config.hf_config lora_config = vllm_config.lora_config @@ -125,7 +121,8 @@ class MsModelBase(): if os.path.isdir(model_name_or_path): return model_name_or_path else: - from vllm.model_executor.model_loader.weight_utils import download_weights_from_hf + from vllm.model_executor.model_loader.weight_utils import \ + download_weights_from_hf allow_patterns = ["*.safetensors"] revision = self.model_config.revision return download_weights_from_hf( @@ -171,15 +168,24 @@ class MsModelBase(): def named_modules(self, remove_duplicate: bool = True): self._check_modules_valid() - res_modules = set() for name, module in self.modules_dict.items(): for module_name, sub_module in module.cells_and_names(): if name != "self": module_name = name + "." 
+ module_name yield module_name, sub_module - def get_submodule(self): - raise RuntimeError("Cannot get submodule for mindspore model now!") + def get_submodule(self, target: str): + parts = target.split(".") + if target == "": + return self + for part in parts: + if not part: + raise ValueError( + f"Invalid submodule path: empty part in '{target}'") + current = self + for part in parts: + current = getattr(current, part) + return current def eval(self): self._check_modules_valid() @@ -198,23 +204,19 @@ class MsModelBase(): previous_hidden_states: Optional[Tensor] = None, spec_step_idx: int = 0, ) -> Union[Tensor, IntermediateTensors]: - return self.forward( - input_ids, - positions, - intermediate_tensors, - inputs_embeds, - previous_hidden_states=previous_hidden_states, - spec_step_idx=spec_step_idx - ) - - def forward( - self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[Tensor] = None, - **kwargs - ) -> Union[Tensor, IntermediateTensors]: + return self.forward(input_ids, + positions, + intermediate_tensors, + inputs_embeds, + previous_hidden_states=previous_hidden_states, + spec_step_idx=spec_step_idx) + + def forward(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + **kwargs) -> Union[Tensor, IntermediateTensors]: raise NotImplementedError def set_model_inputs(self, is_prefill): @@ -264,8 +266,10 @@ class MsModelBase(): value_cache = [] forward_context = get_forward_context() for i in range(self.config.num_hidden_layers): - k_cache = self.kv_caches[i].kv_cache[forward_context.virtual_engine][0] - v_cache = self.kv_caches[i].kv_cache[forward_context.virtual_engine][1] + k_cache = self.kv_caches[i].kv_cache[ + forward_context.virtual_engine][0] + v_cache = self.kv_caches[i].kv_cache[ + forward_context.virtual_engine][1] key_cache.append(k_cache) value_cache.append(v_cache) return mutable(key_cache), mutable(value_cache) @@ -276,7 +280,8 @@ class MsModelBase(): hidden_states: Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[Tensor]: - raise NotImplementedError("Function compute_logits should be Implemented!") + raise NotImplementedError( + "Function compute_logits should be Implemented!") @abstractmethod def sample( @@ -288,4 +293,5 @@ class MsModelBase(): @abstractmethod def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: - raise NotImplementedError("Function load_weights should be Implemented!") + raise NotImplementedError( + "Function load_weights should be Implemented!") diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 965566ec9..fcd9bb51a 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -# encoding: utf-8 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -15,21 +16,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union, Iterable +from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, + Union) if TYPE_CHECKING: from transformers import Qwen2Config else: Qwen2Config = None +import mindspore as ms import numpy as np - -from mindspore import Parameter, Tensor, mint, nn, jit, ops, mutable +import vllm.envs as envs +from mindspore import Parameter, Tensor, mint, mutable, nn, ops from mindspore.common import dtype as mstype - +from vllm.attention.backends.abstract import AttentionType +from vllm.config import CacheConfig, VllmConfig +from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.forward_context import get_forward_context +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.models.interfaces import SupportsLoRA +from vllm.sequence import IntermediateTensors from vllm_mindspore.attention import Attention - from vllm_mindspore.model_executor.layers.activation import SwiGLU from vllm_mindspore.model_executor.layers.layernorm import RMSNorm from vllm_mindspore.model_executor.layers.linear import ( @@ -43,27 +51,22 @@ from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) from vllm_mindspore.model_executor.model_loader.weight_utils import \ default_weight_loader +from vllm_mindspore.model_executor.models.attention_mask import \ + LowerTriangularMask +from vllm_mindspore.model_executor.models.model_base import (Fake_Attention, + Fake_Attention_V1, + MsModelBase) from vllm_mindspore.model_executor.models.utils import ( - PPMissingLayer, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) + PPMissingLayer, _jit, make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix, set_enforce_eager) from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm_mindspore.model_executor.models.model_base import MsModelBase, Fake_Attention, Fake_Attention_V1 -from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE +from vllm_mindspore.v1.attention.backends.flash_attn import \ + FlashAttentionMetadata -from vllm.config import CacheConfig, VllmConfig -import vllm.envs as envs -from vllm.model_executor.layers.quantization import \ - QuantizationConfig -from vllm.sequence import IntermediateTensors -from vllm.attention.backends.abstract import AttentionType -from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size -from vllm.forward_context import get_forward_context -from vllm_mindspore.v1.attention.backends.flash_attn import FlashAttentionMetadata -import mindspore as ms - class Qwen2MLP(nn.Cell): + def __init__( self, hidden_size: int, @@ -80,22 +83,18 @@ class Qwen2MLP(nn.Cell): bias=bias, quant_config=quant_config, prefix=f"{prefix}.gate_up_proj", - params_dtype=mstype.bfloat16 - ) - self.down_proj = RowParallelLinear( - input_size=intermediate_size, - output_size=hidden_size, - bias=bias, - quant_config=quant_config, - prefix=f"{prefix}.down_proj", - params_dtype=mstype.bfloat16 - ) + params_dtype=mstype.bfloat16) + self.down_proj = RowParallelLinear(input_size=intermediate_size, + output_size=hidden_size, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.down_proj", + params_dtype=mstype.bfloat16) if hidden_act != "silu": raise 
ValueError(f"Unsupported activation: {hidden_act}. " "Only silu is supported for now.") self.act_fn = SwiGLU() - @jit def construct(self, x): x, _ = self.gate_up_proj(x) x = self.act_fn(x) @@ -104,19 +103,18 @@ class Qwen2MLP(nn.Cell): class Qwen2Attention(nn.Cell): - def __init__( - self, - hidden_size: int, - num_heads: int, - num_kv_heads: int, - max_position: int = 4096 * 32, - rope_theta: float = 10000, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - rope_scaling: Optional[Tuple] = None, - prefix: str = "", - attn_type: str = AttentionType.DECODER - ) -> None: + + def __init__(self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + max_position: int = 4096 * 32, + rope_theta: float = 10000, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + rope_scaling: Optional[Tuple] = None, + prefix: str = "", + attn_type: str = AttentionType.DECODER) -> None: super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() @@ -166,18 +164,15 @@ class Qwen2Attention(nn.Cell): rope_scaling=rope_scaling, dtype=mstype.bfloat16, ) - self.attn = Attention( - self.num_heads, - self.head_dim, - self.scaling, - num_kv_heads=self.num_kv_heads, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.attn", - attn_type=attn_type - ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.attn", + attn_type=attn_type) - @jit def construct( self, positions: Tensor, @@ -192,10 +187,12 @@ class Qwen2Attention(nn.Cell): block_tables: Tensor, ) -> Tensor: qkv, _ = self.qkv_proj(hidden_states) - q, k, v = mint.split(qkv, (self.q_size, self.kv_size, self.kv_size), -1) + q, k, v = mint.split(qkv, (self.q_size, self.kv_size, self.kv_size), + -1) q, k = self.rotary_emb(positions, q, k, batch_valid_length, is_prefill) - attn_output = self.attn(q, k, v, key_cache, value_cache, is_prefill, slot_mapping, attn_mask, - batch_valid_length, q_seq_lens, block_tables) + attn_output = self.attn(q, k, v, key_cache, value_cache, is_prefill, + slot_mapping, attn_mask, batch_valid_length, + q_seq_lens, block_tables) output, _ = self.o_proj(attn_output) return output @@ -243,14 +240,17 @@ class Qwen2DecoderLayer(nn.Cell): quant_config=quant_config, prefix=f"{prefix}.mlp", ) - self.input_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps, - params_dtype=mstype.bfloat16,) - self.post_attention_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps, - params_dtype=mstype.bfloat16,) - - @jit + self.input_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + params_dtype=mstype.bfloat16, + ) + self.post_attention_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + params_dtype=mstype.bfloat16, + ) + def construct( self, positions: Tensor, @@ -270,22 +270,16 @@ class Qwen2DecoderLayer(nn.Cell): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: - hidden_states, residual = self.input_layernorm(hidden_states, residual) - hidden_states = self.self_attn( - positions, - hidden_states, - key_cache, - value_cache, - is_prefill, - slot_mapping, - attn_mask, - batch_valid_length, - q_seq_lens, - block_tables - ) + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn(positions, hidden_states, key_cache, + 
                                       value_cache, is_prefill, slot_mapping,
+                                       attn_mask, batch_valid_length,
+                                       q_seq_lens, block_tables)

         # Fully Connected
-        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
+        hidden_states, residual = self.post_attention_layernorm(
+            hidden_states, residual)
         hidden_states = self.mlp(hidden_states)
         return hidden_states, residual

@@ -302,6 +296,9 @@ class Qwen2Model(nn.Cell):
         self.config = config
         self.quant_config = quant_config
         self.vocab_size = config.vocab_size
+        if vllm_config.lora_config is not None:
+            vllm_config.model_config.enforce_eager = True
+        set_enforce_eager(vllm_config.model_config.enforce_eager)

         if get_pp_group().is_first_rank or (config.tie_word_embeddings
                                             and get_pp_group().is_last_rank):
@@ -328,15 +325,18 @@
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))
         if get_pp_group().is_last_rank:
-            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps,
-                                params_dtype=mstype.bfloat16,)
+            self.norm = RMSNorm(
+                config.hidden_size,
+                eps=config.rms_norm_eps,
+                params_dtype=mstype.bfloat16,
+            )
         else:
             self.norm = PPMissingLayer()

     def get_input_embeddings(self, input_ids: Tensor) -> Tensor:
         return self.embed_tokens(input_ids)

-    @jit
+    @_jit
     def construct(
         self,
         input_ids: Optional[Tensor],
@@ -364,19 +364,12 @@
         for i in range(self.start_layer, self.end_layer):  # layers are split across PP ranks
             layer = self.layers[i]
-            hidden_states, residual = layer(
-                positions,
-                hidden_states,
-                key_caches[i - self.start_layer],
-                value_caches[i - self.start_layer],
-                is_prefill,
-                slot_mapping,
-                attn_mask,
-                batch_valid_length,
-                q_seq_lens,
-                block_tables,
-                residual
-            )
+            hidden_states, residual = layer(positions, hidden_states,
+                                            key_caches[i - self.start_layer],
+                                            value_caches[i - self.start_layer],
+                                            is_prefill, slot_mapping,
+                                            attn_mask, batch_valid_length,
+                                            q_seq_lens, block_tables, residual)
         if not get_pp_group().is_last_rank:
             return IntermediateTensors({
                 "hidden_states": hidden_states,
@@ -385,7 +378,8 @@
         hidden_states, _ = self.norm(hidden_states, residual)
         return hidden_states

-    def load_weights(self, weights: Iterable[Tuple[str, Tensor]], params_dict: Dict[str, Parameter]):
+    def load_weights(self, weights: Iterable[Tuple[str, Tensor]],
+                     params_dict: Dict[str, Parameter]):
         loaded_params: Set[str] = set()
         stacked_params_mapping = [
             # (param_name, shard_name, shard_id)
@@ -405,7 +399,7 @@
                 # the checkpoint. Skip them.
continue if (self.quant_config is not None and - (scale_name := self.quant_config.get_cache_scale(name))): + (scale_name := self.quant_config.get_cache_scale(name))): # Loading kv cache quantization scales param = params_dict[scale_name] weight_loader = getattr(param, "weight_loader", @@ -436,7 +430,7 @@ class Qwen2Model(nn.Cell): return loaded_params -class Qwen2ForCausalLM(MsModelBase): +class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): packed_modules_mapping = { "qkv_proj": [ "q_proj", @@ -480,7 +474,8 @@ class Qwen2ForCausalLM(MsModelBase): config.hidden_size, params_dtype=mstype.bfloat16, quant_config=quant_config, - prefix=maybe_prefix(prefix, "lm_head")) + prefix=maybe_prefix( + prefix, "lm_head")) self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() else: @@ -491,20 +486,26 @@ class Qwen2ForCausalLM(MsModelBase): self.set_modules({"model": self.model, "lm_head": self.lm_head}) self.prefill = True - self.mstype = STR_DTYPE_TO_MS_DTYPE.get(self.model_config.dtype, self.model_config.dtype) - self.casual_mask = LowerTriangularMask(dtype=self.mstype, - max_model_len=self.model_config.max_model_len) + self.mstype = STR_DTYPE_TO_MS_DTYPE.get(self.model_config.dtype, + self.model_config.dtype) + self.casual_mask = LowerTriangularMask( + dtype=self.mstype, max_model_len=self.model_config.max_model_len) self.set_model_inputs(self.prefill) if envs.VLLM_USE_V1: - self.kv_caches = [Fake_Attention_V1() for i in range(config.num_hidden_layers)] + self.kv_caches = [ + Fake_Attention_V1() for i in range(config.num_hidden_layers) + ] else: - self.kv_caches = [Fake_Attention() for i in range(config.num_hidden_layers)] + self.kv_caches = [ + Fake_Attention() for i in range(config.num_hidden_layers) + ] compilation_config = vllm_config.compilation_config if prefix in compilation_config.static_forward_context: raise ValueError(f"Duplicate layer name: {prefix}") for i in range(config.num_hidden_layers): - compilation_config.static_forward_context[str(i)] = self.kv_caches[i] + compilation_config.static_forward_context[str( + i)] = self.kv_caches[i] def set_model_inputs(self, is_prefill): dyn_input_ids = Tensor(shape=[None], dtype=mstype.int64) @@ -525,43 +526,40 @@ class Qwen2ForCausalLM(MsModelBase): dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) dyn_value_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) - dyn_value_caches = mutable([dyn_value_cache for _ in range(num_layers)]) + dyn_value_caches = mutable( + [dyn_value_cache for _ in range(num_layers)]) - dyn_slot_mapping = Tensor(shape=[None, ], dtype=mstype.int32) + dyn_slot_mapping = Tensor(shape=[ + None, + ], dtype=mstype.int32) dynamic_attention_mask = Tensor(shape=[None, None], dtype=self.mstype) - dyn_batch_valid_length = Tensor(shape=[None,], dtype=mstype.int32) - dyn_q_seq_lens = Tensor(shape=[None, ], dtype=mstype.int32) + dyn_batch_valid_length = Tensor(shape=[ + None, + ], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[ + None, + ], dtype=mstype.int32) dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) dyn_intermediate_tensors = None dyn_inputs_embeds = None - self.model.set_inputs( - dyn_input_ids, - dyn_position_ids, - dyn_key_caches, - dyn_value_caches, - is_prefill, - dyn_slot_mapping, - dynamic_attention_mask, - dyn_batch_valid_length, - dyn_q_seq_lens, - dyn_block_tables, - dyn_intermediate_tensors, - dyn_inputs_embeds - ) - - def forward( - self, - input_ids: Tensor, - positions: 
Tensor, - intermediate_tensors: IntermediateTensors = None, - inputs_embeds: Tensor = None, - **kwargs - ) -> Union[Tensor, IntermediateTensors]: + self.model.set_inputs(dyn_input_ids, dyn_position_ids, dyn_key_caches, + dyn_value_caches, is_prefill, dyn_slot_mapping, + dynamic_attention_mask, dyn_batch_valid_length, + dyn_q_seq_lens, dyn_block_tables, + dyn_intermediate_tensors, dyn_inputs_embeds) + + def forward(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs) -> Union[Tensor, IntermediateTensors]: key_cache, value_cache = self.get_kvcache() attn_metadata = get_forward_context().attn_metadata input_ids = input_ids.to(ms.int64) if attn_metadata is None: - attn_metadata = self._dummy_attention_metadata(input_ids, positions) + attn_metadata = self._dummy_attention_metadata( + input_ids, positions) if not envs.VLLM_USE_V1: seq_lens = attn_metadata.seq_lens max_query_len = attn_metadata.max_query_len @@ -576,24 +574,25 @@ class Qwen2ForCausalLM(MsModelBase): seq_lens_np = np.array(seq_lens, dtype=np.int32) query_lens_np = np.array(query_lens, dtype=np.int32) kv_cache_lens = seq_lens_np - query_lens_np - is_prefill = attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max() == 0 + is_prefill = attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max( + ) == 0 slot_mapping = attn_metadata.slot_mapping - batch_valid_length = Tensor.from_numpy(np.array(attn_metadata.seq_lens, dtype=np.int32)) + batch_valid_length = Tensor.from_numpy( + np.array(attn_metadata.seq_lens, dtype=np.int32)) q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) block_tables = attn_metadata.block_tables position_ids = ms.Tensor(positions, dtype=ms.int32) - attn_mask = self.casual_mask.gen_attention_mask(is_prefill, position_ids, query_lens) + attn_mask = self.casual_mask.gen_attention_mask( + is_prefill, position_ids, query_lens) else: - if attn_metadata.max_context_lens == 0: - is_prefill = True - else: - is_prefill = False + is_prefill = attn_metadata.max_context_lens == 0 slot_mapping = attn_metadata.slot_mapping batch_valid_length = Tensor.from_numpy(attn_metadata.seq_lens_np) q_seq_lens = attn_metadata.q_seq_lens block_tables = attn_metadata.block_tables query_lens_np = attn_metadata.q_seq_lens_np - attn_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np) + attn_mask = self.casual_mask.gen_attention_mask( + is_prefill, positions, query_lens_np) positions = positions.to(ms.int64) if is_prefill: if not self.prefill: @@ -617,7 +616,8 @@ class Qwen2ForCausalLM(MsModelBase): inputs_embeds) return model_output - def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor) -> FlashAttentionMetadata: + def _dummy_attention_metadata(self, input_ids: Tensor, + positions: Tensor) -> FlashAttentionMetadata: input_len = input_ids.shape[0] max_seq_len = ms.Tensor(input_len, dtype=ms.int32) seq_lengths = ms.Tensor([input_len], dtype=ms.int32) @@ -640,16 +640,14 @@ class Qwen2ForCausalLM(MsModelBase): # To enforce prefill and decode are both complied in warmup process. # So set max_context_lens to 0 for prefill and 1 for decode. 
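            # (The V1 forward path above selects the prefill branch when
            # max_context_lens == 0, so this dummy metadata compiles both
            # the prefill and the decode graph during warmup.)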
max_context_lens=0 if self.prefill else 1, - query_start_loc = None - ) + query_start_loc=None) def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: params_dict = self.get_params_dict() self.model.load_weights(weights, params_dict) - def sample( - self, logits: Tensor, sampling_metadata: SamplingMetadata - ) -> Optional[SamplerOutput]: + def sample(self, logits: Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: next_tokens = self.sampler(logits, sampling_metadata) return next_tokens @@ -658,5 +656,6 @@ class Qwen2ForCausalLM(MsModelBase): hidden_states: Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, sampling_metadata) + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) return logits diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 4bb7831c5..81b3f1923 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -18,7 +17,11 @@ from dataclasses import dataclass, field from typing import List, Tuple, Union, Mapping, Optional, Iterable +from functools import wraps +from typing import List, Tuple +import mindspore as ms +from mindspore import jit, mint from vllm.sequence import IntermediateTensors from vllm_mindspore.multimodal.inputs import NestedTensors @@ -70,6 +73,7 @@ class WeightsMapper: ) -> Iterable[Tuple[str, ms.Tensor]]: return ((out_name, data) for name, data in weights if (out_name := self._map_name(name)) is not None) +enforce_eager = False class PPMissingLayer(ms.nn.Cell): @@ -118,9 +122,8 @@ def extract_layer_index(layer_name: str) -> int: int_vals.append(int(subname)) except ValueError: continue - assert len(int_vals) == 1, ( - f"layer name {layer_name} should" " only contain one integer" - ) + assert len(int_vals) == 1, (f"layer name {layer_name} should" + " only contain one integer") return int_vals[0] @@ -135,17 +138,13 @@ def make_layers( from vllm.distributed.parallel_state import get_pp_group from vllm.distributed.utils import get_pp_indices - start_layer, end_layer = get_pp_indices( - num_hidden_layers, get_pp_group().rank_in_group, get_pp_group().world_size - ) - modules = ms.nn.CellList( - [PPMissingLayer() for _ in range(start_layer)] - + [ - maybe_offload_to_cpu(layer_fn(prefix=f"{prefix}.{idx}")) - for idx in range(start_layer, end_layer) - ] - + [PPMissingLayer() for _ in range(end_layer, num_hidden_layers)] - ) + start_layer, end_layer = get_pp_indices(num_hidden_layers, + get_pp_group().rank_in_group, + get_pp_group().world_size) + modules = ms.nn.CellList([PPMissingLayer() for _ in range(start_layer)] + [ + maybe_offload_to_cpu(layer_fn(prefix=f"{prefix}.{idx}")) + for idx in range(start_layer, end_layer) + ] + [PPMissingLayer() for _ in range(end_layer, num_hidden_layers)]) return start_layer, end_layer, modules @@ -157,9 +156,10 @@ def make_empty_intermediate_tensors_factory(keys: List[str], hidden_size: int): device, ) -> IntermediateTensors: dtype = get_valid_dtype(dtype) - return IntermediateTensors( - {key: mint.zeros((batch_size, hidden_size), dtype=dtype) for key in keys} - ) + return IntermediateTensors({ + key: mint.zeros((batch_size, hidden_size), dtype=dtype) + for key in keys + }) return make_empty_intermediate_tensors @@ -263,4 +263,28 @@ 
def merge_multimodal_embeddings( inputs_embeds, (input_ids == placeholder_token_id), multimodal_embeddings, - ) \ No newline at end of file + ) +def set_enforce_eager(value): + """ + set global variable enforce_eager to value. + """ + global enforce_eager + enforce_eager = value + + +def _jit(func): + """ + A decorator to apply JIT compilation to a function or method. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + if enforce_eager: + # If enforce_eager is True, we do not apply JIT compilation. + return func(*args, **kwargs) + if hasattr(func, "__wrapped_by_jit__"): + # If the function is already wrapped by JIT, we call it directly. + return func(*args, **kwargs) + return jit(func, jit_level="O0", infer_boost="on")(*args, **kwargs) + + return wrapper diff --git a/vllm_mindspore/platforms/ascend.py b/vllm_mindspore/platforms/ascend.py index 356a33a04..89e228290 100644 --- a/vllm_mindspore/platforms/ascend.py +++ b/vllm_mindspore/platforms/ascend.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -17,15 +16,12 @@ # ============================================================================ """Ascend platform.""" -import os -from typing import (TYPE_CHECKING, Optional, Union, Tuple) +from typing import TYPE_CHECKING, Optional, Tuple, Union import torch -import mindspore as ms - -from vllm.platforms.interface import DeviceCapability, Platform, PlatformEnum, _Backend -from vllm.logger import init_logger import vllm.envs as envs +from vllm.logger import init_logger +from vllm.platforms.interface import Platform, PlatformEnum, _Backend if TYPE_CHECKING: from vllm.config import ModelConfig, VllmConfig @@ -40,7 +36,7 @@ class AscendPlatform(Platform): _enum = PlatformEnum.OOT device_name: str = "npu" - device_type: str = "cuda" # To use cuda worker, executor... + device_type: str = "cuda" # To use cuda worker, executor... simple_compile_backend: str = "npu" ray_device_key: str = "NPU" device_control_env_var: str = "ASCEND_RT_VISIBLE_DEVICES" @@ -103,7 +99,8 @@ class AscendPlatform(Platform): model_config.disable_cascade_attn = True @classmethod - def get_attn_backend_cls(cls, selected_backend, head_size, dtype, kv_cache_dtype, block_size, use_v1, use_mla): + def get_attn_backend_cls(cls, selected_backend, head_size, dtype, + kv_cache_dtype, block_size, use_v1, use_mla): """Get the attention backend class of a device.""" if use_v1: if use_mla: @@ -119,12 +116,13 @@ class AscendPlatform(Platform): return "vllm_mindspore.attention.backends.ms_attn.MsAttentionBackend" raise ValueError( - "Invaild attention backend %s for vLLM-MindSpore with head_size: %s, dtype: %s, kv_cache_dtype: %s, block_size: %s." - % (str(selected_backend), str(head_size), str(dtype), str(kv_cache_dtype), str(block_size)) + f"Invalid attention backend {str(selected_backend)} for vLLM-MindSpore with head_size: {str(head_size)}, dtype: {str(dtype)}, kv_cache_dtype: {str(kv_cache_dtype)}, block_size: {str(block_size)}." 
         )

     @classmethod
-    def get_current_memory_usage(cls, device: Optional[torch.types.Device] = None) -> float:
+    def get_current_memory_usage(cls,
+                                 device: Optional[torch.types.Device] = None
+                                 ) -> float:
         """Return the memory usage in bytes."""
         torch.cuda.reset_peak_memory_stats()
         return torch.cuda.max_memory_allocated(device)
@@ -144,4 +142,8 @@ class AscendPlatform(Platform):

     @classmethod
     def supports_v1(cls, model_config: ModelConfig) -> bool:
-        return True
\ No newline at end of file
+        return True
+
+    @classmethod
+    def get_punica_wrapper(cls) -> str:
+        return "vllm_mindspore.lora.punica_wrapper.punica_npu.PunicaWrapperNPU"
-- 
Gitee

From 18fe97f458aeb067a15f85e644ba9998d482bf37 Mon Sep 17 00:00:00 2001
From: zlq2020
Date: Tue, 15 Apr 2025 19:31:44 +0800
Subject: [PATCH 08/76] Refactor AttentionMetadata

---
 vllm_mindspore/__init__.py                    |  12 +-
 vllm_mindspore/attention/backends/ms_attn.py  | 145 +++++++++++-------
 .../models/mf_models/deepseek_mtp.py          |   4 +-
 .../models/mf_models/deepseek_v3.py           |   7 +-
 .../models/mf_models/mf_model_base.py         |  72 +++------
 .../model_executor/models/mf_models/qwen2.py  |   8 +-
 .../model_executor/models/model_base.py       |  50 ++----
 vllm_mindspore/model_executor/models/qwen2.py |  34 ++--
 .../model_executor/models/registry.py         |   4 +-
 vllm_mindspore/platforms/ascend.py            |  49 +++---
 .../backends/{flash_attn.py => ms_attn.py}    | 113 ++++++--------
 vllm_mindspore/v1/spec_decode/eagle.py        |   5 +-
 vllm_mindspore/v1/worker/gpu_model_runner.py  |  55 +++++--
 13 files changed, 270 insertions(+), 288 deletions(-)
 rename vllm_mindspore/v1/attention/backends/{flash_attn.py => ms_attn.py} (64%)

diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py
index e8fa6e8bd..c5e2deb5c 100644
--- a/vllm_mindspore/__init__.py
+++ b/vllm_mindspore/__init__.py
@@ -274,6 +274,7 @@ MultiModalKwargs.as_kwargs = as_kwargs
 from vllm_mindspore.model_executor.layers.rotary_embedding import InferMRotaryEmbedding
 vllm.model_executor.layers.rotary_embedding.MRotaryEmbedding = InferMRotaryEmbedding

+# patch for V1
 from vllm_mindspore.v1.sample import rejection_sampler
 update_modules("vllm.v1.sample.rejection_sampler", rejection_sampler)

@@ -282,11 +283,8 @@
 from vllm_mindspore.v1.spec_decode import eagle

 update_modules("vllm.v1.spec_decode.eagle", eagle)

-from vllm_mindspore.v1.attention.backends import flash_attn
-import vllm.v1.attention.backends
-
-sys.modules['vllm.v1.attention.backends.flash_attn'] = flash_attn
-import vllm.v1.attention.backends.flash_attn
+from vllm_mindspore.v1.attention.backends import ms_attn
+update_modules("vllm.v1.attention.backends.flash_attn", ms_attn)

 import vllm.v1.worker.gpu_model_runner

@@ -298,9 +296,9 @@
 from vllm_mindspore.v1.worker.gpu_model_runner import _update_states

 vllm.v1.worker.gpu_model_runner.GPUModelRunner._update_states = _update_states

-from vllm_mindspore.v1.worker.gpu_model_runner import initialize_kv_cache
-
+from vllm_mindspore.v1.worker.gpu_model_runner import initialize_kv_cache, get_kv_cache_spec
 vllm.v1.worker.gpu_model_runner.GPUModelRunner.initialize_kv_cache = initialize_kv_cache
+vllm.v1.worker.gpu_model_runner.GPUModelRunner.get_kv_cache_spec = get_kv_cache_spec

 from vllm_mindspore.v1.worker.gpu_model_runner import wrapper_gpu_model_runner_execute_model
 from vllm.v1.worker.gpu_model_runner import GPUModelRunner

diff --git a/vllm_mindspore/attention/backends/ms_attn.py b/vllm_mindspore/attention/backends/ms_attn.py
index d6123b0a8..bca31412a 100644
--- a/vllm_mindspore/attention/backends/ms_attn.py
+++ b/vllm_mindspore/attention/backends/ms_attn.py
@@ -25,8 +25,6 @@ import os

 import numpy as np

-import torch
-
 from vllm.attention.backends.abstract import (
     AttentionBackend,
     AttentionImpl,
@@ -95,50 +93,86 @@ def advance_step_op(sampled_token_ids,


 @dataclass
-class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata):
-    """Metadata for TorchSDPABackend."""
-
-    # Currently, input sequences can only contain all prompts
-    # or all decoding. True if all sequences are prompts.
-    chunked_prefill: bool
-    seq_lens: Optional[List[int]] = None  # For non-chunked prefill
-
-    # For chunked prefill only
+class MsAttentionMetadata(AttentionMetadata):
+    """Metadata for MsAttentionBackend.
+    """
+    # (batch_size,). The sequence length per sequence. Sequence length means
+    # the computed tokens + new tokens. None if it is a decoding.
+    seq_lens: Optional[List[int]]
+    # seq_lens stored as a tensor.
+    seq_lens_tensor: Optional[ms.Tensor]
+
+    # NOTE(sang): Definition of context_len, query_len, and seq_len.
+    #   |---------- N-1 iteration --------|
+    #   |---------------- N iteration ---------------------|
+    #   |- tokenA -|......................|-- newTokens ---|
+    #   |---------- context_len ----------|
+    #   |-------------------- seq_len ---------------------|
+    #                                     |-- query_len ---|
+
+    # Maximum sequence length among prefill batch. 0 if there are decoding
+    # requests only.
+    max_prefill_seq_len: int
+    # Maximum sequence length among decode batch. 0 if there are prefill
+    # requests only.
+    max_decode_seq_len: int
+    # (batch_size,) A tensor of context lengths (tokens that are computed
+    # so far).
+    context_lens_tensor: Optional[ms.Tensor]
+
+    # (batch_size, max_blocks_per_seq).
+    # Block addresses per sequence. (Seq id -> list of physical block)
+    # E.g., [0, 1, 2] means tokens are stored in 0th, 1st, and 2nd blocks
+    # in the kv cache. Each block can contain up to block_size tokens.
+    # 2nd dimensions are padded up to max_blocks_per_seq if it is cuda-graph
+    # captured.
+    block_tables: Optional[ms.Tensor]
+
+    # Whether or not cuda graph is enabled.
+    # Cuda-graph is currently enabled for decoding only.
+    # TODO(woosuk): Move `use_cuda_graph` out since it's unrelated to attention.
+
+    use_cuda_graph: bool
+
+    # Maximum query length in the batch.
     max_query_len: Optional[int] = None

-    max_prefill_seq_len: int = 0
-    seq_start_loc: Optional[torch.Tensor] = None

-    _cached_prefill_metadata: Optional["MSAttentionMetadata"] = None
-    _cached_decode_metadata: Optional["MSAttentionMetadata"] = None
-    context_lens_tensor: Optional[torch.Tensor] = None
-    encoder_seq_start_loc: Optional[torch.Tensor] = None
+    # Max number of query tokens among requests in the batch.
     max_decode_query_len: Optional[int] = None

-    max_kv_len: Optional[int] = None
-    query_start_loc: Optional[torch.Tensor] = None
-    kv_start_loc: Optional[torch.Tensor] = None
-    prefill_block_tables: Optional[torch.Tensor] = None
-    query_lens: Optional[List[int]] = None
+    # (batch_size + 1,). The cumulative subquery lengths of the sequences in
+    # the batch, used to index into subquery. E.g., if the subquery length
+    # is [4, 6], it is [0, 4, 10].
+    query_start_loc: Optional[ms.Tensor] = None
+    # (batch_size + 1,). The cumulative sequence lengths of the sequences in
+    # the batch, used to index into sequence. E.g., if the sequence length is
+    # [4, 6], it is [0, 4, 10].
+    seq_start_loc: Optional[ms.Tensor] = None
+
+    _cached_prefill_metadata: Optional["MsAttentionMetadata"] = None
+    _cached_decode_metadata: Optional["MsAttentionMetadata"] = None

     # Begin encoder attn & enc/dec cross-attn fields...
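# A minimal, self-contained sketch of how the cumulative `query_start_loc` /
# `seq_start_loc` fields documented above are derived from per-sequence
# lengths; the [4, 6] values are assumed purely for illustration:
#
#     import numpy as np
#
#     query_lens = np.array([4, 6], dtype=np.int32)
#     query_start_loc = np.concatenate(([0], np.cumsum(query_lens)))
#     # query_start_loc -> [0, 4, 10]; tokens of sequence i occupy the slice
#     # [query_start_loc[i], query_start_loc[i + 1]) of the flattened batch.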
+ # Encoder sequence lengths representation encoder_seq_lens: Optional[List[int]] = None - encoder_seq_lens_tensor: Optional[torch.Tensor] = None - + encoder_seq_lens_tensor: Optional[ms.Tensor] = None + # (batch_size + 1,). The cumulative sequence lengths of the sequences in + # the batch, used to index into sequence. E.g., if the sequence length is + # [4, 6], it is [0, 4, 10]. + encoder_seq_start_loc: Optional[ms.Tensor] = None # Maximum sequence length among encoder sequences max_encoder_seq_len: Optional[int] = None - # Number of tokens input to encoder num_encoder_tokens: Optional[int] = None # Cross-attention memory-mapping data structures: slot mapping # and block tables - cross_slot_mapping: Optional[torch.Tensor] = None - cross_block_tables: Optional[torch.Tensor] = None - - use_cuda_graph: bool = False - enable_kv_scales_calculation: bool + cross_slot_mapping: Optional[ms.Tensor] = None + cross_block_tables: Optional[ms.Tensor] = None + # add by vllm-mindspore + query_lens: Optional[List[int]] = None @property def prefill_metadata(self): @@ -169,7 +203,7 @@ class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata): block_tables = (None if self.block_tables is None else self.block_tables[:self.num_prefills]) - self._cached_prefill_metadata = MSAttentionMetadata( + self._cached_prefill_metadata = MsAttentionMetadata( num_prefills=self.num_prefills, num_prefill_tokens=self.num_prefill_tokens, num_decode_tokens=0, @@ -193,7 +227,6 @@ class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata): encoder_seq_lens_tensor=self.encoder_seq_lens_tensor, encoder_seq_start_loc=self.encoder_seq_start_loc, max_encoder_seq_len=self.max_encoder_seq_len, - chunked_prefill=self.chunked_prefill, cross_slot_mapping=self.cross_slot_mapping, cross_block_tables=self.cross_block_tables) return self._cached_prefill_metadata @@ -216,7 +249,7 @@ class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata): block_tables = (None if self.block_tables is None else self.block_tables[self.num_prefills:]) - self._cached_decode_metadata = MSAttentionMetadata( + self._cached_decode_metadata = MsAttentionMetadata( num_prefills=0, num_prefill_tokens=0, num_decode_tokens=self.num_decode_tokens, @@ -245,14 +278,13 @@ class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata): encoder_seq_lens_tensor=self.encoder_seq_lens_tensor, encoder_seq_start_loc=self.encoder_seq_start_loc, max_encoder_seq_len=self.max_encoder_seq_len, - chunked_prefill=self.chunked_prefill, cross_slot_mapping=self.cross_slot_mapping, cross_block_tables=self.cross_block_tables) return self._cached_decode_metadata def advance_step(self, model_input: "ModelInputForNPUWithSamplingMetadata", - sampled_token_ids: Optional[torch.Tensor], + sampled_token_ids: Optional[ms.Tensor], block_size: int, num_seqs: int, num_queries: int, @@ -394,7 +426,7 @@ class MSAttentionMetadata(AttentionMetadata, PagedAttentionMetadata): raise AttributeError(f"Invalid attention type {str(attn_type)}") -class MsAttentionMetadataBuilder(AttentionMetadataBuilder[MSAttentionMetadata]): +class MsAttentionMetadataBuilder(AttentionMetadataBuilder[MsAttentionMetadata]): def __init__(self, input_builder: "ModelInputForGPUBuilder"): self.input_builder = input_builder @@ -545,7 +577,7 @@ class MsAttentionMetadataBuilder(AttentionMetadataBuilder[MSAttentionMetadata]): block_tables = make_tensor_with_pad( self.block_tables, pad=-1, - dtype=torch.int, + dtype=ms.int32, device=device, ) assert max_query_len > 0, "query_lens: {}".format(query_lens) @@ 
-557,13 +589,13 @@ class MsAttentionMetadataBuilder(AttentionMetadataBuilder[MSAttentionMetadata]): query_start_loc_tensor = ms.Tensor(query_start_loc, dtype=ms.int32) seq_start_loc_tensor = ms.Tensor(seq_start_loc, dtype=ms.int32) - return MSAttentionMetadata( + return MsAttentionMetadata( slot_mapping=slot_mapping_tensor, block_tables=block_tables, seq_lens_tensor=seq_lens_tensor, seq_lens=seq_lens, + max_prefill_seq_len=max_prefill_seq_len, max_decode_seq_len=max_decode_seq_len, - chunked_prefill=self.input_builder.chunked_prefill_enabled, num_prefills=self.num_prefills, num_prefill_tokens=self.num_prefill_tokens, num_decode_tokens=num_decode_tokens, @@ -574,6 +606,7 @@ class MsAttentionMetadataBuilder(AttentionMetadataBuilder[MSAttentionMetadata]): seq_start_loc=seq_start_loc_tensor, context_lens_tensor=context_lens_tensor, max_query_len=max_query_len, + use_cuda_graph=False, ) @@ -590,7 +623,7 @@ class MsAttentionBackend(AttentionBackend): @staticmethod def get_metadata_cls() -> Type["AttentionMetadata"]: - return MSAttentionMetadata + return MsAttentionMetadata @staticmethod def get_builder_cls() -> Type["MsAttentionMetadataBuilder"]: @@ -615,7 +648,7 @@ class MsAttentionBackend(AttentionBackend): def swap_blocks( src_kv_cache: MsKVCache, dst_kv_cache: MsKVCache, - src_to_dst: torch.Tensor, + src_to_dst: ms.Tensor, swap_type: bool, ) -> None: """ @@ -637,7 +670,7 @@ class MsAttentionBackend(AttentionBackend): @staticmethod def copy_blocks( kv_caches: List[MsKVCache], - src_to_dists: torch.Tensor, + src_to_dists: ms.Tensor, ) -> None: blocks_to_copy = src_to_dists.asnumpy().tolist() for kv_cache in kv_caches: @@ -691,14 +724,14 @@ class MsAttentionImpl(AttentionImpl): def forward( self, layer: AttentionLayer, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: MSAttentionMetadata, + query: ms.Tensor, + key: ms.Tensor, + value: ms.Tensor, + kv_cache: ms.Tensor, + attn_metadata: MsAttentionMetadata, attn_type: str = AttentionType.DECODER, - output: Optional[torch.Tensor] = None, - ) -> torch.Tensor: + output: Optional[ms.Tensor] = None, + ) -> ms.Tensor: """Forward pass with FlashAttention. 
Args: @@ -726,7 +759,7 @@ class MLABackend(AttentionBackend): @staticmethod def get_metadata_cls() -> Type["AttentionMetadata"]: - return MSAttentionMetadata + return MsAttentionMetadata @staticmethod def get_builder_cls() -> Type["MsAttentionMetadataBuilder"]: @@ -747,9 +780,9 @@ class MLABackend(AttentionBackend): @staticmethod def swap_blocks( - src_kv_cache: torch.Tensor, - dst_kv_cache: torch.Tensor, - src_to_dst: torch.Tensor, + src_kv_cache: ms.Tensor, + dst_kv_cache: ms.Tensor, + src_to_dst: ms.Tensor, ) -> None: src_key_cache = src_kv_cache[0] dst_key_cache = dst_kv_cache[0] @@ -758,8 +791,8 @@ class MLABackend(AttentionBackend): @staticmethod def copy_blocks( - kv_caches: List[torch.Tensor], - src_to_dists: torch.Tensor, + kv_caches: List[ms.Tensor], + src_to_dists: ms.Tensor, ) -> None: blocks_to_copy = src_to_dists.asnumpy().tolist() for kv_cache in kv_caches: @@ -771,4 +804,4 @@ class MLABackend(AttentionBackend): def get_supported_head_sizes() -> List[int]: return [576] -FlashAttentionMetadata = MSAttentionMetadata +FlashAttentionMetadata = MsAttentionMetadata diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_mtp.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_mtp.py index c0b72f4df..6265d110e 100644 --- a/vllm_mindspore/model_executor/models/mf_models/deepseek_mtp.py +++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_mtp.py @@ -34,7 +34,7 @@ from research.deepseek3.deepseek3 import ( ) from vllm_mindspore.model_executor.layers.sampler import get_sampler -from vllm_mindspore.model_executor.models.model_base import Fake_MLA +from vllm_mindspore.model_executor.models.model_base import MLAAttentionWrapper from vllm_mindspore.model_executor.models.mf_models.mf_model_base import MfModelBase from vllm_mindspore.model_executor.models.mf_models.deepseekv3_weight_processor import DeepseekV3WeightProcessor @@ -50,7 +50,7 @@ class DeepseekV3MTPForCausalLM(MfModelBase): self.sampler = get_sampler() self.set_modules({"model": self.network}) - self.kv_caches = [Fake_MLA() for i in range(self.mf_model_config.num_layers)] + self.kv_caches = [MLAAttentionWrapper() for i in range(self.mf_model_config.num_layers)] compilation_config = get_current_vllm_config().compilation_config if prefix in compilation_config.static_forward_context: diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py index d09d8d265..837070b14 100644 --- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py +++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py @@ -50,7 +50,7 @@ from research.deepseek3.deepseek3 import ( ) from vllm_mindspore.model_executor.layers.sampler import get_sampler -from vllm_mindspore.model_executor.models.model_base import Fake_MLA, Fake_MLA_V1 +from vllm_mindspore.model_executor.models.model_base import MLAAttentionWrapper from vllm_mindspore.model_executor.models.mf_models.mf_model_base import MfModelBase from vllm_mindspore.model_executor.models.mf_models.deepseekv3_weight_processor import DeepseekV3WeightProcessor from vllm_mindspore.model_executor.models.attention_mask import MLALowerTriangularMask @@ -124,10 +124,7 @@ class DeepseekV3ForCausalLM(MfModelBase): self.sampler = get_sampler() self.set_modules({"model": self.network}) - if envs.VLLM_USE_V1: - self.kv_caches = [Fake_MLA_V1() for i in range(self.mf_model_config.num_layers)] - else: - self.kv_caches = [Fake_MLA() for i in range(self.mf_model_config.num_layers)] + self.kv_caches = 
[MLAAttentionWrapper() for i in range(self.mf_model_config.num_layers)] compilation_config = get_current_vllm_config().compilation_config if prefix in compilation_config.static_forward_context: diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py index 8a5a07778..5475d5653 100644 --- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py +++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py @@ -43,7 +43,7 @@ from mindformers.tools.utils import is_pynative from vllm_mindspore.model_executor.models.model_base import MsModelBase from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask -from vllm_mindspore.v1.attention.backends.flash_attn import FlashAttentionMetadata +from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata logger = init_logger(__name__) @@ -87,24 +87,23 @@ class MfModelBase(MsModelBase): dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.mf_model_config.compute_dtype) self.lm_head.set_inputs(dynamic_hidden_states) - def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor) -> FlashAttentionMetadata: + + def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor) -> MsAttentionMetadata: input_len = input_ids.shape[0] max_seq_len = ms.Tensor(input_len, dtype=ms.int32) seq_lengths = ms.Tensor([input_len], dtype=ms.int32) - q_seq_lens = ms.Tensor([input_len], dtype=ms.int32) q_seq_lens_np = np.array([input_len], dtype=np.int32) seq_lens_np = np.array([input_len], dtype=np.int32) block_tables = ms.Tensor([[0]], dtype=ms.int32) slot_mapping = [-1 for _ in range(input_len)] slot_mapping = ms.Tensor(slot_mapping, dtype=ms.int32) - return FlashAttentionMetadata( + return MsAttentionMetadata( max_seq_len=max_seq_len, seq_lens=seq_lengths, seq_lens_np=seq_lens_np, block_tables=block_tables, slot_mapping=slot_mapping, - q_seq_lens=q_seq_lens, q_seq_lens_np=q_seq_lens_np, context_lens=0, # To enforce prefill and decode are both complied in warmup process. @@ -116,16 +115,7 @@ class MfModelBase(MsModelBase): def prepare_inputs(self, input_ids, positions, attn_metadata): key_cache, value_cache = self.get_kvcache() if not envs.VLLM_USE_V1: - seq_lens = attn_metadata.seq_lens - max_query_len = attn_metadata.max_query_len - # When Mutli-Step is enabled with Chunked-Prefill, prefills and - # decodes are scheduled together. In the first step, all the - # prefills turn into decodes and max_query_len will be 1. 
- if self.is_multi_step_chunked_prefill and max_query_len == 1: - query_lens = [1] * len(seq_lens) - else: - query_lens = attn_metadata.query_lens - + # V0 seq_lens = attn_metadata.seq_lens max_query_len = attn_metadata.max_query_len # When Mutli-Step is enabled with Chunked-Prefill, prefills and @@ -142,41 +132,27 @@ class MfModelBase(MsModelBase): if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max() == 0: is_prefill = True else: - is_prefill = False - - q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) - position_ids = ms.Tensor(positions, dtype=ms.int32) - attention_mask = self.casual_mask.gen_attention_mask(is_prefill, position_ids, query_lens) - - model_inputs = {} - model_inputs["input_ids"] = input_ids.astype(ms.int32) - model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np) - model_inputs["block_tables"] = attn_metadata.block_tables - model_inputs["slot_mapping"] = attn_metadata.slot_mapping - model_inputs["position_ids"] = position_ids - model_inputs["q_seq_lens"] = q_seq_lens - model_inputs["attention_mask"] = attention_mask - model_inputs["key_cache"] = key_cache - model_inputs["value_cache"] = value_cache + is_prefill = False else: - if attn_metadata.max_context_lens == 0: - is_prefill = True - else: - is_prefill = False - q_seq_lens = attn_metadata.q_seq_lens + # V1 + is_prefill = True if attn_metadata.max_context_lens == 0 else False query_lens_np = attn_metadata.q_seq_lens_np - attention_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np) - - model_inputs = {} - model_inputs["input_ids"] = input_ids.astype(ms.int32) - model_inputs["batch_valid_length"] = ms.from_numpy(attn_metadata.seq_lens_np) - model_inputs["block_tables"] = attn_metadata.block_tables - model_inputs["slot_mapping"] = attn_metadata.slot_mapping - model_inputs["position_ids"] = positions.to(ms.int32) - model_inputs["q_seq_lens"] = q_seq_lens - model_inputs["attention_mask"] = attention_mask - model_inputs["key_cache"] = key_cache - model_inputs["value_cache"] = value_cache + seq_lens_np = attn_metadata.seq_lens_np + + q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) + position_ids = ms.Tensor(positions, dtype=ms.int32) + attention_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np) + + model_inputs = {} + model_inputs["input_ids"] = input_ids.astype(ms.int32) + model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np) + model_inputs["block_tables"] = attn_metadata.block_tables + model_inputs["slot_mapping"] = attn_metadata.slot_mapping + model_inputs["position_ids"] = position_ids + model_inputs["q_seq_lens"] = q_seq_lens + model_inputs["attention_mask"] = attention_mask + model_inputs["key_cache"] = key_cache + model_inputs["value_cache"] = value_cache return model_inputs, is_prefill diff --git a/vllm_mindspore/model_executor/models/mf_models/qwen2.py b/vllm_mindspore/model_executor/models/mf_models/qwen2.py index d871be483..fdb987daf 100644 --- a/vllm_mindspore/model_executor/models/mf_models/qwen2.py +++ b/vllm_mindspore/model_executor/models/mf_models/qwen2.py @@ -33,9 +33,8 @@ from research.qwen2_5.infer.qwen2_5 import ( ) from vllm_mindspore.model_executor.layers.sampler import get_sampler -from vllm_mindspore.model_executor.models.model_base import Fake_Attention, Fake_Attention_V1 +from vllm_mindspore.model_executor.models.model_base import AttentionWrapper from vllm_mindspore.model_executor.models.mf_models.mf_model_base import MfModelBase - from 
vllm_mindspore.model_executor.models.mf_models.qwen2_weight_processor import Qwen2WeightProcessor @@ -49,10 +48,7 @@ class Qwen2ForCausalLM(MfModelBase): self.sampler = get_sampler() self.set_modules({"model": self.network}) - if envs.VLLM_USE_V1: - self.kv_caches = [Fake_Attention_V1() for i in range(self.mf_model_config.num_layers)] - else: - self.kv_caches = [Fake_Attention() for i in range(self.mf_model_config.num_layers)] + self.kv_caches = [AttentionWrapper() for i in range(self.mf_model_config.num_layers)] compilation_config = get_current_vllm_config().compilation_config if prefix in compilation_config.static_forward_context: diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py index c980050e2..fde8a25aa 100644 --- a/vllm_mindspore/model_executor/models/model_base.py +++ b/vllm_mindspore/model_executor/models/model_base.py @@ -19,10 +19,10 @@ import os from abc import abstractmethod from typing import Dict, Iterable, Optional, Set, Tuple, Union -import torch +import mindspore as ms from mindspore import Tensor, mutable, nn + from vllm.attention.backends.abstract import AttentionType -from vllm.attention.layer import Attention from vllm.config import VllmConfig, get_current_vllm_config from vllm.forward_context import get_forward_context from vllm.model_executor.layers.sampler import SamplerOutput @@ -30,8 +30,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -class Fake_Attention: - +class AttentionWrapper: def __init__(self): vllm_config = get_current_vllm_config() block_size = vllm_config.cache_config.block_size @@ -40,41 +39,16 @@ class Fake_Attention: head_size = vllm_config.model_config.get_head_size() num_block = 0 self.kv_shape = [num_block, block_size, num_kv_heads, head_size] - self.kv_cache = [( - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size)] - self.attn_type = AttentionType.DECODER - - -class Fake_MLA(Fake_Attention): - - def __init__(self): - super().__init__() - vllm_config = get_current_vllm_config() self.kv_cache = [ - (torch.zeros(self.kv_shape, dtype=torch.bfloat16, - device="Ascend"), ) + ( + ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), + ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), + ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size) ] - - -class Fake_Attention_V1(Attention): - - def __init__(self): - vllm_config = get_current_vllm_config() - block_size = vllm_config.cache_config.block_size - num_kv_heads = vllm_config.model_config.get_num_kv_heads( - vllm_config.parallel_config) - head_size = vllm_config.model_config.get_head_size() - num_block = 0 - self.kv_shape = [num_block, block_size, num_kv_heads, head_size] - self.kv_cache = [( - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - torch.zeros(self.kv_shape, dtype=torch.bfloat16, device="Ascend"), - ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size)] self.attn_type = AttentionType.DECODER - self.num_block = num_block + + # add for v1 self.num_kv_heads = num_kv_heads self.head_size = head_size self.dtype = vllm_config.model_config.dtype @@ -82,14 +56,12 @@ class Fake_Attention_V1(Attention): self.sliding_window = None -class Fake_MLA_V1(Fake_Attention_V1): - +class MLAAttentionWrapper(AttentionWrapper): def __init__(self): 
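# Illustrative sketch only (shapes assumed, not taken from a real config):
# unlike AttentionWrapper above, which registers a (key, value) pair per
# layer, the MLA wrapper keeps a single latent tensor per layer, e.g.
#
#     kv_shape = [0, block_size, 1, 576]  # 576 = the MLA head size above
#     kv_cache = [(ms.mint.zeros(kv_shape, dtype=model_dtype),)]  # 1-tuple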
super().__init__() vllm_config = get_current_vllm_config() self.kv_cache = [ - (torch.zeros(self.kv_shape, dtype=torch.bfloat16, - device="Ascend"), ) + (ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype),) for _ in range(vllm_config.parallel_config.pipeline_parallel_size) ] diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index fcd9bb51a..351ffafff 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -16,6 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ +import numpy as np from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, Union) @@ -25,10 +26,10 @@ else: Qwen2Config = None import mindspore as ms -import numpy as np -import vllm.envs as envs from mindspore import Parameter, Tensor, mint, mutable, nn, ops from mindspore.common import dtype as mstype + +import vllm.envs as envs from vllm.attention.backends.abstract import AttentionType from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size @@ -36,6 +37,7 @@ from vllm.forward_context import get_forward_context from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.models.interfaces import SupportsLoRA from vllm.sequence import IntermediateTensors +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm_mindspore.attention import Attention from vllm_mindspore.model_executor.layers.activation import SwiGLU @@ -51,18 +53,13 @@ from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) from vllm_mindspore.model_executor.model_loader.weight_utils import \ default_weight_loader -from vllm_mindspore.model_executor.models.attention_mask import \ - LowerTriangularMask -from vllm_mindspore.model_executor.models.model_base import (Fake_Attention, - Fake_Attention_V1, - MsModelBase) from vllm_mindspore.model_executor.models.utils import ( PPMissingLayer, _jit, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix, set_enforce_eager) -from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm_mindspore.model_executor.models.model_base import MsModelBase, AttentionWrapper +from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE -from vllm_mindspore.v1.attention.backends.flash_attn import \ - FlashAttentionMetadata +from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata class Qwen2MLP(nn.Cell): @@ -491,14 +488,7 @@ class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): self.casual_mask = LowerTriangularMask( dtype=self.mstype, max_model_len=self.model_config.max_model_len) self.set_model_inputs(self.prefill) - if envs.VLLM_USE_V1: - self.kv_caches = [ - Fake_Attention_V1() for i in range(config.num_hidden_layers) - ] - else: - self.kv_caches = [ - Fake_Attention() for i in range(config.num_hidden_layers) - ] + self.kv_caches = [AttentionWrapper() for i in range(config.num_hidden_layers)] compilation_config = vllm_config.compilation_config if prefix in compilation_config.static_forward_context: @@ -588,11 +578,11 @@ class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): is_prefill = attn_metadata.max_context_lens == 0 slot_mapping = 
attn_metadata.slot_mapping batch_valid_length = Tensor.from_numpy(attn_metadata.seq_lens_np) - q_seq_lens = attn_metadata.q_seq_lens block_tables = attn_metadata.block_tables query_lens_np = attn_metadata.q_seq_lens_np attn_mask = self.casual_mask.gen_attention_mask( is_prefill, positions, query_lens_np) + q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) positions = positions.to(ms.int64) if is_prefill: if not self.prefill: @@ -617,24 +607,22 @@ class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): return model_output def _dummy_attention_metadata(self, input_ids: Tensor, - positions: Tensor) -> FlashAttentionMetadata: + positions: Tensor) -> MsAttentionMetadata: input_len = input_ids.shape[0] max_seq_len = ms.Tensor(input_len, dtype=ms.int32) seq_lengths = ms.Tensor([input_len], dtype=ms.int32) - q_seq_lens = ms.Tensor([input_len], dtype=ms.int32) q_seq_lens_np = np.array([input_len], dtype=np.int32) seq_lens_np = np.array([input_len], dtype=np.int32) block_tables = ms.Tensor([[0]], dtype=ms.int32) slot_mapping = [-1 for _ in range(input_len)] slot_mapping = ms.Tensor(slot_mapping, dtype=ms.int32) - return FlashAttentionMetadata( + return MsAttentionMetadata( max_seq_len=max_seq_len, seq_lens=seq_lengths, seq_lens_np=seq_lens_np, block_tables=block_tables, slot_mapping=slot_mapping, - q_seq_lens=q_seq_lens, q_seq_lens_np=q_seq_lens_np, context_lens=0, # To enforce prefill and decode are both complied in warmup process. diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index bdc43d8bf..a9c2b9a3e 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -25,7 +25,7 @@ from vllm.model_executor.models.registry import (_LazyRegisteredModel, from vllm_mindspore.utils import (is_mindformers_model_backend, is_mindone_model_backend) -_MINDSPORE_MODELS = { +_NATIVE_MODELS = { "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), } @@ -69,7 +69,7 @@ else: module_name=f"vllm_mindspore.model_executor.models.{mod_relname}", class_name=cls_name, ) - for model_arch, (mod_relname, cls_name) in _MINDSPORE_MODELS.items() + for model_arch, (mod_relname, cls_name) in _NATIVE_MODELS.items() } MindSporeModelRegistry = _ModelRegistry(_registry_dict) diff --git a/vllm_mindspore/platforms/ascend.py b/vllm_mindspore/platforms/ascend.py index 89e228290..43d5d1773 100644 --- a/vllm_mindspore/platforms/ascend.py +++ b/vllm_mindspore/platforms/ascend.py @@ -65,29 +65,34 @@ class AscendPlatform(Platform): @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: - """ - Check and update the configuration for the current platform. - - It can raise an exception if the configuration is not compatible with - the current platform, or it can update the configuration to make it - compatible with the current platform. - - The config is passed by reference, so it can be modified in place. 
- """ parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config + compilation_config = vllm_config.compilation_config + model_config = vllm_config.model_config - import vllm.envs as envs - if envs.VLLM_USE_V1: - parallel_config.worker_cls = \ - "vllm.v1.worker.gpu_worker.Worker" - else: - if parallel_config.worker_cls == "auto": - if scheduler_config.is_multi_step: - parallel_config.worker_cls = "vllm.worker.multi_step_worker.MultiStepWorker" - elif vllm_config.speculative_config: - parallel_config.worker_cls = "vllm.spec_decode.spec_decode_worker.create_spec_worker" - parallel_config.sd_worker_cls = "vllm.worker.worker.Worker" + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + if envs.VLLM_USE_V1: + raise NotImplementedError( + "Multi-step scheduling is not supported (and not " + "needed) on vLLM V1. Please launch without " + "--num-scheduler-steps.") + else: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + if envs.VLLM_USE_V1: + parallel_config.worker_cls = \ + "vllm.v1.worker.gpu_worker.Worker" + else: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" + else: + if envs.VLLM_USE_V1: + parallel_config.worker_cls = \ + "vllm.v1.worker.gpu_worker.Worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" @@ -104,8 +109,8 @@ class AscendPlatform(Platform): """Get the attention backend class of a device.""" if use_v1: if use_mla: - return "vllm_mindspore.v1.attention.backends.flash_attn.MLABackend" - return "vllm_mindspore.v1.attention.backends.flash_attn.FlashAttentionBackend" + return "vllm_mindspore.v1.attention.backends.ms_attn.MLABackend" + return "vllm_mindspore.v1.attention.backends.ms_attn.MsAttentionBackend" raise RuntimeError("vLLM-MindSpore do not support v1 egine now!") if use_mla: logger.info("Using MindSpore MLA backend.") diff --git a/vllm_mindspore/v1/attention/backends/flash_attn.py b/vllm_mindspore/v1/attention/backends/ms_attn.py similarity index 64% rename from vllm_mindspore/v1/attention/backends/flash_attn.py rename to vllm_mindspore/v1/attention/backends/ms_attn.py index b5c5629ee..e8a940358 100644 --- a/vllm_mindspore/v1/attention/backends/flash_attn.py +++ b/vllm_mindspore/v1/attention/backends/ms_attn.py @@ -4,7 +4,6 @@ from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Type import numpy as np -import torch from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, AttentionMetadata, AttentionType) @@ -21,7 +20,7 @@ from mindspore._c_expression import swap_cache logger = init_logger(__name__) -class FlashAttentionBackend(AttentionBackend): +class MsAttentionBackend(AttentionBackend): accept_output_buffer: bool = True @@ -39,11 +38,11 @@ class FlashAttentionBackend(AttentionBackend): @staticmethod def get_metadata_cls() -> Type["AttentionMetadata"]: - return FlashAttentionMetadata + return MsAttentionMetadata @staticmethod - def get_builder_cls() -> Type["AttentionMetadataBuilder"]: - return FlashAttentionMetadataBuilder + def get_builder_cls() -> Type["MsAttentionMetadataBuilder"]: + return MsAttentionMetadataBuilder @staticmethod def get_kv_cache_shape( @@ -72,11 +71,11 @@ class MLABackend(AttentionBackend): @staticmethod def get_metadata_cls() -> Type["AttentionMetadata"]: - return FlashAttentionMetadata + return MsAttentionMetadata @staticmethod - def 
get_builder_cls() -> Type["AttentionMetadataBuilder"]: - return FlashAttentionMetadataBuilder + def get_builder_cls() -> Type["MsAttentionMetadataBuilder"]: + return MsAttentionMetadataBuilder @staticmethod def get_kv_cache_shape( @@ -98,8 +97,12 @@ class MLABackend(AttentionBackend): return [576] + @dataclass -class FlashAttentionMetadata: +class MsAttentionMetadata: + """ + AttentionMetadata for vllm-mindspore V1 + """ # NOTE(sang): Definition of context_len, query_len, and seq_len. # |---------- N-1 iteration --------| # |---------------- N iteration ---------------------| @@ -108,47 +111,36 @@ class FlashAttentionMetadata: # |-------------------- seq_len ---------------------| # |-- query_len ---| - max_seq_len: int - seq_lens: torch.Tensor + # add by vllm-mindspore begin seq_lens_np: np.ndarray - block_tables: torch.Tensor - slot_mapping: torch.Tensor - q_seq_lens: torch.Tensor + block_tables: ms.Tensor q_seq_lens_np: np.ndarray - context_lens: torch.Tensor + context_lens: ms.Tensor max_context_lens: int - query_start_loc: torch.Tensor - - def __getitem__(self, key): - if key == "batch_valid_length": - key = "seq_lens" - return getattr(self, key) - - -class MsAttentionImpl(AttentionImpl): - """ - If the input tensors contain prompt tokens, the layout is as follows: - |<--------------- num_prefill_tokens ----------------->| - |<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->| + # add by vllm-mindspore end - Otherwise, the layout is as follows: - |<----------------- num_decode_tokens ------------------>| - |<--decode_0-->|..........|<--decode_M-1-->|<--padding-->| - - Generation tokens can contain padding when cuda-graph is used. - Currently, prompt tokens don't contain any padding. + #num_actual_tokens: int = None # Number of tokens excluding padding. + #max_query_len: int + query_start_loc: ms.Tensor + max_seq_len: int + seq_lens: ms.Tensor + #block_table: torch.Tensor + slot_mapping: ms.Tensor - The prompts might have different lengths, while the generation tokens - always have length 1. + # For cascade attention. + #use_cascade: bool + #common_prefix_len: int + #cu_prefix_query_lens: Optional[torch.Tensor] + #prefix_kv_lens: Optional[torch.Tensor] + #suffix_kv_lens: Optional[torch.Tensor] - If chunked prefill is enabled, prefill tokens and decode tokens can be - batched together in a flattened 1D query. + # For logging. + num_input_tokens: int = 0 # Number of tokens including padding. - |<----- num_prefill_tokens ---->|<------- num_decode_tokens --------->| - |<-prefill_0->|...|<-prefill_N-1->|<--decode_0-->|...|<--decode_M-1-->| - Currently, cuda graph is disabled for chunked prefill, meaning there's no - padding between prefill and decode tokens. +class MsAttentionImpl(AttentionImpl): + """ + AttentionImpl for vllm-mindspore V1 """ def __init__( @@ -168,31 +160,20 @@ class MsAttentionImpl(AttentionImpl): def forward( self, - layer: torch.nn.Module, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: FlashAttentionMetadata, - output: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - """Forward pass with FlashAttention. - - Args: - query: shape = [num_tokens, num_heads, head_size] - key: shape = [num_tokens, num_kv_heads, head_size] - value: shape = [num_tokens, num_kv_heads, head_size] - output: shape = [num_tokens, num_heads, head_size] - kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size] - NOTE: kv_cache will be an empty tensor with shape [0] - for profiling run. 
- attn_metadata: Metadata for attention. - NOTE: It in-place updates the output tensor. + layer: ms.nn.Cell, + query: ms.Tensor, + key: ms.Tensor, + value: ms.Tensor, + kv_cache: ms.Tensor, + attn_metadata: MsAttentionMetadata, + output: Optional[ms.Tensor] = None, + ) -> ms.Tensor: + """Forward pass with MsAttention. """ pass -class FlashAttentionMetadataBuilder: +class MsAttentionMetadataBuilder: def __init__(self, runner: "GPUModelRunner"): self.runner = runner @@ -213,14 +194,12 @@ class FlashAttentionMetadataBuilder: context_lens = ms.from_numpy(self.runner.input_batch.num_computed_tokens_cpu[:num_reqs]) q_seq_lens_np = np.diff(self.runner.query_start_loc_np[:num_reqs + 1]) - q_seq_lens = ms.from_numpy(q_seq_lens_np) - attn_metadata = FlashAttentionMetadata( + attn_metadata = MsAttentionMetadata( seq_lens=seq_lens, seq_lens_np=seq_lens_np, block_tables=(self.runner.input_batch.block_table.get_device_tensor()[:num_reqs]), slot_mapping=slot_mapping, - q_seq_lens=q_seq_lens, q_seq_lens_np=q_seq_lens_np, max_seq_len=max_seq_len, context_lens=context_lens, @@ -228,3 +207,5 @@ class FlashAttentionMetadataBuilder: query_start_loc = query_start_loc ) return attn_metadata + +FlashAttentionMetadata = MsAttentionMetadata \ No newline at end of file diff --git a/vllm_mindspore/v1/spec_decode/eagle.py b/vllm_mindspore/v1/spec_decode/eagle.py index 7279bcaf5..0e252f88b 100644 --- a/vllm_mindspore/v1/spec_decode/eagle.py +++ b/vllm_mindspore/v1/spec_decode/eagle.py @@ -4,7 +4,7 @@ import torch.nn as nn from vllm.config import VllmConfig from vllm.forward_context import set_forward_context -from vllm_mindspore.v1.attention.backends.flash_attn import FlashAttentionMetadata +from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata from vllm.v1.sample.metadata import SamplingMetadata @@ -56,7 +56,8 @@ class EagleProposer: # FIXME(woosuk): The below two ops cause synchronization. Optimize. 
max_seq_len = seq_lens.max().item() max_num_tokens = (cu_num_tokens[1:] - cu_num_tokens[:-1]).max().item() - attn_metadata = FlashAttentionMetadata( + # TODO: new members need to be added to the MsAttentionMetadata for Eagle feature + attn_metadata = MsAttentionMetadata( num_actual_tokens=num_tokens, max_query_len=max_num_tokens, query_start_loc=cu_num_tokens, diff --git a/vllm_mindspore/v1/worker/gpu_model_runner.py b/vllm_mindspore/v1/worker/gpu_model_runner.py index 7854e2942..f53d49d4d 100644 --- a/vllm_mindspore/v1/worker/gpu_model_runner.py +++ b/vllm_mindspore/v1/worker/gpu_model_runner.py @@ -6,13 +6,14 @@ import torch from mindspore import mutable import mindspore as ms -from vllm_mindspore.v1.attention.backends.flash_attn import (FlashAttentionMetadata, - FlashAttentionBackend, - MLABackend) +from vllm_mindspore.v1.attention.backends.ms_attn import (MsAttentionMetadata, + MsAttentionBackend, + MLABackend) from vllm_mindspore.utils import get_valid_dtype -from vllm.v1.kv_cache_interface import FullAttentionSpec from vllm.v1.outputs import ModelRunnerOutput +from vllm.attention import AttentionType +from vllm.v1.kv_cache_interface import FullAttentionSpec, KVCacheSpec, SlidingWindowSpec from vllm.v1.utils import bind_kv_cache from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.distributed.parallel_state import get_pp_group @@ -27,7 +28,7 @@ logger = init_logger(__name__) def _prepare_inputs( self, scheduler_output: "SchedulerOutput", -) -> Tuple[FlashAttentionMetadata, torch.Tensor]: +) -> Tuple[MsAttentionMetadata, torch.Tensor]: total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens assert total_num_scheduled_tokens > 0 num_reqs = self.input_batch.num_reqs @@ -125,11 +126,8 @@ def _prepare_inputs( num_scheduled_tokens) common_prefix_len = 0 - if self.cascade_attn_enabled: - common_prefix_len = self._compute_cascade_attn_prefix_len( - num_scheduled_tokens, - scheduler_output.num_common_prefix_blocks, - ) + # when common_prefix_len > 0 use cascade_attn, + # which is associated with device_properties.multi_processor_count(CUDA). attn_metadata = self.attn_metadata_builder.build( num_reqs=num_reqs, @@ -441,3 +439,40 @@ def wrapper_gpu_model_runner_execute_model(func): ) return new_func + + +def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: + forward_ctx = self.vllm_config.compilation_config.static_forward_context + block_size = self.vllm_config.cache_config.block_size + use_mla = self.vllm_config.model_config.use_mla + kv_cache_spec: dict[str, KVCacheSpec] = {} + for layer_name, attn_module in forward_ctx.items(): + # vllm-mindspore AttentionWrapper is not an Attention isinstance + # assert isinstance(attn_module, Attention) + if attn_module.attn_type == AttentionType.DECODER: + if attn_module.sliding_window is not None: + kv_cache_spec[layer_name] = SlidingWindowSpec( + block_size=block_size, + num_kv_heads=attn_module.num_kv_heads, + head_size=attn_module.head_size, + dtype=self.kv_cache_dtype, + sliding_window=attn_module.sliding_window, + use_mla=use_mla) + else: + kv_cache_spec[layer_name] = FullAttentionSpec( + block_size=block_size, + num_kv_heads=attn_module.num_kv_heads, + head_size=attn_module.head_size, + dtype=self.kv_cache_dtype, + use_mla=use_mla) + elif attn_module.attn_type in (AttentionType.ENCODER, + AttentionType.ENCODER_ONLY): + # encoder-only attention does not need KV cache. 
+ continue + elif attn_module.attn_type == AttentionType.ENCODER_DECODER: + raise NotImplementedError + else: + raise ValueError( + f"Unknown attention type: {attn_module.attn_type}") + + return kv_cache_spec -- Gitee From 70c105036df4ffd24b1542ff2fdf59f4a7facef8 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 9 Jun 2025 16:29:35 +0800 Subject: [PATCH 09/76] refactor InferRotaryEmbedding --- .../model_executor/layers/rotary_embedding.py | 61 ++++++++++++------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py index 0cf464e39..da73c2ded 100644 --- a/vllm_mindspore/model_executor/layers/rotary_embedding.py +++ b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -19,14 +19,11 @@ from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import mindspore -from mindspore import Tensor, mint, ops +from mindspore import Tensor, mint, ops, nn from mindspore.common import dtype as mstype from transformers import PretrainedConfig -from vllm_mindspore.model_executor.custom_op import CustomOp - - def _apply_rotary_emb( x: Tensor, cos: Tensor, @@ -56,7 +53,7 @@ def _apply_rotary_emb( return mint.stack((o1, o2), dim=-1).flatten(-2) -class RotaryEmbedding(CustomOp): +class RotaryEmbedding(nn.Cell): def __init__( self, @@ -102,7 +99,7 @@ class RotaryEmbedding(CustomOp): cache = mint.cat((cos, sin), dim=-1) return cache - def forward_native( + def construct( self, positions: Tensor, query: Tensor, @@ -133,7 +130,7 @@ class RotaryEmbedding(CustomOp): return query, key -class InferRotaryEmbedding(CustomOp): +class InferRotaryEmbedding(nn.Cell): def __init__( self, @@ -144,24 +141,43 @@ class InferRotaryEmbedding(CustomOp): is_neox_style: bool, dtype, ) -> None: + if not is_neox_style: + raise NotImplementedError( + "InferRotaryEmbedding only support Neox-style rotary embeddings." + ) super().__init__() - freqs_base = np.arange(0, rotary_dim, 2)[:(rotary_dim // 2)].astype( + self.rotary_embedding_op = ops.ApplyRotaryPosEmb(2) + self.gather = ops.Gather() + self.head_size = head_size + self.rotary_dim = rotary_dim + self.max_position_embeddings = max_position_embeddings + self.base = base + self.is_neox_style = is_neox_style + self.dtype = dtype + self.freqs_cos, self.freqs_sin = self._compute_cos_sin_cache() + + def _compute_inv_freq(self, base: Union[int, float]) -> Tensor: + """ + Compute the inverse frequency with numpy. + Numpy process is faster during initialization. 
+ """ + freqs_base = np.arange(0, self.rotary_dim, 2).astype( np.float32) # (head_dim // 2, ) - freqs = 1.0 / (base**(freqs_base / rotary_dim)) # (head_dim // 2, ) - mscale = 1.0 - t = np.arange(0, max_position_embeddings, 1).astype(np.float32) + freqs = 1.0 / (base**(freqs_base / self.rotary_dim)) # (head_dim // 2, ) + return freqs - self.freqs = Tensor(freqs.reshape(1, 1, 1, -1), dtype=dtype) + def _compute_cos_sin_cache(self) -> Tuple[Tensor, Tensor]: + freqs = self._compute_inv_freq(self.base) + t = np.arange(0, self.max_position_embeddings, 1).astype(np.float32) freqs = np.outer(t, freqs) # (max_position_embedding, head_dim // 2) emb = np.concatenate((freqs, freqs), axis=-1) - freqs_cos = np.cos(emb) * mscale # (seq_len, head_dim) - freqs_sin = np.sin(emb) * mscale # (seq_len, head_dim) - self.freqs_cos = Tensor(freqs_cos, dtype=dtype) - self.freqs_sin = Tensor(freqs_sin, dtype=dtype) - self.rotary_embedding_op = ops.ApplyRotaryPosEmb(2) - self.gather = ops.Gather() + freqs_cos = np.cos(emb) # (seq_len, head_dim) + freqs_sin = np.sin(emb) # (seq_len, head_dim) + freqs_cos = Tensor(freqs_cos, dtype=self.dtype) + freqs_sin = Tensor(freqs_sin, dtype=self.dtype) + return freqs_cos, freqs_sin - def forward_native( + def construct( self, positions: Tensor, query: Tensor, @@ -206,7 +222,7 @@ class MRotaryEmbedding(RotaryEmbedding): if self.mrope_section: assert sum(self.mrope_section) == rotary_dim // 2 - def forward_native( + def construct( self, positions: mindspore.Tensor, query: mindspore.Tensor, @@ -448,7 +464,7 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): if self.mrope_section: assert sum(self.mrope_section) == rotary_dim // 2 - def forward_native( + def construct( self, positions: mindspore.Tensor, query: mindspore.Tensor, @@ -547,7 +563,8 @@ def get_rope( if key in _ROPE_DICT: return _ROPE_DICT[key] if rope_scaling is None: - rotary_emb = InferRotaryEmbedding( + cls = InferRotaryEmbedding if is_neox_style else RotaryEmbedding + rotary_emb = cls( head_size, rotary_dim, max_position, -- Gitee From afd31f85916b5c6c2b891435d40baa5572c3fd4a Mon Sep 17 00:00:00 2001 From: twc Date: Wed, 11 Jun 2025 15:16:25 +0800 Subject: [PATCH 10/76] mask seqlen use model_config max_model_len --- tests/st/python/test_vllm_deepseek_bf16_part.py | 2 +- tests/st/python/test_vllm_deepseek_gptq_a16w4.py | 3 ++- tests/st/python/test_vllm_deepseek_osl.py | 3 ++- tests/st/python/test_vllm_deepseek_part.py | 2 +- tests/st/python/test_vllm_deepseek_smoothquant_mss.py | 3 ++- vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py | 2 +- .../model_executor/models/mf_models/mf_model_base.py | 2 +- 7 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/st/python/test_vllm_deepseek_bf16_part.py b/tests/st/python/test_vllm_deepseek_bf16_part.py index c772e16da..0d4348111 100644 --- a/tests/st/python/test_vllm_deepseek_bf16_part.py +++ b/tests/st/python/test_vllm_deepseek_bf16_part.py @@ -61,7 +61,7 @@ def test_deepseek_r1_bf16(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-bf16", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_gptq_a16w4.py b/tests/st/python/test_vllm_deepseek_gptq_a16w4.py index 9bacd0749..f61afd845 100644 --- a/tests/st/python/test_vllm_deepseek_gptq_a16w4.py +++ b/tests/st/python/test_vllm_deepseek_gptq_a16w4.py @@ -77,7 +77,8 @@ def test_deepseek_r1_gptq_a16w4(): "/home/workspace/mindspore_dataset/weight/DeepSeekR1_gptq-pergroup_safetensors", trust_remote_code=True, gpu_memory_utilization=0.9, - tensor_parallel_size=4) + tensor_parallel_size=4, + max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_osl.py b/tests/st/python/test_vllm_deepseek_osl.py index 05542d97d..5b72972b4 100644 --- a/tests/st/python/test_vllm_deepseek_osl.py +++ b/tests/st/python/test_vllm_deepseek_osl.py @@ -112,7 +112,8 @@ def test_deepseek_r1_mss(): trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, - num_scheduler_steps=8) + num_scheduler_steps=8, + max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_part.py b/tests/st/python/test_vllm_deepseek_part.py index 2e4cdec45..42e2db8b4 100644 --- a/tests/st/python/test_vllm_deepseek_part.py +++ b/tests/st/python/test_vllm_deepseek_part.py @@ -61,7 +61,7 @@ def test_deepseek_r1(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py b/tests/st/python/test_vllm_deepseek_smoothquant_mss.py index 5476b1ae3..f286bc8e1 100644 --- a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py +++ b/tests/st/python/test_vllm_deepseek_smoothquant_mss.py @@ -61,7 +61,8 @@ def test_deepseek_r1_mss(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-smoothquant-newconfig", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, num_scheduler_steps=8) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, num_scheduler_steps=8, + max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
    outputs = llm.generate(prompts, sampling_params)
diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
index 837070b14..625953614 100644
--- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
@@ -135,7 +135,7 @@ class DeepseekV3ForCausalLM(MfModelBase):
         self.set_flags = False
         set_runtime_kernel_launch_group()
         self.casual_mask = MLALowerTriangularMask(dtype=self.mf_model_config.compute_dtype,
-                                                  max_model_len=self.mf_model_config.seq_length)
+                                                  max_model_len=self.model_config.max_model_len)

     def _generate_model_config(self):
         self.mf_config.load_checkpoint = self.get_model_path()
diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 5475d5653..9aa2ec0f4 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -65,7 +65,7 @@ class MfModelBase(MsModelBase):
         self.mf_config.model.model_config.parallel_config.pipeline_stage = 1
         self._generate_model_config()
         self.casual_mask = LowerTriangularMask(dtype=self.mf_model_config.compute_dtype,
-                                               max_model_len=self.mf_model_config.seq_length)
+                                               max_model_len=self.model_config.max_model_len)
         self.network, self.lm_head = self._create_network()

         affinity_config = self.mf_config.get('context', {}).get('affinity_cpu_list', {})
-- 
Gitee

From 13ca8401372cbd9745ec80f22c235f50eb30f456 Mon Sep 17 00:00:00 2001
From: fengtingyan
Date: Mon, 9 Jun 2025 22:40:14 +0800
Subject: [PATCH 11/76] [feature]Large EP support enable_micro_batch config in
 vllm_config and mf_config and generate prefill_micro_batch graph

---
 vllm_mindspore/engine/arg_utils.py            |  5 ---
 .../models/mf_models/deepseek_v3.py           |  1 +
 .../models/mf_models/mf_model_base.py         | 31 ++++++++++++++++---
 .../model_executor/models/model_base.py       |  3 ++
 4 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/vllm_mindspore/engine/arg_utils.py b/vllm_mindspore/engine/arg_utils.py
index ed74ba9e3..5460bbbef 100644
--- a/vllm_mindspore/engine/arg_utils.py
+++ b/vllm_mindspore/engine/arg_utils.py
@@ -51,11 +51,6 @@ def _is_v1_supported_oracle(self, model_config: ModelConfig) -> bool:
                                recommend_to_remove=True)
             return False

-        if self.additional_config != EngineArgs.additional_config:
-            _raise_or_fallback(feature_name="--additional-config",
-                               recommend_to_remove=True)
-            return False
-
         # Xgrammar and Guidance are supported.
         SUPPORTED_GUIDED_DECODING = [
             "xgrammar", "xgrammar:disable-any-whitespace", "guidance",
diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
index d09d8d265..819be42fa 100644
--- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
@@ -144,6 +144,7 @@ class DeepseekV3ForCausalLM(MfModelBase):
         self.mf_config.load_checkpoint = self.get_model_path()

         self.mf_model_config = DeepseekV3Config_MF(**self.mf_config.model.model_config)
+        self.mf_model_config.enable_micro_batch = self.enable_micro_batch
         if self.mf_config.moe_config:
             self.mf_model_config.moe_config = self.mf_config.moe_config
         # dispatch/combine in moe need max_num_seqs as global_max_bs
diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 8a5a07778..70577b893 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -28,13 +28,15 @@ from vllm.model_executor.layers.sampler import SamplerOutput
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors
 from vllm.distributed import get_tensor_model_parallel_world_size
+from vllm.distributed.parallel_state import get_dp_group
 from vllm.logger import init_logger
 from vllm.forward_context import get_forward_context
 import vllm.envs as envs

 import mindspore as ms
-from mindspore import Tensor
+from mindspore import Tensor, mint
 from mindspore.common.api import _pynative_executor
+from mindspore.communication import get_rank

 from mindformers.tools.register.config import MindFormerConfig
 from mindformers.core.context import build_mf_context
@@ -54,6 +56,8 @@ class MfModelBase(MsModelBase):
         )

         self.mf_config = MindFormerConfig(os.getenv("MINDFORMERS_MODEL_CONFIG"))
+        self.rank_id = get_rank()
+        self.dp_size = get_dp_group().world_size
         build_mf_context(self.mf_config)
         build_parallel_config(self.mf_config)
         self.mf_config.model.model_config.parallel_config = (
@@ -196,11 +200,21 @@
             attn_metadata = self._dummy_attention_metadata(input_ids, positions)
         model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, attn_metadata)
         model_inputs = self.update_model_inputs(model_inputs, **kwargs)
+
+        # enable_mb_split is True only in large-EP mode, when micro-batching
+        # is enabled and the per-DP batch size is greater than 1.
+        enable_mb_split = self.is_enable_micro_batch_split(is_prefill, model_inputs["q_seq_lens"])

         if is_prefill:
-            self.network.phase = "prefill"
-            if not self.set_flags or is_pynative():
-                self.network.add_flags_custom(is_first_iteration=True)
+            if self.enable_micro_batch:
+                self.network.phase = "prefill" if not enable_mb_split else "prefill_micro_batch"
+                if not self.set_flags or is_pynative() or enable_mb_split:
+                    self.network.add_flags_custom(is_first_iteration=True)
+                    self.network.add_flags_enable_micro_batch(enable_micro_batch=enable_mb_split)
+            else:
+                self.network.phase = "prefill"
+                if not self.set_flags or is_pynative():
+                    self.network.add_flags_custom(is_first_iteration=True)
+
             hidden_states = self.network(**model_inputs)
             self.network.phase = "increment"
             if not self.set_flags or is_pynative():
@@ -241,3 +255,12 @@

     def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]:
         raise NotImplementedError("load_weight not implemented.")
+
+    def
is_enable_micro_batch_split(self, is_prefill, q_seq_lens): + """Judge enable micro batch """ + if self.enable_micro_batch: + is_prefill_cur_dp = mint.ones((1), dtype=ms.int8) if is_prefill else mint.zeros((1), dtype=ms.int8) + is_prefill_all_dp = get_dp_group().all_gather(is_prefill_cur_dp) + return is_prefill_all_dp.sum() == self.dp_size and q_seq_lens.shape[0] > 1 + else: + return False diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py index 0d933a2db..c6d6a83ff 100644 --- a/vllm_mindspore/model_executor/models/model_base.py +++ b/vllm_mindspore/model_executor/models/model_base.py @@ -112,6 +112,9 @@ class MsModelBase(): self.parallel_config = vllm_config.parallel_config self.load_config = vllm_config.load_config self.scheduler_config = vllm_config.scheduler_config + self.enable_micro_batch = \ + vllm_config.additional_config.get('enable_micro_batch', 0) == 1 \ + if vllm_config.additional_config is not None else False self.modules_dict = None -- Gitee From 444de93a769bf1cf23fb8447e02c0325a0ac2946 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 9 Jun 2025 15:01:42 +0800 Subject: [PATCH 12/76] use dtype in model_config for layers --- .../model_executor/layers/layernorm.py | 7 +++++-- .../model_executor/layers/linear.py | 9 ++++----- .../model_executor/layers/rotary_embedding.py | 7 ++++++- .../layers/vocab_parallel_embedding.py | 3 ++- vllm_mindspore/model_executor/models/qwen2.py | 19 +++---------------- 5 files changed, 20 insertions(+), 25 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/layernorm.py b/vllm_mindspore/model_executor/layers/layernorm.py index db156c0cc..53050ae56 100644 --- a/vllm_mindspore/model_executor/layers/layernorm.py +++ b/vllm_mindspore/model_executor/layers/layernorm.py @@ -22,8 +22,9 @@ from mindspore import Parameter, Tensor, mint, ops from mindspore.common import dtype as mstype from mindspore.common.dtype import typing -from vllm_mindspore.model_executor.custom_op import CustomOp +from vllm.config import get_current_vllm_config +from vllm_mindspore.model_executor.custom_op import CustomOp class RMSNorm(CustomOp): def __init__( @@ -31,9 +32,11 @@ class RMSNorm(CustomOp): hidden_size: int, eps: float = 1e-6, var_hidden_size: Optional[int] = None, - params_dtype: Optional[Any] = mstype.float16, + params_dtype: Optional[Any] = None, ) -> None: super().__init__() + if params_dtype is None: + params_dtype = get_current_vllm_config().model_config.dtype self.weight = Parameter(mint.ones(hidden_size, dtype=params_dtype)) self.rms_norm = ops.RmsNorm(eps) diff --git a/vllm_mindspore/model_executor/layers/linear.py b/vllm_mindspore/model_executor/layers/linear.py index 572f0e345..e08511492 100644 --- a/vllm_mindspore/model_executor/layers/linear.py +++ b/vllm_mindspore/model_executor/layers/linear.py @@ -32,6 +32,7 @@ from vllm.distributed import ( tensor_model_parallel_all_gather, tensor_model_parallel_all_reduce, ) +from vllm.config import get_current_vllm_config from vllm_mindspore.model_executor.layers.quantization.base_config import ( QuantizationConfig, QuantizeMethodBase, @@ -39,7 +40,6 @@ from vllm_mindspore.model_executor.layers.quantization.base_config import ( from vllm_mindspore.model_executor.utils import set_weight_attrs from vllm_mindspore.distributed.communication_op import ReduceFromModelParallelRegion - WEIGHT_LOADER_V2_SUPPORTED = [ "CompressedTensorsLinearMethod", "AWQMarlinLinearMethod", @@ -170,8 +170,7 @@ class LinearBase(ms.nn.Cell): self.output_size = output_size 
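
Read outside the diff, the gate added by PATCH 11 reduces to one small decision. The sketch below is illustrative only: `dp_all_gather` and `dp_size` are hypothetical stand-ins for `get_dp_group().all_gather` and the data-parallel world size, and NumPy replaces MindSpore tensors.

```python
import numpy as np

def should_split_micro_batch(is_prefill: bool, q_seq_lens: np.ndarray,
                             dp_size: int, dp_all_gather) -> bool:
    # Each data-parallel rank publishes a 0/1 flag saying whether it is
    # currently running a prefill step.
    local_flag = np.ones(1, dtype=np.int8) if is_prefill else np.zeros(1, dtype=np.int8)
    all_flags = dp_all_gather(local_flag)  # one flag per DP rank
    # Split into micro-batches only when every DP rank is prefilling at the
    # same time and this rank has more than one sequence to split.
    return int(all_flags.sum()) == dp_size and q_seq_lens.shape[0] > 1
```

Only when this returns True does the forward pass switch the network phase to "prefill_micro_batch"; a lone decoding rank or a single-sequence batch keeps the plain "prefill" graph.

PATCH 12 then applies one pattern across the layernorm, linear, rotary-embedding, vocab-embedding, and Qwen2 call sites: stop pinning `mstype.float16`/`mstype.bfloat16` and resolve the parameter dtype lazily from the active vLLM config. A minimal sketch of that pattern, assuming only `get_current_vllm_config` (the real helper the patch imports from `vllm.config`); `resolve_params_dtype` is an illustrative name, not repository code:

```python
from typing import Any, Optional

from vllm.config import get_current_vllm_config

def resolve_params_dtype(params_dtype: Optional[Any] = None) -> Any:
    # Defaulting to None instead of a hard-coded mstype dtype lets
    # --dtype bfloat16 (or a checkpoint's torch_dtype) reach every layer
    # without per-call-site overrides.
    if params_dtype is None:
        params_dtype = get_current_vllm_config().model_config.dtype
    return params_dtype
```

The lookup stays inside `__init__` rather than in a default argument because the current vLLM config is only populated while the engine is constructing the model. The remaining linear, rotary-embedding, and embedding hunks below repeat exactly this move.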
self.skip_bias_add = skip_bias_add if params_dtype is None: - # params_dtype = torch.get_default_dtype() - params_dtype = ms.float16 + params_dtype = get_current_vllm_config().model_config.dtype self.params_dtype = params_dtype if quant_config is None: self.quant_method: Optional[QuantizeMethodBase] = UnquantizedLinearMethod() @@ -236,7 +235,7 @@ class ColumnParallelLinear(LinearBase): ) if bias: self.bias = Parameter( - mint.zeros(self.output_size_per_partition, dtype=params_dtype) + mint.zeros(self.output_size_per_partition, dtype=self.params_dtype) ) set_weight_attrs( self.bias, @@ -545,7 +544,7 @@ class RowParallelLinear(LinearBase): ) if bias: - self.bias = Parameter(mint.zeros(self.output_size, dtype=params_dtype)) + self.bias = Parameter(mint.zeros(self.output_size, dtype=self.params_dtype)) set_weight_attrs( self.bias, { diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py index da73c2ded..eb56d6650 100644 --- a/vllm_mindspore/model_executor/layers/rotary_embedding.py +++ b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -24,6 +24,8 @@ from mindspore.common import dtype as mstype from transformers import PretrainedConfig +from vllm.config import get_current_vllm_config + def _apply_rotary_emb( x: Tensor, cos: Tensor, @@ -543,9 +545,12 @@ def get_rope( base: int, is_neox_style: bool = True, rope_scaling: Optional[Dict[str, Any]] = None, - dtype: Optional[Any] = mstype.float16, + dtype: Optional[Any] = None, partial_rotary_factor: float = 1.0, ) -> InferRotaryEmbedding: + if dtype is None: + dtype = get_current_vllm_config().model_config.dtype + if rope_scaling is not None: # Transforms every value that is a list into a tuple for caching calls rope_scaling_tuple = { diff --git a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py index b694075df..6e760aa57 100644 --- a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py @@ -25,6 +25,7 @@ from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) from vllm.model_executor.layers.quantization.base_config import \ QuantizationConfig +from vllm.config import get_current_vllm_config from vllm_mindspore.distributed.communication_op import \ ReduceFromModelParallelRegion @@ -243,7 +244,7 @@ class VocabParallelEmbedding(nn.Cell): self.quant_method: QuantizeMethodBase = quant_method if params_dtype is None: - params_dtype = mstype.float16 + params_dtype = get_current_vllm_config().model_config.dtype # Divide the weight matrix along the vocaburaly dimension. 
self.num_added_embeddings = self.num_embeddings - self.org_vocab_size self.num_embeddings_per_partition = divide(self.num_embeddings_padded, diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 351ffafff..36c36cd4c 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -79,14 +79,12 @@ class Qwen2MLP(nn.Cell): output_sizes=[intermediate_size] * 2, bias=bias, quant_config=quant_config, - prefix=f"{prefix}.gate_up_proj", - params_dtype=mstype.bfloat16) + prefix=f"{prefix}.gate_up_proj") self.down_proj = RowParallelLinear(input_size=intermediate_size, output_size=hidden_size, bias=bias, quant_config=quant_config, - prefix=f"{prefix}.down_proj", - params_dtype=mstype.bfloat16) + prefix=f"{prefix}.down_proj") if hidden_act != "silu": raise ValueError(f"Unsupported activation: {hidden_act}. " "Only silu is supported for now.") @@ -142,7 +140,6 @@ class Qwen2Attention(nn.Cell): bias=True, quant_config=quant_config, prefix=f"{prefix}.qkv_proj", - params_dtype=mstype.bfloat16, ) self.o_proj = RowParallelLinear( input_size=self.total_num_heads * self.head_dim, @@ -150,7 +147,6 @@ class Qwen2Attention(nn.Cell): bias=False, quant_config=quant_config, prefix=f"{prefix}.o_proj", - params_dtype=mstype.bfloat16, ) self.rotary_emb = get_rope( @@ -159,7 +155,6 @@ class Qwen2Attention(nn.Cell): max_position=max_position, base=self.rope_theta, rope_scaling=rope_scaling, - dtype=mstype.bfloat16, ) self.attn = Attention(self.num_heads, self.head_dim, @@ -240,12 +235,10 @@ class Qwen2DecoderLayer(nn.Cell): self.input_layernorm = RMSNorm( config.hidden_size, eps=config.rms_norm_eps, - params_dtype=mstype.bfloat16, ) self.post_attention_layernorm = RMSNorm( config.hidden_size, eps=config.rms_norm_eps, - params_dtype=mstype.bfloat16, ) def construct( @@ -302,7 +295,6 @@ class Qwen2Model(nn.Cell): self.embed_tokens = VocabParallelEmbedding( self.vocab_size, config.hidden_size, - params_dtype=mstype.bfloat16, quant_config=quant_config, prefix=f"{prefix}.embed_tokens", ) @@ -322,11 +314,7 @@ class Qwen2Model(nn.Cell): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) if get_pp_group().is_last_rank: - self.norm = RMSNorm( - config.hidden_size, - eps=config.rms_norm_eps, - params_dtype=mstype.bfloat16, - ) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) else: self.norm = PPMissingLayer() @@ -469,7 +457,6 @@ class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): else: self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, - params_dtype=mstype.bfloat16, quant_config=quant_config, prefix=maybe_prefix( prefix, "lm_head")) -- Gitee From 00c70231264859257768cb53ec49366c49d50e33 Mon Sep 17 00:00:00 2001 From: moran Date: Wed, 11 Jun 2025 10:24:11 +0800 Subject: [PATCH 13/76] run ds ST case parallel --- .../multilora_inference.py} | 8 +- .../vllm_deepseek_bf16_part.py} | 11 +- .../vllm_deepseek_bf16_part_v1.py} | 13 +- .../vllm_deepseek_gptq_a16w4.py} | 10 +- .../vllm_deepseek_osl.py} | 17 +-- .../vllm_deepseek_part.py} | 18 +-- .../vllm_deepseek_part_v1.py} | 12 +- .../vllm_deepseek_smoothquant.py} | 12 +- .../vllm_deepseek_smoothquant_mss.py} | 12 +- tests/st/python/test_cases_parallel.py | 125 +++++++++++++++++- .../python/test_vllm_deepseek_mix_parallel.py | 2 +- 11 files changed, 150 insertions(+), 90 deletions(-) rename tests/st/python/{test_multilora_inference.py => cases_parallel/multilora_inference.py} (94%) rename 
tests/st/python/{test_vllm_deepseek_bf16_part_v1.py => cases_parallel/vllm_deepseek_bf16_part.py} (89%) rename tests/st/python/{test_vllm_deepseek_bf16_part.py => cases_parallel/vllm_deepseek_bf16_part_v1.py} (88%) rename tests/st/python/{test_vllm_deepseek_gptq_a16w4.py => cases_parallel/vllm_deepseek_gptq_a16w4.py} (92%) rename tests/st/python/{test_vllm_deepseek_osl.py => cases_parallel/vllm_deepseek_osl.py} (89%) rename tests/st/python/{test_vllm_deepseek_part.py => cases_parallel/vllm_deepseek_part.py} (89%) rename tests/st/python/{test_vllm_deepseek_part_v1.py => cases_parallel/vllm_deepseek_part_v1.py} (89%) rename tests/st/python/{test_vllm_deepseek_smoothquant.py => cases_parallel/vllm_deepseek_smoothquant.py} (88%) rename tests/st/python/{test_vllm_deepseek_smoothquant_mss.py => cases_parallel/vllm_deepseek_smoothquant_mss.py} (88%) diff --git a/tests/st/python/test_multilora_inference.py b/tests/st/python/cases_parallel/multilora_inference.py similarity index 94% rename from tests/st/python/test_multilora_inference.py rename to tests/st/python/cases_parallel/multilora_inference.py index d5e86441c..7e2129a19 100644 --- a/tests/st/python/test_multilora_inference.py +++ b/tests/st/python/cases_parallel/multilora_inference.py @@ -20,7 +20,7 @@ for offline inference. """ import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -28,7 +28,6 @@ env_vars = { "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", @@ -60,7 +59,7 @@ def create_test_prompts( def process_requests(engine: LLMEngine, test_prompts: List[Tuple[str, SamplingParams, - Optional[LoRARequest]]]): + Optional[LoRARequest]]]): """Continuously process a list of prompts and handle the outputs.""" request_id = 0 @@ -101,9 +100,6 @@ def initialize_engine() -> LLMEngine: return LLMEngine.from_engine_args(engine_args) -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_multilora_inference(): """test function that sets up and runs the prompt processing.""" engine = initialize_engine() diff --git a/tests/st/python/test_vllm_deepseek_bf16_part_v1.py b/tests/st/python/cases_parallel/vllm_deepseek_bf16_part.py similarity index 89% rename from tests/st/python/test_vllm_deepseek_bf16_part_v1.py rename to tests/st/python/cases_parallel/vllm_deepseek_bf16_part.py index 7a88aa370..6c29cc4c9 100644 --- a/tests/st/python/test_vllm_deepseek_bf16_part_v1.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_bf16_part.py @@ -17,7 +17,7 @@ """test mf deepseek r1.""" import pytest import os -from . 
import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,14 +27,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "on", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -42,9 +40,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1_bf16(): """ test case deepseek r1 bf16 @@ -60,7 +55,7 @@ def test_deepseek_r1_bf16(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-bf16", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_bf16_part.py b/tests/st/python/cases_parallel/vllm_deepseek_bf16_part_v1.py similarity index 88% rename from tests/st/python/test_vllm_deepseek_bf16_part.py rename to tests/st/python/cases_parallel/vllm_deepseek_bf16_part_v1.py index 0d4348111..0a85b1caf 100644 --- a/tests/st/python/test_vllm_deepseek_bf16_part.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_bf16_part_v1.py @@ -17,7 +17,7 @@ """test mf deepseek r1.""" import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,15 +27,11 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "on", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", - "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "ATB_LLM_LCOC_ENABLE": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -43,9 +39,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1_bf16(): """ test case deepseek r1 bf16 @@ -61,7 +54,7 @@ def test_deepseek_r1_bf16(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-bf16", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_gptq_a16w4.py b/tests/st/python/cases_parallel/vllm_deepseek_gptq_a16w4.py similarity index 92% rename from tests/st/python/test_vllm_deepseek_gptq_a16w4.py rename to tests/st/python/cases_parallel/vllm_deepseek_gptq_a16w4.py index f61afd845..968f805ba 100644 --- a/tests/st/python/test_vllm_deepseek_gptq_a16w4.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_gptq_a16w4.py @@ -19,7 +19,7 @@ import os import yaml import pytest -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -29,15 +29,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -45,9 +42,6 @@ import vllm_mindspore # noqa: F401, E402 from vllm import LLM, SamplingParams # noqa: E402 -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.allcards def test_deepseek_r1_gptq_a16w4(): """ test case deepseek r1 a16w4 diff --git a/tests/st/python/test_vllm_deepseek_osl.py b/tests/st/python/cases_parallel/vllm_deepseek_osl.py similarity index 89% rename from tests/st/python/test_vllm_deepseek_osl.py rename to tests/st/python/cases_parallel/vllm_deepseek_osl.py index 5b72972b4..fc782b9e3 100644 --- a/tests/st/python/test_vllm_deepseek_osl.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_osl.py @@ -20,7 +20,7 @@ isort:skip_file """ import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -31,15 +31,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -47,9 +44,6 @@ import vllm_mindspore # noqa: F401, E402 from vllm import LLM, SamplingParams # noqa: E402 -@pytest.mark.level1 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1(): """ test case deepseek r1 w8a8 @@ -71,7 +65,7 @@ def test_deepseek_r1(): "/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-osl", trust_remote_code=True, gpu_memory_utilization=0.9, - tensor_parallel_size=8, + tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
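
All of these relocated cases share the same fixture discipline: build an environment dict, hand it to `env_manager.setup_ai_environment(...)`, and call `env_manager.unset_all()` at the end. The real helper lives in `tests/st/python/set_env.py`; the class below is only an illustrative stand-in for the contract the tests rely on, not the actual source.

```python
import os

class EnvVarManager:
    """Illustrative stand-in for tests/st/python/set_env.py, not the real source."""

    def __init__(self):
        self._saved = {}

    def setup_ai_environment(self, env_vars: dict) -> None:
        # Remember each variable's prior value (or absence) so it can be restored.
        for key, value in env_vars.items():
            self._saved[key] = os.environ.get(key)
            os.environ[key] = value

    def unset_all(self) -> None:
        # Restore rather than blindly delete, so values injected from outside
        # (e.g. the orchestrator's ASCEND_RT_VISIBLE_DEVICES and port bases)
        # survive for whatever runs next in the same process.
        for key, original in self._saved.items():
            if original is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = original
        self._saved.clear()
```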
@@ -87,9 +81,6 @@ def test_deepseek_r1(): env_manager.unset_all() -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1_mss(): """ test case deepseek r1 w8a8 mss @@ -111,7 +102,7 @@ def test_deepseek_r1_mss(): "/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-osl", trust_remote_code=True, gpu_memory_utilization=0.9, - tensor_parallel_size=8, + tensor_parallel_size=2, num_scheduler_steps=8, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects diff --git a/tests/st/python/test_vllm_deepseek_part.py b/tests/st/python/cases_parallel/vllm_deepseek_part.py similarity index 89% rename from tests/st/python/test_vllm_deepseek_part.py rename to tests/st/python/cases_parallel/vllm_deepseek_part.py index 42e2db8b4..7ef3e8901 100644 --- a/tests/st/python/test_vllm_deepseek_part.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_part.py @@ -17,7 +17,7 @@ """test mf deepseek r1.""" import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,15 +27,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "on", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -43,9 +40,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1(): """ test case deepseek r1 w8a8 @@ -61,7 +55,7 @@ def test_deepseek_r1(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) @@ -76,9 +70,7 @@ def test_deepseek_r1(): # unset env env_manager.unset_all() -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single + def test_deepseek_mtp(): """ test case deepseek mtp with main model of r1-w8a8 @@ -94,7 +86,7 @@ def test_deepseek_mtp(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-MTP", - trust_remote_code=True, gpu_memory_utilization=0.7, tensor_parallel_size=8, max_model_len=4096, + trust_remote_code=True, gpu_memory_utilization=0.7, tensor_parallel_size=2, max_model_len=4096, speculative_config={"num_speculative_tokens": 1}) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
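
The visible pattern in these hunks — `tensor_parallel_size` dropping from 8 to 2 and `ASCEND_RT_VISIBLE_DEVICES` leaving the test files — is what lets four DeepSeek cases share one 8-NPU host. A sketch of the per-job environment, using a hypothetical helper name; the port spacing mirrors the exports in the `test_cases_parallel.py` diff further below:

```python
def make_job_env(job_index: int, cards_per_job: int = 2,
                 lcal_base_port: int = 10068, hccl_base_port: int = 61000) -> dict:
    """Hypothetical helper: compute the env exports for one parallel test job."""
    first_card = job_index * cards_per_job
    cards = ",".join(str(c) for c in range(first_card, first_card + cards_per_job))
    return {
        # Job 0 sees NPUs "0,1", job 1 sees "2,3", and so on.
        "ASCEND_RT_VISIBLE_DEVICES": cards,
        # Each job needs a distinct LCCL endpoint and HCCL base port so the
        # concurrent communication groups cannot collide.
        "LCAL_COMM_ID": f"127.0.0.1:{lcal_base_port + job_index}",
        "HCCL_IF_BASE_PORT": str(hccl_base_port + first_card),
    }
```

For example, `make_job_env(1)` yields devices "2,3", `LCAL_COMM_ID` 127.0.0.1:10069, and `HCCL_IF_BASE_PORT` 61002 — the same triple the second command of each parallel group exports.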
diff --git a/tests/st/python/test_vllm_deepseek_part_v1.py b/tests/st/python/cases_parallel/vllm_deepseek_part_v1.py similarity index 89% rename from tests/st/python/test_vllm_deepseek_part_v1.py rename to tests/st/python/cases_parallel/vllm_deepseek_part_v1.py index 9f5ecd72c..e5eb917a6 100644 --- a/tests/st/python/test_vllm_deepseek_part_v1.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_part_v1.py @@ -17,7 +17,7 @@ """test mf deepseek r1.""" import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,14 +27,11 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", - "ATB_LLM_LCOC_ENABLE": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "ATB_LLM_LCOC_ENABLE": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -42,9 +39,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level0 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1(): """ test case deepseek r1 w8a8 @@ -60,7 +54,7 @@ def test_deepseek_r1(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_smoothquant.py b/tests/st/python/cases_parallel/vllm_deepseek_smoothquant.py similarity index 88% rename from tests/st/python/test_vllm_deepseek_smoothquant.py rename to tests/st/python/cases_parallel/vllm_deepseek_smoothquant.py index eb0ef8921..48d2441ad 100644 --- a/tests/st/python/test_vllm_deepseek_smoothquant.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_smoothquant.py @@ -17,7 +17,7 @@ """test mf deepseek r1 smoothquant.""" import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,15 +27,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -43,9 +40,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level1 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1(): """ test case deepseek r1 w8a8 @@ -61,7 +55,7 @@ def test_deepseek_r1(): # Create an LLM. 
llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-smoothquant-newconfig", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, max_model_len=4096) + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) diff --git a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py b/tests/st/python/cases_parallel/vllm_deepseek_smoothquant_mss.py similarity index 88% rename from tests/st/python/test_vllm_deepseek_smoothquant_mss.py rename to tests/st/python/cases_parallel/vllm_deepseek_smoothquant_mss.py index f286bc8e1..111c91e4b 100644 --- a/tests/st/python/test_vllm_deepseek_smoothquant_mss.py +++ b/tests/st/python/cases_parallel/vllm_deepseek_smoothquant_mss.py @@ -17,7 +17,7 @@ """test mf deepseek r1 smoothquant.""" import pytest import os -from . import set_env +from tests.st.python import set_env env_manager = set_env.EnvVarManager() # def env @@ -27,15 +27,12 @@ env_vars = { "vLLM_MODEL_BACKEND": "MindFormers", "MS_ENABLE_LCCL": "off", "HCCL_OP_EXPANSION_MODE": "AIV", - "ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7", "MS_ALLOC_CONF": "enable_vmm:True", "LCCL_DETERMINISTIC": "1", "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "VLLM_USE_V1": "0", - "HCCL_IF_BASE_PORT": "60000", - "LCAL_COMM_ID": "127.0.0.1:10068" + "VLLM_USE_V1": "0" } # set env env_manager.setup_ai_environment(env_vars) @@ -43,9 +40,6 @@ import vllm_mindspore from vllm import LLM, SamplingParams -@pytest.mark.level1 -@pytest.mark.platform_arm_ascend910b_training -@pytest.mark.env_single def test_deepseek_r1_mss(): """ test case deepseek r1 w8a8 mss @@ -61,7 +55,7 @@ def test_deepseek_r1_mss(): # Create an LLM. llm = LLM(model="/home/workspace/mindspore_dataset/weight/DeepSeek-R1-W8A8-smoothquant-newconfig", - trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=8, num_scheduler_steps=8, + trust_remote_code=True, gpu_memory_utilization=0.9, tensor_parallel_size=2, num_scheduler_steps=8, max_model_len=4096) # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. 
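
The orchestrator these cases were renamed for appears in the next diff. Its runner contract is simple: each entry is a (shell_command, log_file) pair executed in its own process. `run_command` and `check_results` are defined in the real test module; the versions below are illustrative stand-ins sketching that contract.

```python
import subprocess
from multiprocessing import Pool

def run_command(command_info):
    """Stand-in: run one case through a shell, return (exit_code, log_file)."""
    command, log_file = command_info
    ret = subprocess.run(command, shell=True, executable="/bin/bash").returncode
    return ret, log_file

def check_results(commands, results):
    """Stand-in: fail the whole group loudly, pointing at the per-case log."""
    for (command, _), (ret, log_file) in zip(commands, results):
        assert ret == 0, f"case failed (see {log_file}): {command}"

def run_group(commands):
    # One worker per case; each command pins its own NPUs and ports via the
    # env exports baked into the command string, so the cases cannot collide.
    with Pool(len(commands)) as pool:
        results = list(pool.imap(run_command, commands))
    check_results(commands, results)
```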
diff --git a/tests/st/python/test_cases_parallel.py b/tests/st/python/test_cases_parallel.py index 18c894f34..35d31ea8c 100644 --- a/tests/st/python/test_cases_parallel.py +++ b/tests/st/python/test_cases_parallel.py @@ -50,21 +50,24 @@ def test_cases_parallel_part0(): """ commands = [ ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b.py::test_mf_qwen > vllm_mf_qwen_7b_test_mf_qwen.log", "vllm_mf_qwen_7b_test_mf_qwen.log"), ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_chunk_prefill.py::test_mf_qwen_7b_chunk_prefill " "> vllm_mf_qwen_7b_chunk_prefill_test_mf_qwen_7b_chunk_prefill.log", "vllm_mf_qwen_7b_chunk_prefill_test_mf_qwen_7b_chunk_prefill.log"), ("export ASCEND_RT_VISIBLE_DEVICES=4,5 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 &&" "pytest -s -v cases_parallel/vllm_mf_qwen_7b_chunk_prefill_v1.py::test_mf_qwen_7b_chunk_prefill " "> vllm_mf_qwen_7b_chunk_prefill_v1_test_mf_qwen_7b_chunk_prefill.log", "vllm_mf_qwen_7b_chunk_prefill_v1_test_mf_qwen_7b_chunk_prefill.log"), ("export ASCEND_RT_VISIBLE_DEVICES=6,7 && export LCAL_COMM_ID=127.0.0.1:10071 && " - "pytest -s -v cases_parallel/vllm_mf_qwen_7b_cp_pc_mss.py::test_mf_qwen_7b_cp_pc_mss " - "> vllm_mf_qwen_7b_cp_pc_mss_test_mf_qwen_7b_cp_pc_mss.log", - "vllm_mf_qwen_7b_cp_pc_mss_test_mf_qwen_7b_cp_pc_mss.log"), - + "export HCCL_IF_BASE_PORT=61006 && " + "pytest -s -v cases_parallel/multilora_inference.py::test_multilora_inference " + "> multilora_inference_test_multilora_inference.log", + "multilora_inference_test_multilora_inference.log") ] with Pool(len(commands)) as pool: @@ -83,18 +86,22 @@ def test_cases_parallel_part1(): """ commands = [ ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_mss.py::test_mf_qwen_7b_mss " "> vllm_mf_qwen_7b_mss_test_mf_qwen_7b_mss.log", "vllm_mf_qwen_7b_mss_test_mf_qwen_7b_mss.log"), ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_prefix_caching.py::test_mf_qwen_7b_prefix_caching " "> vllm_mf_qwen_7b_prefix_caching_test_mf_qwen_7b_prefix_caching.log", "vllm_mf_qwen_7b_prefix_caching_test_mf_qwen_7b_prefix_caching.log"), ("export ASCEND_RT_VISIBLE_DEVICES=4,5 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_prefix_caching_v1.py::test_mf_qwen_7b_prefix_caching " "> vllm_mf_qwen_7b_prefix_caching_v1_test_mf_qwen_7b_prefix_caching.log", "vllm_mf_qwen_7b_prefix_caching_v1_test_mf_qwen_7b_prefix_caching.log"), ("export ASCEND_RT_VISIBLE_DEVICES=6,7 && export LCAL_COMM_ID=127.0.0.1:10071 && " + "export HCCL_IF_BASE_PORT=61006 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_v1.py::test_mf_qwen > vllm_mf_qwen_7b_v1_test_mf_qwen.log", "vllm_mf_qwen_7b_v1_test_mf_qwen.log") ] @@ -103,6 +110,7 @@ def test_cases_parallel_part1(): results = list(pool.imap(run_command, commands)) check_results(commands, results) + @pytest.mark.level0 @pytest.mark.platform_arm_ascend910b_training @pytest.mark.env_single @@ -114,14 +122,17 @@ def test_cases_parallel_part2(): """ commands = [ ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export 
LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " "pytest -s -v cases_parallel/vllm_qwen_7b.py::test_vllm_qwen " "> vllm_qwen_7b_test_vllm_qwen.log", "vllm_qwen_7b_test_vllm_qwen.log"), ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " "pytest -s -v cases_parallel/vllm_qwen_7b_v1.py::test_vllm_qwen " "> vllm_qwen_7b_v1_test_vllm_qwen.log", "vllm_qwen_7b_v1_test_vllm_qwen.log"), ("export ASCEND_RT_VISIBLE_DEVICES=4,5,6,7 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " "pytest -s -v cases_parallel/shm_broadcast.py::test_shm_broadcast " "> shm_broadcast_test_shm_broadcast.log", "shm_broadcast_test_shm_broadcast.log") @@ -130,3 +141,109 @@ def test_cases_parallel_part2(): with Pool(len(commands)) as pool: results = list(pool.imap(run_command, commands)) check_results(commands, results) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_cases_parallel_part3(): + """ + Feature: test cases parallel. + Description: test cases parallel. + Expectation: Pass. + """ + commands = [ + ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " + "pytest -s -v cases_parallel/vllm_deepseek_bf16_part.py::test_deepseek_r1_bf16 " + "> vllm_deepseek_bf16_part_test_deepseek_r1_bf16.log", + "vllm_deepseek_bf16_part_test_deepseek_r1_bf16.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " + "pytest -s -v cases_parallel/vllm_deepseek_bf16_part_v1.py::test_deepseek_r1_bf16 " + "> vllm_deepseek_bf16_part_v1_test_deepseek_r1_bf16.log", + "vllm_deepseek_bf16_part_v1_test_deepseek_r1_bf16.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=4,5,6,7 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " + "pytest -s -v cases_parallel/vllm_deepseek_gptq_a16w4.py::test_deepseek_r1_gptq_a16w4 " + "> vllm_deepseek_gptq_a16w4_test_deepseek_r1_gptq_a16w4.log", + "vllm_deepseek_gptq_a16w4_test_deepseek_r1_gptq_a16w4.log") + ] + + with Pool(len(commands)) as pool: + results = list(pool.imap(run_command, commands)) + check_results(commands, results) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_cases_parallel_part4(): + """ + Feature: test cases parallel. + Description: test cases parallel. + Expectation: Pass. 
+ """ + commands = [ + ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " + "pytest -s -v cases_parallel/vllm_deepseek_osl.py::test_deepseek_r1_mss " + "> vllm_deepseek_osl_test_deepseek_r1_mss.log", + "vllm_deepseek_osl_test_deepseek_r1_mss.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " + "pytest -s -v cases_parallel/vllm_deepseek_part.py::test_deepseek_r1 " + "> vllm_deepseek_part_test_deepseek_r1.log", + "vllm_deepseek_part_test_deepseek_r1.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=4,5 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " + "pytest -s -v cases_parallel/vllm_deepseek_part.py::test_deepseek_mtp " + "> vllm_deepseek_part_test_deepseek_mtp.log", + "vllm_deepseek_part_test_deepseek_mtp.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=6,7 && export LCAL_COMM_ID=127.0.0.1:10071 && " + "export HCCL_IF_BASE_PORT=61006 && " + "pytest -s -v cases_parallel/vllm_deepseek_part_v1.py::test_deepseek_r1 " + "> vllm_deepseek_part_v1_test_deepseek_r1.log", + "vllm_deepseek_part_v1_test_deepseek_r1.log") + ] + + with Pool(len(commands)) as pool: + results = list(pool.imap(run_command, commands)) + check_results(commands, results) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_cases_parallel_level1_part0(): + """ + Feature: test cases parallel. + Description: test cases parallel. + Expectation: Pass. + """ + commands = [ + ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " + "pytest -s -v cases_parallel/vllm_mf_qwen_7b_cp_pc_mss.py::test_mf_qwen_7b_cp_pc_mss " + "> vllm_mf_qwen_7b_cp_pc_mss_test_mf_qwen_7b_cp_pc_mss.log", + "vllm_mf_qwen_7b_cp_pc_mss_test_mf_qwen_7b_cp_pc_mss.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " + "pytest -s -v cases_parallel/vllm_deepseek_osl.py::test_deepseek_r1 " + "> vllm_deepseek_osl_test_deepseek_r1.log", + "vllm_deepseek_osl_test_deepseek_r1.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=4,5 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " + "pytest -s -v cases_parallel/vllm_deepseek_smoothquant.py::test_deepseek_r1 " + "> vllm_deepseek_smoothquant_test_deepseek_r1.log", + "vllm_deepseek_smoothquant_test_deepseek_r1.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=6,7 && export LCAL_COMM_ID=127.0.0.1:10071 && " + "export HCCL_IF_BASE_PORT=61006 && " + "pytest -s -v cases_parallel/vllm_deepseek_smoothquant_mss.py::test_deepseek_r1_mss " + "> vllm_deepseek_smoothquant_mss_test_deepseek_r1_mss.log", + "vllm_deepseek_smoothquant_mss_test_deepseek_r1_mss.log") + ] + + with Pool(len(commands)) as pool: + results = list(pool.imap(run_command, commands)) + check_results(commands, results) diff --git a/tests/st/python/test_vllm_deepseek_mix_parallel.py b/tests/st/python/test_vllm_deepseek_mix_parallel.py index d23097c6a..eadecd8cc 100644 --- a/tests/st/python/test_vllm_deepseek_mix_parallel.py +++ b/tests/st/python/test_vllm_deepseek_mix_parallel.py @@ -37,7 +37,7 @@ env_vars = { "HCCL_DETERMINISTIC": "true", "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", "ATB_LLM_LCOC_ENABLE": "0", - "HCCL_IF_BASE_PORT": "60000", + "HCCL_IF_BASE_PORT": "61000", "LCAL_COMM_ID": "127.0.0.1:10068" } env_manager.setup_ai_environment(env_vars) -- Gitee From 
af5a38b81ecd497312868d57bb802e52f4adf9a9 Mon Sep 17 00:00:00 2001 From: horcam Date: Sat, 14 Jun 2025 14:48:31 +0800 Subject: [PATCH 14/76] update readme for develop --- README.md | 133 +++++++++++++++-------------------------------- README_en.md | 67 ++++++++++++++++++++++++ docs/arch.cn.png | Bin 0 -> 203512 bytes docs/arch.png | Bin 0 -> 199273 bytes 4 files changed, 110 insertions(+), 90 deletions(-) create mode 100644 README_en.md create mode 100644 docs/arch.cn.png create mode 100644 docs/arch.png diff --git a/README.md b/README.md index 5ea56601b..5a56f28c8 100644 --- a/README.md +++ b/README.md @@ -1,114 +1,67 @@ -# vllm-mindspore +

+vLLM MindSpore +

-## Overview +

+| 关于MindSpore | vLLM MindSpore SIG | 问题反馈 | +

-The `vllm-mindspore`is a integration for running vLLM on the MindSpore framework. - -This is the recommended solution for supporting the MindSpore within the vLLM community. It provides deep integration with the MindSpore framework, offering efficient computation and optimization support for vLLM, enabling seamless operation on MindSpore. - -By using the `vllm-mindspore`, popular open-source models, can run seamlessly for training and inference on the MindSpore framework. +

+English | 中文 +

--- +*最新消息* 🔥 -## Prerequisites - -- Hardware: Atlas A2/A3 -- Software: - - Python >= 3.9 - - CANN >= 8.0.0 - - MindSpore >=2.5.0 +- [Coming Soon🏃] 适配vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3),新增支持vLLM V1架构、Qwen3大模型。 +- [2025/04] 完成vLLM [v0.7.3](https://github.com/vllm-project/vllm/releases/tag/v0.7.3)适配,新增支持Automatic Prefix Caching、Chunked Prefill、Multi-step Scheduling、MTP等特性。联合openEuler社区和上海交通大学,实现DeepSeek全栈开源单机推理部署,你可以在[这里](https://www.openeuler.org/zh/news/openEuler/20240421-jd/20240421-jd.html)阅读详细报道。 +- [2025/03] 完成vLLM [v0.6.6.post1](https://github.com/vllm-project/vllm/releases/tag/v0.6.6.post1)适配,支持采用`vllm.entrypoints`部署基于MindSpore的DeepSeek-V3/R1、Qwen2.5等大模型推理服务。联合openEuler社区和北京大学,发布全栈开源DeepSeek推理方案,你可以在[这里](https://news.pku.edu.cn/xwzh/e13046c47d03471c8cebb950bd1f4598.htm)阅读详细报道。 +- [2025/02] MindSpore社区正式创建了[mindspore/vllm-mindspore](https://gitee.com/mindspore/vllm-mindspore)代码,旨在将MindSpore大模型推理能力接入vLLM。 --- -## Getting Started - -### Installation - -#### Installation from source code - -Install from source code. [Wiki Installation.](https://gitee.com/mindspore/vllm-mindspore/wikis/Getting%20Started/Installation) - -#### Set up using Docker - -##### Pre-built images - -```shell -docker pull hub.oepkgs.net/oedeploy/openeuler/aarch64/mindspore:v1.0 -``` - -##### Build image from source - -```shell -docker build --network=host . -``` - -### Inference and Serving - -#### Offline Inference - -You can run vllm_mindspore in your own code on a list of prompts. - -```bash -export ASCEND_TOTAL_MEMORY_GB=64 # Based on the ascend device. -``` - -```python - -import vllm_mindspore # Add this line on the top of script. - -from vllm import LLM, SamplingParams +# 简介 -# Sample prompts. -prompts = [ - "I am", - "Today is", - "What is" -] +vLLM Mindspore插件(`vllm-mindspore`)是一个由[MindSpore社区](https://www.mindspore.cn/)孵化的vLLM后端插件。其旨在将基于Mindspore构建的大模型推理能力接入[vLLM](https://github.com/vllm-project/vllm),从而有机整合Mindspore和vLLM的技术长板,提供全栈开源、高性能、易用的大模型推理解决方案。 -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.0, top_p=0.95) +vLLM MindSpore插件以将Mindspore大模型接入vLLM,并实现服务化部署为功能目标。其遵循以下设计原则: -# Create an LLM. -llm = LLM(model="Qwen/Qwen2.5-32B-Instruct", tensor_parallel_size=8) -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) -# Print the outputs. -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") +- 接口兼容:支持vLLM原生的API和服务部署接口,避免新增配置文件或接口,降低用户学习成本和确保易用性。 +- 最小化侵入式修改:尽可能避免侵入式修改vLLM代码,以保障系统的可维护性和可演进性。 +- 组件解耦:最小化和规范化MindSpore大模型组件和vLLM服务组件的耦合面,以利于多种MindSpore大模型套件接入。 -``` +基于上述设计原则,vLLM MindSpore采用如下图所示的系统架构,分组件类别实现vLLM与Mindspore的对接: -#### Serving(OpenAI-Compatible) +- 服务化组件:通过将LLM Engine、Scheduler等服务化组件中的PyTorch API调用映射至MindSpore能力调用,继承支持包括Continuous Batching、PagedAttention在内的服务化功能。 +- 大模型组件:通过注册或替换模型、网络层、自定义算子等组件,将MindSpore Transformers、MindSpore One等MindSpore大模型套件和自定义大模型接入vLLM。 -You can start the server via the vllm_mindspore command: +
+ Description +
-`python3 -m vllm_mindspore.entrypoints vllm.entrypoints.openai.api_server --model "Qwen/Qwen2.5-32B-Instruct" --tensor_parallel_size=8` +vLLM MindSpore采用vLLM社区推荐的插件机制,实现能力注册。未来期望遵循[[RPC] Multi-framework support for vllm](https://gitee.com/mindspore/vllm-mindspore/issues/IBTNRG)所述原则。 -To call the server, you can use `curl` or any other HTTP client. +# 环境准备 -```shell +- 硬件:Atlas 800I A2推理服务器,或Atlas 800T A2推理服务器,已安装必要的驱动程序,并可连接至互联网 +- 操作系统:openEuler或Ubuntu Linux +- 软件: + - Python >= 3.9, < 3.12 + - CANN >= 8.0.0.beta1 + - MindSpore (与vllm-mindspore版本配套) + - vLLM (与vllm-mindspore版本配套) -curl http://localhost:8000/v1/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "Qwen/Qwen2.5-32B-Instruct", - "prompt": "MindSpore is", - "max_tokens": 120, - "temperature": 0 - }' +# 快速体验 -``` +请查看[快速体验](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_zh_cn/getting_started/quick_start/quick_start.md)和[安装指南](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_zh_cn/getting_started/installation/installation.md)了解更多。 -## Contributing +# 贡献 -We welcome and value any contributions and collaborations: +请参考 [CONTRIBUTING](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_zh_cn/developer_guide/contributing.md) 文档了解更多关于开发环境搭建、功能测试以及 PR 提交规范的信息。 -- Please feel free comments about your usage of vllm_mindspore. -- Please let us know if you encounter a bug by filing an issue. +我们欢迎并重视任何形式的贡献与合作,请通过[Issue](https://gitee.com/mindspore/vllm-mindspore/issues)来告知我们您遇到的任何Bug,或提交您的特性需求、改进建议、技术方案。 -## License +# SIG组织 -Apache License 2.0, as found in the [LICENSE](https://gitee.com/mindspore/vllm_mindspore/blob/master/LICENSE) file. +- 欢迎加入LLM Infercence Serving,参与开源项目共建和产业合作:[https://www.mindspore.cn/community/SIG](https://www.mindspore.cn/community/SIG) +- SIG例会,双周周三或周四下午,16:30 - 17:30 (UTC+8, [查看您的时区](https://dateful.com/convert/gmt8?t=15)) diff --git a/README_en.md b/README_en.md new file mode 100644 index 000000000..f99e67fc5 --- /dev/null +++ b/README_en.md @@ -0,0 +1,67 @@ +

+vLLM MindSpore +

+ +

+| About MindSpore | vLLM MindSpore SIG | Issue Feedback | +

+ +

+English | 中文 +

+
+---
+*Latest News* 🔥
+
+- [Coming Soon🏃] Adaptation for vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3), adding support for the vLLM V1 architecture and the Qwen3 large model.
+- [2025/04] Adaptation for vLLM [v0.7.3](https://github.com/vllm-project/vllm/releases/tag/v0.7.3), adding support for Automatic Prefix Caching, Chunked Prefill, Multi-step Scheduling, and MTP. In collaboration with the openEuler community and Shanghai Jiao Tong University, we achieved full-stack open-source single-machine inference deployment for DeepSeek. You can read the detailed report [here](https://www.openeuler.org/zh/news/openEuler/20240421-jd/20240421-jd.html).
+- [2025/03] Adaptation for vLLM [v0.6.6.post1](https://github.com/vllm-project/vllm/releases/tag/v0.6.6.post1), supporting the deployment of inference services for large models such as DeepSeek-V3/R1 and Qwen2.5 based on MindSpore using `vllm.entrypoints`. In collaboration with the openEuler community and Peking University, we released a full-stack open-source DeepSeek inference solution. You can read the detailed report [here](https://news.pku.edu.cn/xwzh/e13046c47d03471c8cebb950bd1f4598.htm).
+- [2025/02] The MindSpore community officially created the [mindspore/vllm-mindspore](https://gitee.com/mindspore/vllm-mindspore) repository, aiming to integrate MindSpore's large model inference capabilities into vLLM.
+
+---
+
+# Overview
+
+vLLM MindSpore (`vllm-mindspore`) is a plugin incubated by the [MindSpore community](https://www.mindspore.cn/en), which aims to integrate MindSpore LLM inference capabilities into [vLLM](https://github.com/vllm-project/vllm). With vLLM MindSpore, the technical strengths of MindSpore and vLLM are combined to provide a full-stack open-source, high-performance, easy-to-use LLM inference solution.
+
+The vLLM MindSpore plugin aims to integrate MindSpore large models into vLLM and to enable the deployment of MindSpore-based LLM inference services. It follows these design principles:
+
+- Interface compatibility: support vLLM's native APIs and service deployment interfaces, avoiding new configuration files or interfaces, to reduce the learning cost and ensure ease of use.
+- Minimally invasive modifications: avoid invasive changes to the vLLM code base wherever possible, to keep the system maintainable and able to evolve.
+- Component decoupling: minimize and standardize the coupling between MindSpore large model components and vLLM service components, to ease the integration of the various MindSpore large model suites.
+
+Based on these design principles, vLLM MindSpore adopts the system architecture shown in the figure below and integrates vLLM with MindSpore by component category:
+
+- Service components: vLLM MindSpore maps the PyTorch API calls in service components, including LLMEngine and Scheduler, to MindSpore capabilities, inheriting service features such as Continuous Batching and PagedAttention.
+- Model components: vLLM MindSpore registers or replaces model components, including models, network layers, and custom operators, and integrates MindSpore Transformers, MindSpore One, and other MindSpore large model suites, as well as custom large models, into vLLM.
+
+ Description +
+ +vLLM MindSpore uses the plugin mechanism recommended by the vLLM community to realize capability registration. In the future, we expect to follow principles described in [[RPC] Multi-framework support for vllm](https://gitee.com/mindspore/vllm-mindspore/issues/IBTNRG). + +# Prerequisites + +- Hardware:Atlas 800I A2 Inference series, or Atlas 800T A2 Training series, with necessary drivers installed and access to the Internet. +- Operating System: openEuler or Ubuntu Linux. +- Software: + - Python >= 3.9, < 3.12 + - CANN >= 8.0.0.beta1 + - MindSpore (matched with the vllm-mindspore version) + - vLLM (matched with the vllm-mindspore version) + +# Getting Started + +Please refer to [Quick Start](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_en/getting_started/quick_start/quick_start.md) and [Installation](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_en/getting_started/installation/installation.md) for more details. + +# Contributing + +Please read [CONTRIBUTING](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_en/developer_guide/contributing.md) for details on setting up development environments, testing functions, and submitting PR. + +We welcome and value any form of contribution and cooperation. Please use [Issue](https://gitee.com/mindspore/vllm-mindspore/issues) to inform us of any bugs you encounter, or to submit your feature requests, improvement suggestions, and technical solutions. + +# SIG + +- Welcome to join vLLM MindSpore SIG to participate in the co-construction of open-source projects and industrial cooperation: [https://www.mindspore.cn/community/SIG](https://www.mindspore.cn/community/SIG) +- SIG meetings, every other Wednesday or Thursday afternoon, 16:30 - 17:30 (UTC+8, [Convert to your timezone](https://dateful.com/convert/gmt8?t=15)) diff --git a/docs/arch.cn.png b/docs/arch.cn.png new file mode 100644 index 0000000000000000000000000000000000000000..b2c2d0aedfbb3bad25e50071d8070e1f5c3f447d GIT binary patch literal 203512 zcmeFZ2VBy5{69|P+U0d^t-PzWsa|<%POfX1nG;QOmzf3Ph*MKU?5?sh_0rT_P&oki z2)ANOGgBH21xIDV38Eieswk^1F z`!HJdt(jwYUY+rrHI#5<^QNRj2fs(0`|8g9En8v_Cq1-^xwvJ^Hw(9H z`SPp4JKs3(gB&|z?`ySc$FC1HkJVyFMd53ai84>@Zq`~ve1toz#nYJNtXjrXuEL>)pl`Ib>Swu0Sv z2HkXjGB=_zB3ryrDo$i#ONn4_UoW_~;#NcoN_IrbUnOKGGO&6^dX>Aqp=kM)h^w;6 z$3nIm6AM0tZhF%2+{`DsIXa9a6VI0N#jFUSK$M9-?G2{OCoH6V3;uekq}z}sVqnR8 z$oH`359O0MAzO57w5n7Qgp}70lhG&p!M*Y*1|K78K=Qaa23>Ru*F{%!$tNv^?C@Jb z)up5k$>e@1-@OA|hLxeD{L$m2pz>0Mo>bB%oHSz6(ex&{wv?~It0lqF(LbXU)z9cm zx&kK?hYI0Mc>Fp^4OJn_~pI-1%f_vW#^ zeZA2{nV8fi^7h*p)UP1%$fftNs7ol>USJbftR8TkXH9ZPsW=T-BNmg51i~q~{pLY^j9L6O$`THR0r1#gMdVRBA!TC;)Egb<@#f#^i|3oMA4xX|zcy z|4BAkIqZ);E3$IhN~~<_u+*-MEw3R;&r2l(MdVfnAN>rYxHT&48xI{fdi7QJV1-quR}aSsP9kL`#|Q15?u{jSjM2BR|mt}w&clzfh4lEgrm z5-q*m>J&a)-BG(;k4t3rBlUQszI8d*zAyY5#k>g7Elv|LBk{;+NI=J+?D37aG6z$y z2+h!?!gPl|ux(M(5bK+i2Om(FXy?Y`o_1(S0A^xN*M{@XBl=kwooU-RFEA)aTw?T#}SEg+vXOCAeo zX_9XvL(MA3hFdU-wSYcHBi@zR+C9WLEu`5I}Ter;*_sKNz64>v4;Gu|+7MGyc>07kxO5(bdYk4jg}itgw+vxWh2=JW}7Jj#Q#C%*Jep&OwbHf7?&A=nwD)1GY~Zu2te62E|+V z4*YFDl#DbK!-bVWM*SzekzqsYSdeGtLII?>zfmZ71?**}xGET1@rA zdF1cN6>RhUs4Cg8G0Q^A&_70Ylb!I6R`8$kc`))R^Gw;Sf#b#gReBZJh|ApLCH{4K zyTK=YBWcn-{BzP;|7Oe=NbSiTd|#oUumc91l3>1HzHlJD!oMA}n{+>VB%N=!j(i*4 z((>V`M2F?itHoMh5a`|VzPA;9zey1xWkmd3eT(@`6-xF>=)b69bWvsTD0i4`bfQY9 zkM}Jz+H29hpxAD}{dAE7XT!P36=yOv@3Es!1pWY*zFIRpzlbMeeIXR^d1U7ODgm;j zc4t==IOr$1lpj_G51h~JY?;uN%1wFPL)?A6YdF{X%wK@t7q{qPGGmyGp1D1p zO{JH5(5+E9m7|I(bJ0@aU8y@wu00Iw^dJyUC{WNl_w})UEO9jMn;O}q9fOYj 
z@XX(WANFM+D(I*8YZ_jE5&p#+p}^hgVfSSd_z-wVjMd;r{Z%cYUyteMkwC_1DxT2QFCg_Lo< zNf}siI<|J~vQfhu-?VX-5vlgq_@JnP*7)(%>V~{FJH-b(^rQz^S_wv`8Z{2tpc~$? zqes}Qec2N$dh{kA=yK^Sci?by` zgN`rJBC7vSFDU*RIE%&ccP$?397Gw$Q~Y}XKz(~n5&%wPASihA4i9MQs_wC;DCPoa zX?o>Aij;bTf?;--E8YQr&@IoKW+3P`LB$YPu7ibJM@>3_n5tUAMlWob@3@p zRN~VOZCaRWZdxuhx?kbHLy3HR^vmdDOVW6a@3nn|&sv&DJAH%!txfWy(r*9bnapo0 zQM}{NJ!e(Mv(sPLL<1ma2IwJ^?(KtWdfwEuVGomz+1LBdfyUNjl1CS16wAEJ4HNZ^ zx>Kp)D-7LHMkjy`)^+ikyD_ANS%CK6sgw;J&sdVpBX2#<%zWP6bFov%rZc0^?HG}_ zQ{%#HG`+ukwD3)3f`hqC}Y9LdC%Vf$Ad$uwe+gMKmW1f;2=~`#o2-G z54>hcF$*qySK5Uo1;{m}V!ib0jLa4i^Tp4@*vgZRw@_c5aj(Z#fd_*8`vn>dnu29? z&ql7nz%-MdVvu>V^dERk5&_;%qG0Nef42hdF4cZr#SWU^qb;WidHp?AtsOyurc!y> z6Sxcu@5j#V?Q9}(t7~}V%-c*!5AZU2BRW&6{oYW~6^)6`kqU!CfuQtx^azQHGM%Ub z1JI6M5y_-Szd=_v2FptTzC|%5eRKPTJ^f)qqC!)n`ez_|E_u(CPjWjk^$YLAuV;dK zy6NGUUJFk)EPTMLWqwnGQWyyNnP2xbH7$IQZ5}Q)Y6imFHrMMtl=$fR#QQ4=3A zp81+K!m7T{lj@ZjQ8N4IRgVV!5Q3Cze_6d7Ob0^MHl<$_TN+E{l{_iQM`qEEQ5@Ca z3WF{)Mc6=ZbVlXw!Z$T287>=eyq$i&=<$_>;Wa-lv7We~IdP$PoS#;SDBDX4jO;cx z$;^%e>t__7SYbMnsip%fQ!CA^P?IG7s2BMn?wCJJ#$L&!=yX~vKtns!6z|paT4>T& z`CXC#)?>Zsv2Uwr*uJ+~dJ%2GRWx+;Q)_R@%QwR{;QbNZ?Os)DP5a1BUh?s@QkHXR zBo*oMYC>$JGf{TrOsL}V{xJ?;$9(;$ut9Ra+!We;Z zW?f5GowlVzY2FJ8@`t;9){%Ho#W9Y#lLG)R1jVr}N^t3ls1c9eDHcO1{xNxYvY{<% zwfaHSWdujRzn0?v>Pdo2bfPmn&x~@t`XFF%th`9`1r4 z2K@N_3ZfJWWLLwr%r_M%#ayYF&fsr-QvoDh;Zm_pO@J)$oGuq>da8;ow?mF-pE0aP z*LQc*`_GX3mt_=V{R8>~$h{XkgnS^_33_l^%GW*vxaC+NeeDP`HI<4xg3K~A`_G37 z1)g`5K=YITX0%is-n+`o9h=<$liX9t%kIAzCX{)SmcqSB&n5kHp9>rPbGMRK!O2p+ z#r^V*GyEK3Bl40t`}nZUS{@L@K!c+9GcXX+H>eed-A&bU(NAgg%Gb%?GmfLx^30L8 zcka!9NjhVOkZVchiru6Da)h?bZ~#m)Evrh__Wl;AUCjJk|K4Zm%XaxQifhij66@TS*-9RH zXwHTy|KB_k(Ja1+GaIC>RI_tdEEn4UoKgBX$)FcOHdlCb7$1yR7Lq_U7E_##>D^ow z{R{93N79R)I_+!fYRY$mQ45M@DSxTA^pSmS;9aQ2$&fOsw(;|ofBgO06nbfkS!@O_ z<+>UxqQ11k=lYS# zTpL~hWb^oa9dzq>W?`rflv1NwRd)+raQ(xX&nYY!EFIeD;0?#nLLCQG4CS1Z6ZX8! z`KC-+l>-fLyde0Q5>=b3-Wac5AFFP3R#sh1{ksi`hYZ3B3C5EXRPts6L**W~by-;8 z#_BVITJ8!y+>yptcjuVlS{On0QpA)@%CbJYp^@2{qZj-;j8EXy?HpuIzpm3+w|ex> zJOl4eSir*TRP}1d0j=qUMKcR8DI;R6isAHpgI@&pjsomU^;3}>%7g4&*Gs1q$hfpT zG@CjB$)`1AgFLqySST;)u`}iPj@=3;74sJlJvcvtz%Ew4;d1bcxN98KK@4)_?4muR z0HcF6y2EcOCD$%i9DV+>vx5MMPv#pIb62)1c@_gZ;A7*Xvs`7vHOGoi!6IB32zt)`5?6yg@Jh&ru!D(j&el>3Dqg99Zdo z;??mn>-+?BaJX`zN@=GSauyb0MGb7!CM9z`)K`6bXyeg>rWdaOQ$^z?mVLH@@ZxXI zz^@~CDp^4%+2%Y?>qd(bPUgib8}3b!(!gS@b1NQS`Q#qF{Mkp=JbaNvYjzXZxb3%} z&8;U}S$FhlkLKW`+p>&3`&1^Vd*kWqPx(aDSZ9*3z{Z{XO<9wvnb!b%^%ZtWX!f{@ zqrq35s@5)dUEyjyr6$jXbgR}fM+ezm!Y|bZbG6DWWX0iB-2&>cZ-K?tGj>xjd&nS1Z>6p3_CP*-#1YXKuYJQ?7V@ z#p%#3e8BEaW!hz_sSAqWHVTz4ua94(o`bUx5cD9ZY^Ww7`Ud%qVh) z7O8>Kb*j((Zq1Z3S*Q8O?AUzta~8h8o%(((>ve@ePw#%$1|=HTzN`%{K;5hsl{;lV zqHhku-YZGws0_#Qmp|DcIChan&JEeh+1&XA@i}61U7oxlOc153X=ux_? 
zup&k06U7Ggy!h(-$jmM~VmrY?ODTd-<1%5L6V@ZzmcnnVyi3Qs+B4u=)kz2oI47^qbN>ZD|Snl2gS{2TMDmKDKA?38cBnAfr~$Y|$F3AGI< z>(}Yk0HFL7=~^U7EK|~N47MxqetB+*7n&E9cneaO-30{P}) zrI`mKmOKjM$yF z|6wq7jgl1FO?JqR%AIZc-={3s-r;^dAP|6cE@bF|{TiEG-KH%hSJ_+mpx(7Cs&4ms z^m^5j9|5n@yhcoAC;zMxXJu^K@XaNwJF^^Or*?u=DMU$ky73#Tl>X<{2 zb?!+sCuQaw_KUmGVFS1w)DF+JoV!;kkJH|*DfDhAo;-M?lA)Ekzv;d5Bp<6CQ2{U1 z&+kz31pYItxlVMRvUbswPMACV&$hHnpU~kX)sj4UfN#rX%>R1{`buFIW^M1t&nHD&V>OB>g zlx^MNYdvme>ldaKG4BjwB_UvUMeV|(g++FGi}!neccX41Y4W5Xt~!OZ?cA~YQ-~*P zHY%^O%jSz;49qX!egL97s^PDTiqaOOEmTE(!&jenCC%s;WBtK@lHB;EHRfz%vdWj; z837QYmh5vfz;C6&}=6a&(G}hLB&2joY+FU*=+I$keJ!@iYnMpG6ulEpPD7hQf_RFFW!D3 z^rr5T&=&_dZ>`7UsCs?J9y${Bb|2_$8GW_S{uejy*W7LtyL>(R?ek` z(?^wmJ^M=_f=E2|uNmyH0Kiwv z3InaOgg^1)o)yOL7VAxA6-s%4nzt!k{^st#HN#JNxl-vUZ~SXR{}ymt7X*j@9!e~M zB)I{#M0FwMKQvs0#w8%_h}`n8LF=ePcl-~4XEjgy*T|TUA3pRA5W;V4``7RbaSF@% z7xMUV`vqe0=8xob-rjTTfS=L7;lwAP8oc9YtABai)eFRU>5Gr&-AeV}^{*FyVw}N^ z;#>KcU-XIyq3^&`k8O^h;LUKu2FyQc^KYKMX378Ea*6i; z1}{UAK5+iza;12bKQYDl$xUhJ)TU(K;c#l8eNuaTxV0sv);{+W&)F4WYW7bs^;>im zykrOINnX!>zfD>ZEhiBxyC?RLu9`{9L;Q4C5kk?>Jqy}Ijt=lhUwrUW-8-4%xlUnuU%K|6VDsMplRuaW-<6v-@BV*`t=!$8!ff+#r+GIZ;)A+_ z8+WYO-lo4gGv`T5n%L%DcVhfb#GS)4(L=xnKkfW3k3IefLpI#76#x;@v|F@yF8MLQ=$g0PTI873a(}wiRTRkZ1{x0JA@^haR1D?E|aH6kCxY- z>SP6UG66|3w^t?1rOo+!Pb`k3wWR3%`h;Z~fheop;=WVdvO=7hTI{^SG-!P3%Wjhq z?XoJOWSf%H%Ac6k5iPg(T*>+e^{5~wiJebzx9RVG%t|uOtTYa)4Dk!Zb(wtSs`mGx z$2&bY4VR|dn=?n>b=+qq1r)#2uzMHeR&~r~xS`GHus`?kZ*7rfTghwxg^a3>?IB(J zdnrvE91k2723D(RY_Dv@HCl1~XysLZz)S}v`a;^bb^>T0Cxvm=lSX-YBQ}Iu;#O$zFT!yCV%`8Xb;lovS zTgwurvKk+hGVrr3IoL6U@wSBZVOwUyPV6X?S1U;F$~7_i_{P*lscCmvsv;=}=d{f9 zs|&>GhEuyEQioZMdgn%&R2}g8=F=0Fo61&CZ+{$GI=F8fc(cG(^>)cshF+oShznCN zkeVlS+qk9lU`HuORg8R{ftOYM_bsXtrB{e}v2(WS6ps}|?<&;2XD2Pc`Sd1!(!J|+ z_4a*s6fehbwdtbI$!Tp1j#vndcHXMmjDCQZ2G`W7>;jzq7gLDnn#zE6Ti9N1x3=dO zny7-_8f_>kKzb(qal#KiI#Ugn<_d~eyD(&rhu2RT-qeN4&6*7yq<&1x_FZ8>sPYL1 z$kD*;*h!tw&UF3*o8#nfKBw{6?}YhR+-BldxLQtkGjQl;69wwZU{y3XEo%;WkY%5SE z7ec24I6!}jrK2Rpa+t%XtSka__%n7Bd?L30J$qK|())WY3hhUU%>RkW|F}!uPlI`% zDVkCUb7`}uA{r%IoXQ$j{+@MLo87*iYN_}U8fnVuo*@BZWP|xx)JCcJQads1T!Y@5~>O#gKa3$#|O(cFLzxG zs8PbxhsJ7ek2qnyV9(xAx~?f&ia7l+PR%*%$;*M{Ngc|1dJEm@615Evn-BUnkpcTn zcv?hM&qkpJM0%78pvm{Vw2xZ;8XvX4=%?BPf+1qddfRxq4D2F#2aU+*PnV6 z4>AZ`ctNoHXz_NVGaw&#V3=zEpcX+18Nn;%;;P42WXXKP{^ zV4lESP@O4Q5u&{G3y_Znl=PcwS{kf+POxaC0fqT;7-em7e!})mWft?i=MW9A_JbUN zrVG~|S-f_Ko|O`EVsh-U5!Q!>*G%&7sk;NGSizS@CE_GSgetNJ6r9V-h%?s!ftdDE zKfsO-s8D2Yfx&}!2CQJ{a0VhU=0ne!5bK_RZ_2z54fUKGm;bogYdleo7TNK6xw~! 
z`2unVRQfFMhaU{+9&^*v9`?^ULf_0@2NGI-rY8A6*imtGb;MqSs>0qcZy{D3hI%e$4!eI_{Es>E-4~4I+x=_|2URSVo=`sfBP z?>*xas3NUa?^iEdL#$w5fNXzvVh>&hx!Y3M@kUpGfbnW%{5nphcIiGUt09ca_JEe0 zQiJ^j4+znj4Prq)(;&vY<{2f;CVLm&$3h0S7my0g*{L<%6|_3Zp|UMtT?GT354DP< znT(H7Z`DRkIwd+5?kCzo*$zZIn#We7Q25@`H$4#`A2_sBki|A*pP0hy*V`!U11?gt zA?#+Vt5#llzC|U?0;zc%u2&XIW^X2=b{e=;#2VZ;{4-8dY-Z0$k{^G)f7qU^1#Q6p zxc+QTF2-7UJ3(FDj~)=NhM*|6y118e^`#gu7$e5(EFd-{50)1l>mQ8`yC?)(8aZ#twXga)#^jh=(8u zEd>1(`4FabHMA*@z5YJTiGEQdLTBVG%2CJt*-{YO@vpe*$#{ZiC#80L&pQ+cE(LDh z6%f!^q6^QZAPkB#+l_rqHAz1kRmkG39u^1Y%IPFki3xxg;4jlU`a zi{b9Uu^_ypR7qrsh(V-7Ig#l|-7HOOvay5jma|z5$)hol&*$xU}P@9x$kiLg_nmCYQLWgi`KZu`pH0rJ}NQ0)`qZ~7ppAUr5y$TtEb0f8f zp>%6oflU<9O?HW22tww6jww;!dvW*Is51vMue#|FJC2?8O*Y&Q(Riqo4Ck$Oi^3^^@+N z36qsRYrdys{SbC3m0vQ5$lS|QK$ap`Lug}qtEY#Wf2ix4DT%K zj+E;R?9t+E&Hz-FPxFoEr(8d8j*kGE|9K@0^+yQzdZAlNNty$DQ@V-BR$%ScnWnx1 zBs}^#$Q#Zruq#!1zJRo#Ezr!LUbeIlgu3QnV)vQuCj#~IbZgdnq$3k=17Bp#w*?*w z=&l&cL^crlcm_Mw6~{2J&m^a?vN-cW?#c`$@ncB8{*uJ(*O&ee2xOX411h21E(mfP z)qk#mOUVc0$cqe%yin{wKC}qM0{bWKPRrOiX8bCIXg^R-5I|bPIt3(}_?!-c_&(%q zSApkUh9%KAj@?dj;YH$MK96dyW$eUBwm{fDgx#}z$e-&V=jZwvdwSfF4NZQ>OLCNn zYakkrl=8ZfS;9DEO(YX6+9-AS2%O|7oYqTW1&xf+l%N4YB^h(yIWZ3q$PC?8 zdmb?Wto?9MOBId+7FKz?9gLt`7||>=(=3v4xjI9&SbKAj zOJS>#c7(eNBljJ=0SisWhmZ}3CFW7P3}PX?AvB>T z=owdoK>sVP-Am)AP`uQxCgv~*V@atY)~y4n9<8~Cq)c`H0yJ-} z6H+VT4cCNd2r9GuEdFz5g7(gMusDrh`r>g=x_&+kP|yrF`bJ{MF90k`w1_5DRxqdL zq%?`qa&B6hMUA#PboWs%NF~ON=U-n?bDxT50Z>_7Hz9@)t;WNw0QIyO=`nZWA(~7@ zm1PiYEKY(a55I8Y<&obM!0$5HQ0e79p-$`G*bJDiL3Bx2lr9lM))Jq;#!1EtYy0?l zj6pXqH3R6gT)$z4fp=YBGNSbH^3vl*f;wkZ7E~h}84dEW`%F&@LMy`(RHlbB{HDM& zsd^{;=-U=3bo0GO+NYqNGVw8@gwkcz9wMG7D21@|tJZxe2Z*a3zzf?PXhR<6+$A8` zR9WC-5GFYO{4p6zBo}Zh0|BD?TBHO+t0Db+&`dnJL#qINwNcnvoGiBi+qLe0sn6=y-XS$bpwci?>e%{1|b)OR;Tepi}*$)@wX# zD4*8iz)NO~6$slH+#gFYq4Q!42#u-p+djrT{`=N|(KMkcr!q$=@tvs4$Kc5raygOQ zKhh)AYFbu6f(Ym=c(?xtOg$qC>b>VDrF@$1N^BjttBNC_{kc+^7x5^EZu_lpEvOGSqgUME5`hpYb)bMs zpg9t)U)G*=$gDO@0JJW_1&EHka0o$>KB&m*lT=-$UVUV2gz0D@;}Q85LTC>`os?$u zl^b-@>__?K@a!RLYQ%h zTQ;W$A9mFZu#W~}h?6uotU+b<*@gbuvwUnmsUYoL1n3b#sYiukwx^(>z4ct5CDEGV z|Et?l-Kx@|Y;U3!Km$EtUwjTD{w}MK>cVix0v_o@5nK6*G-uc=I!-{G#1IJ{RS7CP;TXJYN@!5F#nRhUpq>i8tW8((1 z@X=PI5=ugjQyet&UeN~7`uI;uZ_N%=Ly+xsuFZpLtfTg&Ky~A($V%UfGX_D#APj%H z#=)MqDM$pMdDykxn5BW zf6}dT$?vorG6NzV!iG{H`YyL^5Svsgx7hfCHX&3Ar zoaB+vfan`i-Uv;m2-i5kNjWV9Vz_+L0~%l5<&UpT!Vr18Lv^-8v!2io3v9*(qpQei zilZHG4i#srn>x_#ezc8ed&O<0TgKnG+2ROKP9@HX$@yp@4KdB?&HFwVMS7o2f%-$6 z_{vA8PjN#-by}SZA=#OPfpNAk7GJeHl(>7wL>`+w7J`Bo0?%!z-Av180kxWD2R!*k zNmZ;P&vS@>_H2j|Yfg#-JP4FuAY^kq)0P9#^;fNXLpP+{Td&Aq z7aG#>h`bhCH?JUeQEM&z?GphIP3MIIaD)-<00DXQbgutkF;LrG?*yOodZqvr2;F>F zvW*y$Z`fcRaHz`5g?`QhVVGV3FEJdY)CPMcbGX2j4FTQ<0KC@-E-lZq1$8AcV1pON zU#eS&u3_$kv=(u&m3Di9N6w86 zTwWq<-Km{}S%{nN2pIsV%cZ7Duh7Iv+tru~0l;({gnr=c2Ed*l)y|4({2kM6P%&8F zIO8lRJKu0mu^_^f+TYRJoaK8p)H$>u4DfDL&aUiC#?p&hFlwcFdDy~i>uCiFAlk|& zDLwM2jQT2FP}zJSBW~=MHfq*Q6-*O0MMob6vVm-Lv=izGWo;ld>m|F3!9I&xGYH%h8ko;9 z#i6<7gr+viWLpn+AJOK>Xb>p))Mv4X0rA4EkMuw^Xa@u~`7N#|^(!9L6__~J=ML7e z0`|C*+dPQzglPwn>b4cf*lBT@AMui2ZUgrO`KVF6tiNctR+VC3Z+pwWpDPrdEAF*m-FJ)svsGg%V%2;3=HsU{eKoCTzKZa=Es3jDcxQPt&YH<=NpC~vd zk1~-ncIO*DrGx^B0C#f|3KXlTw0grtKAXPR+t}ccMJ&XCcwJK(fh8CO7vyIp z7D@)kYh^I!I8Is|yHC7Vr}azepp$e9Z6PS_^OC2VMT3ZOav6?cOcY1$l@3t(q%cZFj8`P}VS3bhpid;G{67n7ydQ)&d2PLmPwO7=%a*fv@dc&dqYGe;GohJ)qKj zi}@+|=*A*_3ftd9=K8nP$W6Pu#VpiQTs9d1+>q4;cj=zX1%`khvLc3dcv2w+b@B2_ECN%4?_kH7Pq<%v>LC95%b066h`&D z^Cf;GPozhS3#%Pay;lq%6c6^{&?=7=I`nReNp)6rUT4}{R3sakj`K4|^+grAZP4MS z-rERi52?|jZFL;jf@ql&)A(sQ6jY{3%-m%wm<1Zf0zJIwqaS;C%(1VZh%w(E-?VK< zNAipi-toeVdg{q^LM8B)VPT=s5 
zm+rR6ZZ?^knT!Ftjvw<^__6=k0|mZG zXc+uQ27T5j{W}c=o`V5e*?-g31gKOtpg&gEf!BW_s1VJv&jnSLn0kB|=mj?9$7J=3tBLL*;anBN!lS6B%=n)Z@_dXnK} z^SAP^`4OsFq3xXfH%JFOijd8SEwZBK{$1lg#u_L52IgpB&OJ_!B>fqqA9&4yQI{zR z$R7?t-+}(E<~D0}56FL@o|4vp|5i0)tSee&mkQecamwW5hW|7H^Eyd&y9!j+OgN^z zvJwZog)jERZMZZu$PjELtRdMO{PP%0%@$;jH$LN^4ag(Od_>R52Ke@cxc+dZVA+^P zz7d*Do1=&BC@zhBup$WcbTe2$i;e1v#jaP4rub}V+RAV2c8M0O2s9VPdWBW`Ly)zN zkk%^q2Ok2VSCq+zs7a>~Ensc+RfcpX2)Jc+)gD zre)RHD5i$XV%@#RS0#c8s%Oi}>=L7&t?7?smMSjy4DfE4%=^?2Op`u65Q4Dt8fO@d zyHiyh{a((b$?Z%CjeUK1$;T-O;H}@E`M$qMyGZ4F2F9OD7Ld*sLtQ=@Ihj@gtLd~& zbXQ1^k$UTivgUh5ao6;XaVu2M%=tt#ZAHox8^&pCP~8LX`Z~mN8$s_1=ju;qt)?X~ zZ8Ej<$+A0X*%Z#CQHji0Ij;d2dA4R0l^bY2{3h`iWUZ2E6=W}b#-H3j@(u8HZ5AqS zR|`A)CpU;)E=b7tW!G?8C&LxBmSk8m@sUDJajY}&^2 zVwf%G$H~*GCxnKvihLsD6^e`)P2-8arxyNE*fS;_$ScIWf0S&J_&x}u8r0hOVK7m_ zNk(NWOz8!@jgQ>2|AYg;m(J<;`Kxr0W-)jJ8|lbKY&>5cpn71Y3!By;jNdA)OO#%; zy9pP4P2>EH&J+{SB#BB6I!Xrmp!Co!T=za?{lylqoK z$cG1_CK9yYjcUZ@oN*c)t*bf zUoo4Dy2geD!5-GmiQ!Kn-p2&=FYjzSc;duH^{TngH-_PwZ+v!sWE=2xR8mz8_Ls{? zRzG}Jd*RM}x~+NjydTIF=MzL#hFPYdy=0}x^HtxUD4Q~vu$xm;g?^R{Cc*7D>k4oG z`N8JcOBXSIKf{&(D^K-Mr1c-~9{g_H$G~@oLCEMqD*^LS3hAB{G~RJhs|hn~-b9iBu| z4WUcTsp|`Rje+5)#}Sh->PW_HE|F%D#@ba32JM2Y^mN+m{x!qi$LOdn>Q$fRt$Qi1 zd|BS=Q!Tqsx`!4S2cbvGtJk;Io(O4=6t9ms&*^Vv^LKAXTi_U!i5_#`quKSVt&3!o z5WF#x4T@Q=;*&BRi~3bL74(-1-Q0WfHEX;KVWsnpuTw$s>PQJ)j4(jVwk)-Zch>ih zN3yM2ze+@N*7B=Ls*iW;iQ zx!tvBXyJymy{r8?L-vlYk=kzX?xP937Q?=vS(nPPU0NRqh*BNn=5~^c~n5Y=^i#p07c963`=SX zx|GHB+zDEH;*T_^4oI)}cnMb`ix&?BC(QE6`dkY^9YAw=E$(~_PEzH#FF)v-6VnSi zj2UzGrs|G`05o=FyRjK{o!PA^okNt2N5W#h7RLi0W)%Qb%iLhzb@^FPHITb1`ugD6 zFYr|JRJLVnRYu}^nUV8hgp^uAulu$IQ)&*{d-wCxWA+eFIIsdvQo11bPbE6m;2Ar1 z%ys7V)N0i4bve6%Vn&Q{c4##qj+CQD#mCq`5Uo#2-B7UN#+{y!M(!+-waU^?Azz48 z$))Pl*Ct|rzk|dLRvkl5%pB8jbY(7)GmfGgWN{n2`Na7k%O7(avkRO7Co+@p#hlH% zt;^|t2Gs2WFH#e$3Uou4)Twk5<)cFv2ZNzM!M3tRP zPyQlcmV_0}q1UjNlC{7qq<#B=^QAeER?~CXs01R%Bs?)TLKwxH)x7+pYnn+EZ^s8Y zQCfD78y28fO5>Z_?m&6H0gvT*zA(7UidK7Kiy?v)(nJ&Qg}|`Yp^aR8=Ot*`BOy^T z9u^36S2%kk>9aw!GF469{p%7vPlSh z!^KB0Gc)r1ywzwIbWVEaqVK7P&e%S8)jyf6AuUw5gmdy+)*2+%-)xSgWZX-*li)jI z42scG>G$+}G@Aw>Y7BQJcrlyeHRQqJN81XF>l~0-T1Cz-qw?jx!(zsqn9U|LusM@U zH=8wuIB2xxuKuZ{d^v!FuyMI5Bz^IR#U>bixa`49f=9__A0@h7FHfHsF z-zGH;HRHG*x`K%Js65}4Pty+KMj9125?h6N5}KQo5mm4O6flLz(=G05rT_83O&s4l zvhxTwhi(7nc&=naFJ}KkBIjg3G2Yx82?{TL)4F0>jPrjut~EMJ zS-LiK*Jc&XW-bs|G9c|tS|7t#>Ii5(>THuSeI=+!W#zAw->@d~_-JgeO$M=-Ua{Xy zG;)kACx?qIq=nIz)$HfN#dRfv9-|$9YJMSamoCOVAQ>3m4oNUJvKyC|2wbB|5Rc>E zJTc&p@k0Q|q}#@<>$8Cc@rv1j#YDXhHr&}1)W!Hi&=s6LPJR&B7PB+K^c?t}zjet_ zEvN_zeNo-wqpMiZ0S#ZjR)fr0F1E>L+#_e>MEQ<@AL(QwAKnr46cdGQS73{#?P?}nn3nWp zWf`B`*N7FkKN?HT*czWXA+jFXIxaqf5R>1AU?7l;tzcVf$>S$=(1w=1Y0&DqL@(5f4 z2f(Afked^Gp=9-l`sH_A+M~pggaIrh)~ysD4j@;etL8y!2_yb(>x()saczcgM{R?G zLB~{MGhPW2xW24$d7dgvc<~WDrk>4can!h!8YXXq8oBh$?OX>Q9X+Dal9#B}#=d2m z7{rKd9fE5%%`+CEYK67}KwfL%BZ=dAWY9vL+4%b5g8k zN3mSEjsvX)LfF}*Zg@Gwtrg2GrPs3SQt182grq)SEW)d&hZ__JD-G|&_{O0ip2Scd zkb9;E&!GMki-TLQsC@`Ywr5<#+u(uWjqJ?{VE>#nQFxw&(h_jBcv1t2Yn*D#uVt(syA{cHVB`Fj z7z7n}dJL!GtE{qu3Yr?=*3u+`FQ<4hs7h4@`J+y^ivmMRce5+cO*UksEfUGK4MA2# zJIAonThU9d0zrlqW#Sr#djwHXWgLWKwMA%9jXK#!U~kEU^5i}tf?(jlV~y7hOk6bq zm)tuo&&t^BD0}O1wyDdI=rVAqpwxYw0fFVP5apiG!j_K578b{{=p-_l1*f=z%2Iz9 z8<>tSjYB^l6~|~AExroPylHq?`GrbHG;Y5{h;Mmep)SdyaAF;0O!O?>FbINTb>>L4 zbY#vXhTsjE%5p*_^&xPqv&Q+kp*%|u2&nAxAA(I4W_mVhUcsge@F6e##~slI(S6?A zc`Ncc<7{h`v$Q8GI!~OG8i|w=EFhsRLP%(b0o$W`M{x?7z+gLhmiTROWTr;2vNWn6 z6OH_Bss2z*mxbp34*EG;kk8o}#Gh_Rgx@suQ4Z5)q2GrOA&K3G-3%Az$P1qc10t>nFueU~xPonAAED zGIk5W?j*1fL}SmO?5hPvB}m;Wc)D*wAEMWH#7zrmzIB#Y52gF_xSqvvh196dET{zl 
zKJa9QU=hgu_qou1_%#-%`+AC@rOpxlBj2I!BCQC(oYpyNRC(!yV)PS5@!ZCDSxu)o zb&iNFh|XSh82D|*>==VjYkgCB2YtI@*YKqNd3n?*TkbI&KGGFJ)~9+oAeR8&ix$f6 zJkEIai1>tHK@>U^)mF8(F>5gqky(fhR6MaDo1C;9=nU>{?JH@-z_vQ_{}+4j9+vd^ z#*eeLwXL!~>pb)DX=VpYn-wY^aIKc9sd-AnQ>KO;!95_ zL~+xAHG0?DeDW?@)yI`3Z1CYtFaI`x`0XdC1?)Hrm$$$xB)dB>$CU;WZKeo}-VN zMt2k5SLd98b9R<5>vzs+Tha*e(Ih4*GcK>s42-y2*BKH4YCH4JI=hOmx`?-}^i2rz zMLU4S=Xpuh=h!!24L@9n9tr~2*hG7Qen*(DG2_)ot3R&yk)?~xFF^rBu~8R6LqoU3 zQ$@PP_R5auUpZPg5hY)MOJP~9uG-t=ILjhhCnJS}DGx+mI!k$kOoCF*2rmQ+^V_>c zOC0w`-W4sz))MKUUlzS{KL1in9KWy$SN*ib=C_JA`uv9t`Ui?|8{UnIQ9@wH$BxogR*igTn+ykK?aHR*xM zs5pB7L;6ni1ZsQ}MeMf<2bADh89v|bK)x&%H8^<(e7(3me@SA)t`=5S!l10P0}>(R z5W!zu3rUR+BPk+1WHtQfj;WmB1N}OL$J*lL;FQc&^d;Z6^v2dQ*q?%sZ*uQ9Zv`hx zy?<2yfbvtdzvBkd#|Q0~qGr8BEDecQgt#p-eJ#LCw~}6-KZbrqQ0ypTzkmc7)rZaa zha;%YLvinXTM*!eR!nVqi7Phb&iFL-H%H+-m&RoDw7QO-^Z4~|!-@?-zedlhh5c~1;K+-Pt6vFS zsR+Q|Er0?jVMVzp<7ci!O6j`@92IyEtXJZKF)LSmMPC(&<69)2K?d-h_i}b1IUj?H zEZ#2R0!_Aw?oD4Gz_nOA{v32N@sj?ur84Hm$Y$o!J@Ib*`em7cGa#WygiB76;0I}# z{?ESP(gf~IAW1YA;!8w=vLqgifVS|UjXSGbUIgU-w4$4A7Lb+~|HnaYHWs4q`r40A z2BI|?8D+Bov^*GoZwP(tE)o5?Y^U9Z7al^7?rMt6g@?5x=ecGQ!O6wQYl1Ku$^$TO ziqlW7^#TaGYcENB()3hpYUd>`abJ%zR(u09Ye6P4nbFoo`-x#!Q7~hPFB^h6OVcC; z8$!t**D$$wQUC!?qA;YuaYe)n53DIx-w-XYRPxMor2AAGg?rBbXA&G7tX@R%-9eyZ5M_>WMvg{`_K(T z2xEBZT>Ua=ebyK4v+L^78=b^3z)l#g!dPg7O(#SUQQF&OdSAN#0zHNma z|G8%X`WoPv1pq0g5z$Y?4$|$FAMBblHQ)Ida_te}t2Lz~<;mx`pX)V) z`~5Paf@aJvy|K&DSoeLtz=hLRFB{dA9lz>c9s2vuwQzu)vd5fn;wl!1GrGR z5PxTI07}mgd|J`jflccufNSXp!ubu!?m1*(tP1?Iz?=Uhg&-r`BC3003kx)#93lVR zL3k&DM{g=}=ed@mu8a6(UsG<_Zq93++pTlg!OF~K6`=w9`jmW^z!$=Zu<~pS@=?m{ zQ~{#jF_lHcj#jG&;Cg|&2PXKP&6TOC@Vo+vhVCJA;Ka<+51DS2kk>_c15mau9C0xx&wf^GQ+jY=p?Z0dz4uD8b9FMG=w;s(w=oc|(lwP8aX*N~M1+ zd%?ijy`yvf$Pa+ufZi?tzdxV@fB0@^=8unk-Fr0crs*TMC%=6K1Aa8cmeU)5_0~3- zKJ)*DVFGGQLXX}5*t7z^=$-!L-*qm4zkE*`U>fPWU8wgbDZn4T+uZAk`UVX*kyAk4ag@UD1iAT;;508?)(nt1ojFe>=FGr{hP}S-zOHd|9EFQ&i z#HvvoS3DN2mR3E~vTafxAjO9jK#c@bWu;+c)oWH|jq)Pqqz>C8GKHJ0?SKmEpZ-Uc zsR3mX3Z30$?i6R6rrr8QdbSFC1@d{))myjyn5K?BzvOx_~_1Xg88sHlv}{ps!A_SUNp{CD!S?imcTQ z&Z=W_gQM>H+EjuCGQsNRVJ4~ZURLi+D}8ZQz50n5k3MTuMX)5|F}lT^KU)G_U2z&h+G+KCpq(1U5ooz%ddxwQq}!>(0=<-+)I`5EVw8%p zH(%72h)_FHmBs>=1~vP`I|AVM?PCwL9V4bjw=TVJ6#qN~C%h^TR08Ooe`z5Vvd3EF ztufP#SF%P35v*67Ll4pcyya*W)l;Uf-(HWZk-5K&eS$JGqIQ($62`WG@$099I$Few zSUsz)){31wcK`?h(zmpT3#2~aybj`7CWO~`PXn;=cfFumXI9gSnvvcc=6O_nXWEg| zt*CYSm?~z&*o)wl0PWbSIMi696X-=t6;$KaJ9bakuNttbN`Gd_a?r0?C3qH63nu3N zi>Fmz`fcHy^3YV!olYNBQN$?tW{mpYiU%+NLwPmhgg}nt;ipB=V$PE`nFjsF$Cem# zu||QNY0xg2RfZ81;e{fNS}N+_Sizs*>N$ER#flZz&2pTp7-CH)n6Q?~#;^poHfYSA z>1=rDwp(vo*Q-=rRP{yY0nM%7>qiG-aD&luRiEd8n)PM-T3f->w`YKt2OFEc^NrlE zfNLf2?}!0wayI~Ip*moA^V^^Wb(-=*egBO;W97Sbtg#QSW{rh*jC+B>f*PrK$F=lY zk@&IB8JH#44X-}bgSwyoD96K}9^*q0zioWHu~)wg0oYmK-({{!(TM9Tz&oz|16<+3 zwSf*6`6Hy%jE|3Cc`-JMrF2B?k8^at%<;T37GZqEylD@*+TXRey3l^aD4AW6dq%+Y7Y^#kvJVbwv7ZWH^^^K}$12iWrJV|^roilB1wle2GvgC1}G&HOdm~fei zE!q(I`d{x_8Ba#2hP5nvy=j&px-6BRHS#lzTNg7BOg({oXTWj00A2sy2XxR8C$YLiAJ7#c z94VK3w1e9enJ$i5~S0p?!SoCh0t6(e5LTF=;`l?y*fjj+w1W zHF~}>t?ScPT|u*UkLirErgvCA%kc>9e2o|AmpQk`GLfS+8fgE6^lllxw}+{LLx+HA zoP+kygAVMD9`pP#)HH2Pf$?P4=*lO_pCu)hn!M@!N#d5&XUds9y|3Ke2_GoM4ZQ$BdQl71N9 z8S4+Ko*q%pN`xFbMS4&M5Ercg@Qm8Vu@&paR`-Mev@oU>4E`8}%%VV!mIeJTS zG|1(&eOFA?<#`~=uh}YU1Sje`VjU9d513FtpUJJx64S?#V3ul+JXI&JAd$Fuy3Ex#p|>^6bvbe`6O9$e>1$iq#qqY{?e;gWTSJ+a+`CrF8rCP>otW8 zhE>F$3G%9J&?^?AKfD8O8#G{=7``EXds^5J-bWzzFEjpwzUwuBrc<>~1H6%oA28N5 z5J3CQ=xO-Q1O0PDFYHt&t$T;;Y8;Bz|987up>w(N4)fJGV~^Gi#sv6;BI;O)jw?Ws z$Lz&1RfO8j{M~{ylP&#~;_P2`{VhQEc&F#j+W=w1t@LfRb;EpWj?&?hH7ar%zu#O# 
zF+-lBy|exAUySJWtX0%9I*491zUZ6*6BaCrqGY~^sFxJfW)}Xe$?T$iXJ45;m5FpA zyPcmI8dKA4>KohV?V6@HZDj|8nWJ4%J$-zv9T0x1_v$&Hri#(si$4;uXY4ej^=ek6N0M+^T{OrD ztlMRZpSx}jXmm?zy2N1dqu>r5IKTOBr73gz`19sB){lQM9Kb%-e{vK&gy`2RKK=8N zeDCI{itP+^7zg#67b%L;U#fhK9QATTjH))Qb`Vus0RUg`0qfYl!=3~I0z zQ%|QA=#8$w{pE1e z9q=#xgaMBt+k?q+k5b!vr*Raehy4Chq`Pb&oFE5uTvAxw4Azuq6xZD%J~(Zd@N!E= z->2lGn1qSxq(3qbuWb{WDnbQ!Mi&2CSXf$@SehDQTe%z7&(~-|v#_G#EJ1ZgE%KGb0z(b;CXH^S4j?!j0epaE#G`}eY*B;DoW=!`< z5lYLDSYQU~t{AVjfAu|UkJm6fM;fUoK2FMn> zcKP81#H>Z^GFnTq_WVBH0RLXDK|hM6+Tgo-xmCb;8OSS04=Oa-y`Qlq%9rto{Y-hF zxbk{|;*sJd6!C&km50t3BGgoVM=^Ga2mkb8G!J!|lq?H!ruaI?!rLL>UPDxq*}&$` z4U{;t3Yb8k?C?WEp7FWKJ9a=M;8t`7w{MC5RpbRU1?w!M6)w@i&cJCrDZCeVo)*ou zSDUKafvvSpXu-Mx_{$;e*bZ5R{yy@74o%`-B+`O#u-k-V5aiPo4)u)Tv8zB(aa({E zPstRH)_5yCgA=&LiTAUmr!ktY`N>$kFeMT_?HI(lVQp`9N5_D#v&2y_VSmD{3NWP$ z4x>;ZxQ-4aRG4ZghsTHJmTPgukWH$}=3;4>vRfC2Tic|ev~*~P(HvxJYMQKhCxDB_ zm5Ut9hU`lO4ZuVEl-tpIl9-2Gnc*=t+n8Inga$nIPolJD^e~(afB>m*ZVFNJvJfji z2xlZ8!;&}v?Zf5_m1k4Cp3A$u7&-1CrNW=pif?I6I?RUB!ZQXK#aokmK#o~J)osn7 zmch5RS|oMV`Hx*OcNRElQo`6!{@xodRYIz$L2`f;<);$8q64oH!3DvkoQ(*o^kj)N zL@h;+aCy}ZKu2w$^>QUZUe>4qwgMG@p=^NQ8D_phK*-!gJA@crIRu~s&4|*v40Ghv z{G#o4XJlw?V0UD(uG{ZBYA~%FXs)*Uz1&Q0;MI`=`lL5&d6Lr!5m`-0YQ~wQ@xZdT zuE0lF9pmO7MGdB&iG1OP@ym`54mSAE==IUDEZ!kRbQn^ou0QB+uo-r|r$e%>+lL|- zQT8T8qEa#LlwyucSnC0gGNAbC3tXP!>ayY6+2RxrY%RoazRfekkv1J**8twx2jd2* z0JP73il((OU2^;2-Gj?87s6-?W*@c35T%sr?&nGUY3Q88k%-y++3lhUzn>xxor%3z zV3gIB%+-i;Ns#(O)ex1nk3xT*lp112=D7l3$_hqRSC{rh*H9%?h&FH-RylRqge*RK z7E`w26jF$u6~WAe>+cD_rctjC8a>e%TTrY zDSX{2hqpPUM_rGuXSA+DBD6q`^Na98**WSrB;WyNv|q)J|L(8(JO|+bBgwVwkAnL z(kxFxZM|w*$}I;1B8q8awpL;8%Vo}kaJTpggKt#CWfll`yojhxhqreLL#{_Vlky`* zzzf+~$Wm#cx(nm5;6L<=O)nk?>v^Kh9!h=52U#Q;drr4K&7a&BTyyvpuDvhP*py`9r{j&HB2QaD0$VVB_sJ=b|IS zUpth?5a~y`#Za5*;8Q7`7o{1>Ev$M3Rqes7mr+P}D=xaCSY zt;^SgQs+lWmRKn!?b6jQN^&Y@?*OdDFX{wY=~Y*90BCAkmI_;2`z?xvj9h&Oyg)&~ zY!^0m)CaT0qG-#2z8d7UOh@`o2@LTJ<5JkJGjAcT9zrsmWJ9YTun*8g@OHFxih0>* z-7UeTqKwv*qBhhp{q}@2=n21|E(AcIWef*Rb;y6tyRA6+>*Dc;tN@@| zezE#ijXAr({E!bcJD5$3jwp%=kmI~tQu_xAYy{6!1WSh!JQmWNNKvh#^jFwW#k}^j z1-Fi8sc1UadkkbmMB$PH!Ei?K^zWgS_*@sYmu*jt=tq{El z0)X6{e}dNr@*;CmA|0&qx)ZWzZ@oI6llfyh%aU#7II`20_=uV<@Y=4L>*1v-p4+MA zZPLi3*|hY;CLx8yRwNdWlw+^A9g=(0(wHP~8nRcmJaZ9O_#A@tL3TaVmnWg^sn)hC z2~0}rBif*3oGYnQcDk2xX4vw~G_5V7@GwQj^T;moCFyD9mOylrJjqsWvAQ_7k^tPH zWgJL%$|?A(AGa23@3i%ID5`CrpH_Y0K!@by+T^OE!GOo|p=PTW3HKl@VeMH^Dk7sB zWK?NtVy%HtQm6)i}Uw zA(&nm;_Sq@Q({ij03H&fZ!q$!)46L)1zP}q2-C>r9?sG1B3TFzBS}k?fr-Mf7qSF+ z-pZjXuFl?FY+7^Z<6y5Y&ZRCSY>IXD+B)`6Vf@4}g_cmI#(SVwjx7=7QA6shNO_SSWXG-xMV@l^_`CM&yly8Nz!CAaFX+ zS2M>l(*1hj5DY=iM8DFbl{jy*gu(9IWC^&kFbkwSJhm`U=~WnpiLY#F6$V+<%3KHN z*ceYMzQZE@ZH{wQj5jp9oNv`$aLf2^m__@NwwTH)z$r_p68f3817&%n%UC(_Xk zOYQ|rsyfmN=jwA3zs$N^&Ib&lyp`s(Zo^Ufggs~i0ek3P3AIg>yyPi@66ico1XzTXjPil> z$wVqMvhOo0kY5JXHge(=A+@WL&We!1$AO|`&^&e+txQOc7^IbFrrt|SYA&D^rp!^s zQ8T$Fyv3`E$7K*3Fa=TCOnv}_-}RWp16bB(S|0GSg0z(GDNFfF(1-{Ek5>ya1C$?V zzj^au(NrVTwwC2JQe?~bJ9KUHhoLgd>q$`|u9>1b5|^;#0RX0mA+U+bkGitTFJ-O2 zzWmke(U(O@0{d`YK=^1EY09F5nW}T7Er4;h*9KDa4sR=#1X81_c#5!=Vv#0D5##!k z03b(emjQH6nh@D;xn^#AEt-2RC2AZ0S%`s*;}b}L2(o{E`PL|)a^XPt+}rtXl$K?O zWSGqQh$J}m4s*Na^=m#;9gewKK0Utmz_cgW+Ey12=O1Mq+jrp3!wkiFQV-_~j~xRv zjN6hy9n(*7Ij)X?I8iy7s6i3JYBiENWPYW_iJPD*iN1TP##oxky_88UrmB-^!u`-a z%J8(abqN4rDwF3-gM7wti4Ad5n6csGHox`C+EWX6m)9V|`)kL~_?UhOb8PBN}O<4IX@ zVJ`K?!S(Iz0omXZW>R|SnW_iutWjOtiUAof<9kd;u=CZm%Id)ZW&rmx2XECLTQ?qw zzM!5%U!=50qO9@IA#a{WC6-Yy5Ln9x>0r6$LgPvGh%l)uJZvs9k3nOb7B1uGiEdDf zg7Axk|D|PNRz%~w=*xyy^98Z*hSo%HepzC=X(K^HzDnhL4$;oOk{6L=XS5AwTHSGN 
zh9{Eo0rXIPWRux*UsnLueTgCbcw{mqAU8iUpWf0ZONeDfqbXzP=q57tS$2&N<~Bt) zA52F$=cI^(RK-l>`n~p@5E-~HN{^O?pj&W$hgsogTo?ItmEK66Efo-u9&CpprN>^z zdyacijc(jQwXLNANc6v$-a+~DL5Q^LvXb~~dkMB1O?JKmfBJiotWi^JA?!#>70j5d zTFF!T&at9l_l#s5fGt(H2uEr{rJS9vlwtEkTtc?|`Oee^J=>AHlX~9eglm5n?MiL7 z!%#vmx;aYS3LymL2ISY~Zt$-t(xg>2J7Z?2onFyU6V zcbwm)&lT$iVF&Cb4MIe`iYtQ)09z5?XYT-HijxkI;aidjL|VmPE@6PXBIS)Mh8!^j zrMl5R>6jTx{Zpf&3)$lwmM`A6V=#H3F+Q>1Pt@8$Qm(XTr&4?&4%W9eP{txPzq$fv zyfc<#pMWu_b8ENJ)HOVYCC;9hB&fh~B8iPIf{uago4s$h1^(@;qqmhCvM-58Lvf(9 z6IJWKwhBxUjX$Aiawnb0%ok1TKTZn!b0b4Ej9Ny#Bf?%~mWsnabm0G8R|D`5|GbI& z!e){!uYrH5_`AN%zn->7+%fb0V`Ri})|VFHod4s2{6p8F<2)&_b3&fwMb7(t+&}b0 zOsNb;J?wM0Ehf zW#~bwBGu#7wVTSV*4obs53f@-MUSl?|DwF~Udt6KvhfQT?M>HuvSInp2ByF0l`i*Y zuEi>sj5m&*>oiK>gRIq+)@$1{Uzu=e4ZXa{A2^{68qH6GS>4RCjahke)0_yvP%p@~PRH$6bRMC0r#byZN= z(M3&vzc*h!4CorW{w3e)&W69nWMtQ<;6jQ)ywkP=bObwUP*3xGehm0t{5a&oYmgyU z89blv?njNS5Zkm*1Ib4VLr_VQ%-RzX=61dHDkp{)r6W2-f{a!+Gvl z-Mj!p2`XEuy^qAT-G#oAazDRzGppDl&JUpZd^cRlux)p(lY-P$qeZEvt?UH)=&BwM z{-z$9(d)mOFk#9OW4UT%4lwzz)!Sh>|7d+7?4Y&cr}Y#?BClzM;2H<0zcvR*?3j@dL&e~FPkt7-@p z<`#OwgfMyA`tGE-x1Idp+t+l$1Fl5tG z0f^v*_&K2Jc4s3Kb-@Iu+M_l(mqGmI!IuO)*Bm5wuh@YPK0ekwws0nYuXiQDu?ZkY zCfo3n?(&y{gSS|31c!oVfKy(5|B_~?bunW~yjeA=@ONN1KHt$!G_qc1#^m6# zcZ?{%)|yyo@>unenrTOny{a?C0751~Qx#GM8wV>*Vk9P$AwlrW>4ZrV{o37|w_ax79-3ewgnm`{ZrH~a zm@q864gDs_;Aoxz^q<4k*No;bdra_;n_^hK{Ih_I7|2)^-GgTw_YG?S(lb0O9XL!& z#cMkk;v=?PpTP1>0#qgwop5eQ__awHAW9&h9b|?bvIutrZvmb!1!!^OZfpTj>@vL^M((p?t3u&>=!sk`a=$ zvuz8%Uh=NbOgU=`1P>%zyG)+3q($qlHAYae>Ii4OBJqP=_C}~ob%>j0Jd^laCv3aY z%5ZsYSIbz31j9>zY!2EkW$k2YJg;4q2bsW??`%kb@+a17o@^muUP##h!u@5SWX-qy z?YBy4_o<8KP#wS($!L@91asP@ttq5rgoDMV85ezJ&JWsMz&lQHuQeVd86pvBrE*On zjUK9euT6OXG*4~;+3XJi_^Y_0OCsLk<|T)>=*L`mS#-0&C)?8Q!H+QlC>@SS9EdPQYcp>%D_ad=sj7Z_1- z9{iy~q(4XB%^*>B%%$spmFgq^P ztlwN1%S9lIzgZXK#&=qz-!|X~S;2PF0NNqxWPo&=R@jQRC>|my0R^@@wBUCm6kCmN z;*Htu{LKvX`>5g3rl&Rr^`}NuXzG9Oj$Ql+G-pt|2_N$cZ`{a40f(&oJtkU!{Imk8 z&iUcz^R3~*?CRwImLc{Fs~4v3nDLxIg~xj?2rq|Lr_mJqvQnn)>-?yj1&W6xQkvKs1>SI@r4-kMF6TBmM7cK#L?XVdK=@(MV;0?>G zk+N_2JwSJR9)Isldp7h3n9NpI?oE5e$sZvE+L_G3zP>z$8w~f^g;>xl{A8 zNwtxgX(HpQDtKikKBx#R)IX2FcLR7|vI!NM!v>f{e-q0_4IHty9>{qPDL#qO)I=HG z_@ErDaCS5J6#fl~at^ zpBgh0)3Urc-Sm?CLAxNc6RSsaE@?w0(!pGS^r(@DeiVd%fd+2G%P5z|w(&0=HpnWd zYGbhC=ffTd%hDe9;$Dru&3MZN1>ND@fVS=2#W+2tp;351hxfLL8)VmEOOZpMX)00e;hgM95t~F2nIJsY~>~TlaSJh zfzMCo$zRI451$)XNJ4&MMVelvr2(AgK`c?_@PMl(awZsf(wDK_bq<*=QNlRA^TEkM z5&DJp)H7&KVYh^cJwS@-2A3Dgc$bSZi@tC10S`sb(M8fE9Q`ZAPVQk&$dWKOD8DP? 
za0W=|^KM>bf`v}LP zlu)SX;Wh;4Ue->{l4~2Zypb)@5?&Gypc4v0FZa!FCu#sr_iVHk8wuGPeId`>lgU8< z@24X_>2i+I@=B9uC7rdhS6s11M@L-DT0$J6eD4R^H+AD!kJ;pLrKU2vb|6L`QL9tE zi3#8rFTY-Hy*3Qiewz|}G$i4L?H}e04EkQXZn3v7D~{lD}hEEl7$q8#3)?` z9ZP!DuHfj4)WEnXAI0S|s6YRss~PVmQf9!yPd@f~JMIdbQ3d6B^2B~KY^ewkfq9)C zD+e*<#~o--zo8xK(}}7%g%dg z4&Ep@7%mGLD@*o|kP{wL>Gg@EF#^*4GjbpDK@yg5zAh?=U-k_KS2L-{5rr2={Gwuvb@H6UN>a@Ak!_an$i3eq zx`;KMyh`ElsJ#%7bwUzv9m?>kpepj60~3m}X#gC@q+=ul6l+QZ++sDg;7o}K@cPL7 z9t=r#>0mwcr`Fsai5)2m$w>d;f3PDrOt8nQaQVteKWl4INgr)$P>E5j8@kVFPFSq0yfpu zCZAlsmF?f5m3Q7>+Zjnc{xJ!+Zh?8(?WYolU0uD zo!*hX9tDGo75i0=Ijr^vqnt<-g%*Y%FoJfCArcR~QWkS@4tv&H_2qxM$ zA>UJ_>G1(n7#8WmiQ-5^WDeEl2z1IW=jz&JgG>5J`N6+#q6mbq4Fy*_Z^)J=N^M*T;27Tnmhc zP<>I_sL*1zqkDXi#iHdj^cO^({dr$04NMjtT~$`TEd+FLN&myAC8qxkr00NK|y z@$uKYI)8YFSo*Cj2GnsaF}HTU`^9g55PY@lV@uoDE1vVomybdcfB;?k!$sP$*`deY z9L|2xfOOoSq;%R=GS|`k5jbW*f}y0D2+&RX2*2jJ0n`_Z+jF*&lj7_FI;_10ugE;~ z#Y{Iqc}L@27eyumBh9>!W(?V6q{%M`9u{h;vh5OAK7ASgv;QI2eVPe-`ixs%SnJ{s zyag)TGcKpf|KTUtr$uFs9XuE8%3IUl3|oA1`*xE@2^QY=wAOEq&XebrIkt-EXRZD6 z7nP0>lOg;SL5B0K5I{*vASr~w`D`NHTRD-DG}_`S|^9d;C}hkWrd!0EstvbPn!#A^*@0Ke`!Mgt#Ka;c777I z>c21Zj0lScl%U*^Q`ukvKxl%$^FrciyWn=ieUKG4e?)u)xN3S72^ExE^=hQ40W(+$ z8-dUoi)k81CR6R?J#s}p536@63{J2q^c>P+BQn9N3rJ!+JIhd)iI!r3iwUgsJQ2nd zeQQjE%^3@(JpleZOCEvDQ70GEIqcIoA9lWJAGXUFIp#tvndEUKZpxAAX=3`OsLp+1 zDX16Pvz>?uy(+$%C!4wQ zS6q>omnF^jjFu_R13Ew@g62z<3vE@rH;$1na$42$OmS7D-7cxg)_inz>PAz`|KW!+ ztu%F2h|8pC(~p*LssJ^;6~*bal!#*veCI2! zhtShPt*ZpWtkoPajgH=CSt!5Ij7z90SmaFe_u%E)kx=Vs(Y8gLs3ymY($f>elxHPG z^@#xCv{nlbq3 zoN*o|z{p3-K>NJ^hA?X8QkZq`8AFbIkdA+d!%G}X06j19&6DQwEC3Icic zUdubN>(R-p!K+$w)J|ww*wUD}>^1!JRbLb*cgQSMq7Z|(BI))|V(6YOE_V5^XAK$a#Ap5H7L)(A6WA3u>z2hhj*8dE=e}C%|^T#w! zoT!1!iH%ZPd>%e8e41?gyT#9ONA60Sv-7X*zM+lB(>^<@8PGw_hSWl`EoyT}4ZwqR zQQ5nQ4%2idRXX18@R*k42l%>$%~s-xdt?gFc_mD*e)SS$t-`{nXjMlV6r?J5Br?Zp**ecDCoIi0fOt=99lzK3w$c^g!=pC#!b- zzUuSD%kwR5zZTtC<`@4Ze!~|RmY(w*KITc0x!3RZu(HtK(ncdyG(ofw%jEVfs28i< z1u!-&wXimodQ35L-Ck3HX;p*2i0Q>HyHq4HfkNQhkIR4j0icDCl0WhHn3nlXZ@K&z zaNsWsSB9I6^y>)uPQ?uwa$WsH^;SDCnhA;3_%aV=)a|sh0__Kv?RC@4a+z_7O1dl9 z{44D0F4MY#5{P@r7Ye8khV8gEL<|08J3n&D-vF3rP6|Dk3kZ%LUaamK^VC2JvY8N-Fxex`w*vHRDxAOx&&FYD)+2#mItG#;2^;9cLN&s zB5kKD`$9+cxf`l)ug6l((-b=sjD%H?6|x)kXGcamipTt4Fd9qIR~rYa&yF6ty4&(` zE>Y#@%GFxrG~LIuGEPGlMR4=VeLUKUdjVC~v`|a>@)?2;KOT48`+Pl+`LELa8pE(w z-o0f`o|GBwv;6X6nKReWKg%iO%5J1-d(wL9a)$jLUKY0QWguGGWwH1jj`nSg^Nsxw zhuikDBbIl0iG3E$8B0!auc&Wvz~!5jngHAv8mvQ}eq~7d>4yG%{ENGpS*KTj&vudx ziw@fp9c{)e?DO9Kqkyk}cT< zFHUxRr#p}PT+fyIfTsfOT~Ex3XvE&4VC?L5d%HV^>4Dv=;)c_Ms31;nFZ`?_s$msUO{d+|qIK>(nUAt1@-cdoFiXMbEaf-` zR-Wr!CrtNy;2yLDGh0+SWT3}NhLb{@t=@NuzDqqflICYI)-ly%|J{;^^x$m`!+yV2_w5^9zVq~YfkU7n z`?R5a@r8LZ__$@#3m{mXi~iQ>K!H@5g;`A#v{R1~F$LrScnX+XR4lpL&1ABF*=-pN z%y|S&E~a6$vIiXLve09t04)0LM ztXBuR5!~8^g?d=@!rI%j9|C~8668#<&`lODl*;B3hwVq}zFeRpItj!b15U=eaM8uA zw6TF(JkyYl3 zKwe;74h=}av1rZtZqO~bejWPJ4TAQ*ZuT+=TyKHw$+yKKMV@r-s)wi4?A>wX(aP1fbdYRFB+EHY-e?wQ4=Ao)4uX0C7JVKvk}Oy6>mMC7lyT?bjf*30*{@k7rZRIz z2u+W|-8|{19ifRtR#3!<21yq*vU7_e=B&hsZghC<<*=E)NQuy+z%AOJP}tO(qN4>d z5_bC94k`6C&vuz_T8uC90^mR^$aCt?Pc6$N6jh(gE;?A$Toqb~>eWYyKxgv^3(N2M z`7W5JqrHTmZ9EW)JZICgwz7Uy#*iID!9%wBrB`ei+c6s6*D;_1^pA&dJI4lGNKVVs zS>*iF?XgXa-61i{TaXFl1Cs3F4%x^6dw<{P05aZ?ou1Agrcn*dv*OxF5)~5B{w>5X zjNVJF7C^)wNc1O@j2^5ZU&AGl2o)D^ez#3@Z)oTcYtJeUdu65HY(B?We~&G_msboP zR9JB>*RJ4DslY)wi_7i~Ni?cGV#W31V#e~~fkx5gVsMOv-T9asdTnY)B&6*6q0G5`#!qmJTC5y6yF05hp$?lmd9AkJ@69#sx4k4 zp;$LGkQdDMjGB9f67TWsC1Wx6{1?cO1Qtx3v0b{@L+R51D_vZo>0=j?!C zq!OKpj7zgm?{N8K+f;)_B|Y#9=Y>9EmayE3!17OCZ1Cb*Ibk}|c`lO;b_H5QEoSxRiSbnLII$+@MkVIBj 
z@NVU&Ot(1xFpU2l^7=0O?GtLZblxq2H&?hj?UL+VNG3upA%(Y>#WNTSi35`9y^(|g zhNzINwD45X+BRqGjA-_4c=1eM7~$0*EfjsR0G_t(%;*Aq2XhCzIqRJCxsZRcB4M5I z1fF{@+1q_fNiSBx-svhxil*I5l5&p0;%BiM5ga5iUE}3=o&QA=(@0!f1m%K-StFd~ z5FG#$Nf=G~#mTpjEdfNS3{@lksdik^5CxxEOw&rR&@DUV=$Ux;ocdC7W@^7h_dt+i zBl}*xL1f9e(U+4O8T)rmb#Kqr23~1G6%e_;#1@ZBdpqt&wpW*Xs9*=2cUOtl zqudtyCX80z^en%;u=R3axdm_WK=qZj#EckEaTiM_4vUR12cJ|&nXf273zAoR?y*=w ziAiw!iM!F{`pX-wFSIIJ7Se>Cn(uisMu_J?nx8@+9L3SHAM0ww2!24;yds z8d)fuu$vu(br`5bt{HRbvN&U}6ssk}>Di51kP7}=T&rMfv!$tpw@=vh~U?uIvXBK+VWKq6^ zTc@AESC?W$*BF0gQ=)j)7LAe}m5aPN zA63|LCGP?*dn6+FP$I!|xo^@`BPHc@vDtn5ycmt{UWc+{Fd-J%Y)3txxIrB8lsi&G2&D8Wv zrKY)zl^UrqXWGU{&3)IB%27l@DsurkGp8w6PFiZY(B!_Lk)o0dH3?)YOs1%4QlKy{ zNQf+o!0$und4Ath**VSc&)@6&Kir@DzV7RKU;B0M(_)uT+;+*9V~Oh!gDo8mOOXeK z#}^$2h2C=tE9YZeAoV~YL6Na=>PSu(Rc8J7;ED*nrUznN*-$JbOzp z(8DH&hT?EVp;A7x15k`y3i>2zcp!eRJxi=|lDNEgg3smV+vi=&Y9O zm>O5M5_>cR2XU5BqR_!hbHmFCEdLA-QC*}ypGYia?U6-mLqko5LYJ6Jix$wCv#`Zh ziXr6O-l6Cmz#UsQSS}hcVV8lHn$c2bFf_N(?uHqz`kxTQ?0X!V-qP7ow45L;HWo6% z(p?fiEfR?GSN7S_qL%3+MNKz^+#ZCnvplL$S;E=BKmskd!*G=4)|5q)Ji)_!LQ583 zThF_7lb~)`YR|vf@{Gpn85nyiQ`b1YArz8Hm^e?IL~8DI>0kuc=z%R0`Y%&E&$pvGm-2F(|?Mn7uGFj2j_01%)@|0je zq>YA#x#}QLokv_DJJ2f40ZcXTeA$nh$1qqt4vkfsqbyoZ#W{&Vq5Oe@z`uPWGp%Z- z2%OtosScGq!4=#IzsD-#*sWu<3DpSRs~fk+~B8 zl4Cxgnm#=GTwyG_@LcSn`ZG_MinF+ubxj~ECUUl~BQodEdMc^45(0OztS6Y^VJGagY9BWHsSu9y z!pUb|!FZGwMUlJOP)12Z7r7jbaL;B_Ww(Rck%Up!Ss@=x?uy|`8ppsnIhUzmwkJ)giLFK1g#~<~fMxS4e%Rl#P3Sy4 zAS@AkVM>%?JbFYj7uk6$&PedaIKy$tH^IsN7E!c=dJY{|H|D{-tf?w6<+bO{D}dJO z{+_89W`{}>GG{=Erb~vUQO9Yd4vK66;Zr{)#%}iso3<=r$rVH&h((sLI;pRZA;pXm z{7>AYpG=^4^zshI-A2yB5TcID+k>LioFITSkidinEAUoT+q0L#R!6UyCrh5{O6>Aw zC`Q#<4tqWHXek5dg^~+%6duXk1c~?W7D67Tq#KD>N{&tO}%+2J7?Wrd-@a&QCqmo zssfYF4YUTBXR1evQve16pD`MGK>xX3jEql}g=aXhFHK3i7&bJ-M)Wsm^gJnM`w*w# z@_)yA!)O;-ZsU2SeUB%aB(U8Se#E7e2LDb`Kd4M8MR~femnZflZGhAR296=5p4Cl_ zMoouFBu+n@S<(e>K&;|Ic8i%WZC58m)dpB)Es*?nAf^TfDIN0J=Cr1-7_ymr!(sf? ztjG$ zmMs&NW#)@1*;yaV83sBNlndAm(}uz~=w~7Z!&u+YYwpyU2Km+3Yb$QwFBtKk>0Uo^6!aDcyB--+WQnJSA8BuuH;38~PT zlUrVVXEM|PT^gfq72Z+GzOknFOf?>!3lY3Q2fO@S?n6C_iD$F>8^W1kP@ei={OvZg z9;lilbj((KGOy`7KVSZ0bnILa_pmP^#VX#m`GW5Y;VbFFqT-vDqvy{S zctdHFKv{HRinNdlfingfclOug8k#ofH|9g^Pq@Zo)yt}>4l<}r%o=;B0yI)z2*k0> zm7VaIx_c2z8z!SG|DM%0(13{-a!eg?`kRGfzFKAO!*#;;o))>vB-ZSP(3|vqPMtIr zYgR1BCbJY`RbLXfOySE#FBe!f5K{r;JnwdoGZiswezsRi7IgFX&+!yqj3r~t9xWv3K|$L>$$Cnz-AVK zl2bT#Km~N9qR5pes0HHrx8{=y`e6pry`g9+rjF=tGBht|{H3FkV*zGY6?Cu@juRLP z-{o=i06vmmL;}~bY<)~)BCIX0J)0?7ytr@r09`B@ms-qoTZ%euJNK`4X)a%^yj%+A zS{Dna_put<4%$_lm4fj;kue&qhYy5~FVt~M15j^A20ZC7C*c?hx=3LXi2x;x;8Uy{ zj@U36kHFI?wuJ&@tvc6o8+jylI1<&J-h0s z*x2K$H{bqSXm)xTq+O5 zj><}14_D`RC2J1mjM~u(DJ-HrOl?fPloCvhS?*9TEmqF_9oLXYCi^GFk!4{|Zva+x zDoh&8Q~42&!5L#wsW`ZUZIy$YnM;0Y8sL=R;aeg2<2<#IZSk?ZQgI-_U22IhBCDh2 zQW5L+r(rPQg8YPt-^WB|2WRFuJ@Y9dQAbtQ<7KU_-j!V@ekcA6!DeZ0_JfXABRsR^ zl&l+@^IueMcZO&}+9enOe2-85!b@0`KAm;51Bv(#FT>%nQO5ePIAXuzzMsiXOZ!|N z|6moLo&Jn0R6fg-PMcI+Y*ECfiW8KQA^LTSKrlgNQ4q@qB#lMrLArxeGs%fWFbC#) z`g5G-rCu1CqkDe}MGOcI3l+!0#{fA5%CbX{am!7@i!qK$l9et=ShBg%m5BX+*zf!Sp`^&&=9-bP>ul*K#yPKYPZf@4N{yqZ(e^YV ztsY^06nu^B@x!~vI-76lBj-qs}d)}H90(-Jw9$XqZ7(GR@Le$bv z_@&+)wb*X2;^thO7mjcYz0>hC_xB4q0w%2I^jdBUkkES;QF&`CsZwbY8%Lk)O|gd@ zErM~%$cDKBAzC8JLYht1;0^D+fFbnWV)G&M`=n>b=IAXtQ$}8B9$)*%^OZpy2dIx==ksi&+Kz^V|%ST4O}8( zipFG(2MAn_g96>N{fVLhtcXiBezKHJnLN3u!X9T%-OPB#&=m2O?u~44P~P;9I0{dflj8+{Q%h8(wTMB+sZ6&y?8f(`o=g0fF3OW1ZJ+hl|0

SQ76ZmQeZ&hLQuxL zId(~sI(qLyaZ5842#G=%)LJKExx`}`+$><&sj=C=nmqwIwHUx>sL|#^uijwVE=@|F z%h7l)G%gGVvm=sXXZV8;g>q3a;G~Qz)T)1K?NC#V`YHZ)g1efd1rj`t)r7+|n>0$2 zJZajQ>FAzhsk3eNT_ek8S`U*0m&4d>i(|cBKp-w6OFU#pvyqUdTzz>C`+=EwLq3o+ zJj;#FKF3--e|yE4>nk8P`;QVo&v(2wK$*|=A?^XnTt>;~v7?~Qj!Ch8Z?pVZ$wZ^j&l|znduI(qT_L( z_-aIV>L#kGMR7~$P_@A=-whIFb%A&AoaC|vT5JptL}w8j1`@j|zWq$I$}tC@c5edm zY*HoJ88Bg@3FQ)gvWj9a!5DI{MQ-L&#V$fj?s#M-ka2h!?J)2`Ps zc_-D2i+6@`Qwc{pQ=v%_IY7z5KFPauByt9h@~o z)2Xo;m=~^7ZPfFUmxXZ6iwDz5ykr_n9eo4e*Y||SGfbPz0V^20>BGVrBuwN#dVm!m zn0G2?#O|LOK1>$eVp|D=`nC%xVNrGJmkEI+`cn1R+0{|Ut5bJExaCQveF?q{hv0*r z%?3a}E%U{gY+om?=MZi7Y|^dl8$92_@nvPK8^SZDgCCtHwls~TT6Au}xh9M|PhpH% zR5M!AWV3H7IFw^ANupCMu*(7c?YxI0+@WktS_pjGP?zE?p*od6#!E5+st~FhKu|S} z<(nGO@)6Cpr(P3`6X z?!WAmjpMRtVCmv80aN!3Wf$NrB8K-mx`R}gXJB@02Du_(uK3B>rse>sFo*0d{6}Rr zG}f49uJ4>qPBJoU07IVjCvt#XTX%nwgXMPV{-4#z3276AfkKyS22|k6%j=mEM0SOM zY++hk&cbN19Xe{L%lVbe;vA{{{Nlg?B#_@%W9p0X@UQ!N*v#I^O~EdMk!!Icek+(e zR^-zz9OWFWGDO-%Y;PPh5j9>giM_A6H^xt~dmoU)?gyZB0iye7x}w*$J*p@7F>d2$Hfy^|f*E;V~k9fuvwKby=8BQ#z%IQ|#!48FK6^;&n5oQa5S)vPn4qaTwGE&R-CN7@{I|gCwW+qif!X%e^QwvSl7INZ} zyF)0w%Bnd3m=G0Q2AgJeH2wUnM0|(rFN`@DAYZIvGc7aRr6d53OY*XRA@NVTcp2{_ z<{Q1}x~!iIg#MwRU7Dgu{%G)c$D%k(9X{E*i(FjT7}=1cv2`M6u$c|}D{ZA|six)( zN6tY4U} z0;td$aX%^c&~H*>#I)3Q`3n(8c`oL~`L)#b`3SVv_Cuvk9*CFsD1H9i{H)V#rzmv! zy3kTEWAY{wn1(G9Yr<+Snr@r1mqj*mcP`3&2xebT!fdD)JWi5qhe;l=koB=~)c7=- z$j!dL;CRT|!T4-s0~p!Y@FI^)a)Kh%$+zSLjyYwfO<|Z4gN;cAoMV=Ga3S7|TUAhN zQQU}TjEo2R)d8a*^LV0;e)!+SUL=j&hhO5dV)af@?nZ*`yV`TZ@>*(I1(>@dzPH+t zh5Q}JT@KEPza$IKj|_dTwk`gt2)bBzAh(KzKgDPeIBKm!mr52A7L=tf$(ZAF*Xq|W z11DBiNT6+!k?$riyjMM}rF>wd1s|G%3T{$I-f z|Br31Bx7FR`Wcn z2ASEjWnw5J>enRO}**$?mt>N}HSRAe;wxoB#xqb9mqL4DD-x{9gCAdehs>bNu9_jvyq?rF z$;Lu#C+`Ejgs;B*BO=oEr^vn^FWP(~SZLr>9q`h(5xLFur2i6;2_@BVf2jZF4n3W} zNa@YmC@d_@5RMwp?4t)!BjR9DCpM{$&o)x910h|MnCo5lG@>V0$P z4-t~&rSGEn{^}tw+BW~xCWT+zy`{)KJ~d8oey@Hgn%%Qh(&Mgw+J9^lcYI=E%h25m zVdS6Xh_Dc3MZFp8k-UhwQs6QkuZ}tWJmrePSY{1Z(InGLh))`Lm(VSf< zn^InmAv|`=A$!>4WY$B=SoIKlU;V90-e<4&{t+%ermsC-jdk>X8p{LBwyTA^Si_o(@}7o}8I% z#-q$>FC9r=wv{6RO4U8#NP>8yxBD-8@y4>!p?;}-u+Q|48T}!)(`z}YK00j~>+78i zecs$OTs@IlORrws+z~1;U6j+8{7@xX{LzW*P}s}da`bmG7#qD;-~TZVysN*8_+`*{ zvf8R0)ntWu+n^IC#}@P zFtJ-sV^VT+OW$7U%>V2>i~Of6-hW7et-Vrl>90=dtDfyvaPMb}LMC4(7a1K1dS}y7 zrcm`@&>lU*!9N7^dOyew{*qsXqb9>&Ux2sy5E<3pf0v0om=0qH8#ptsReOOhmoWc7 za)ru2-Hi#ppA9P~RAh^V6JKTIAEk)~fgP(Ea?nf*`ArzwZ9x0C(^HnS{9$1Am0T8` zhWvb6Th-epRLtyXRqOYY zXW8t&s%C4?YLcmend&4e!X*Dk$1CW)yvD1~rv9~S6E%pLYSf;WJZjS@dG({XW!`OP zNO-+5u(2zh@mS6ay9mXh2Fj&|c*X2=gr?@?OzC^wPj^it_4b2k%uVK3-Syl2z7IGu zM|ScyLK8hqo&=2+k!;QZ71~>__!sUH8rI0oXADN~kc&27ENYo30>Qhh z?$B;mM2zaCOb0@a)a&bZ<1nV|vJ6-C9lNwaOS@)vOQ95=Af~NAobwy}UB`U3WJyxi zw#2%oKny&F<{z*y?fG;GW|*Kp0F@TZjB>iLusM_|Jt|fEY^(s=exTWTXuM2zT_W#H zj|?%kZ)PD@>*YWGK#UmvQCFu`p4vVvvoZe0jLzge6(`Rfy5xnsg`Ex|{PFiVX@HRgQC#%}K=%ZYoM--zPBBfVSH!Xr~YF z%754k=BhO&SPrcMzWZo0N^3cw=u{_8bZ9Bw(!lq+Vo&1j_2xEaT&vM5^xfhV%BlG- zu|DTlA$tdcX&5_>ws9i5FWa=7D&Ch*ZYSb%+B?QvHv>j4bL7KAQ&dmQ#&MDyq}qvM z`fS}$Bu?_uK3PKB-arxY^2g6B$b|9xU{m!QM*T#CneH{)kozg{Hb!#_qD|PGf^1uj zTTE=9;J8s91_sV*Z^mkwWFvl9RZ{JHO^kpMtEYHFJLK;gvFAWa%|Gt=7ByWmzY3pe z+-2WzrYPsvI8Pw|%@dmQtI6_&j9=mE%M{E}6rx!U*nS_Xfdhf^J%2*tCTk}T9+r!P zMyY9vk`&D-Z7W!MeRcz|&2P;vdBsz|?w%8hBEnxjIi{1rVo^u?ord@BQLoOO20ubW zp5^yumGjW#-_dDCtZ`owG>;8~;`Gk@f9QJ^l`_nhCukXwZ@!~^QKDZhvOHMdPsObU zJ{#s_hGVJf^&1*ee!V!~2__MmHDe5IfE;zZno{!~O@QC8;JWk2Qm0vv3zFVKf}M~s z-0)?~fk9K?r;iFumoiq{#bc%tyHl1Y1eIt-#n$ z$IWM4bn6LUOIAZbsy)AoT5K`PZsJ^#-C9TcPupPFxn>91+dE30fl_C9YY9n*n{}vh zK4NxU+Z@>L12!J^I)$6#Ln 
z%@&`iCYMW=2EAT$>va37KcK4oX;ya|KP#yGh>>$=Vog7BZb*H5C`gdWHn5@0>O`U+sm zz483Wd9 z9#~NW-|kiaz3bZe;_AI`<#$jnQM7x34~125e=D1RAJl;_tt7E$PeVRC4f*p%;TdfS zIb8=adQ#x(#1kOiTLj410I}vD+l+SpM9SL;G@@xQ_2RWNuDZ1fjzN}588ap4Urq$` zULsYt+aPxeTD(7zRFa2g$$e+QItys8fo{2Wm&g>=L=sOGDR~B`}ZaCtNhoL9fG(|WPoi|guh>A zxoJJ$WJ8n^ARr}MpCLGBfVn27e z5Ala~^iuOI&`W?4+f)7D()-aSnb-P}hCc_AO0O>(eHzSV&P{S9Io+<{Hz|tlAU#p4 zpQl&Qf49OuO4+MPdER)x+v5YO@m}&8rTi zwm8E+Rj@J1%pnKA{92v6*sfe z<%)Lzl!{iY4d~K{BA0JvurSNWNj2@csp|Kia@&zf^nGV%cSm&|{cya?N?Eef6hWUC zp%*sLXa6e9b?G%eeEBJn>%2Gzu*glNCLhD1?ZnA8CQ(saR73{{uRTq*!`@w~S?`-t zfbv@*_jokXPO4}36AL&0I-@OKE78GC@$%cXAE?%-gokIHTdGv}omH;>W57U{cje+? zCsm`mW~cdGyZ9T;jhe!xu{x)0w?Zy4inq3C9@W}uUS>q3N@T2ule~vBm;V6>+9QYA zh=DnG(fGieH*Vo#Vpv-=IVG^|8s-%eY%WIGZ+1lL59Pk=Ejc_y-(LOA!xT@g=m8Gz zgAoFpD1RtP^LnVXPo#Q}Ut6e{_eJSu)4ZdLX4D!9fqFq=iCespL1>lM!wYe-cEEoh zos}#^yGl9Qsd>!?@Oe>0;LFePC;HrJviC|DGUvhspJh%%3F>PZqmhfTi{TlwfeWKv zr??6>xApR?a9pb7w@BCX5-+Xv#e+zI!kx1%4~83-npK~*+22?FBs{}Th>*>TJCwgh zo8C~%@6G$Huup`kiW}LkRI9i+$rE4{0naK@>UUfBSlRiJmka)0oyx~UK!H&TZecv* zn09FEM2F;u0aw2;uXjVrjNh4OO&?vK4Mb~HLR`jjiFrU7RyuQ>FZ&S%$$>A%yy)qz*l z12X$%j@uA+<{u9e!FQeof8^%6un&@}gD^rkk}9tS`-cKz8FmRGp%YR&+PU8ZG+}2; zH#}>$tq$}1khhbI)zjk861B_0QipIawI1+W*H$1GOEp8q{m$cd z{7&23@>H`Ou2R$?ULXeu`*r$+J=`(dHm>*^Tq}gOG5i&s&!wG%y5<}mmi;b ze}s%Yd0=MJmZ)^%&b#KW2THu+{t0(KS!Jm~}YJ5) ztpSRs-Ci#*Dqd@&J#Xcw&$z#vOE-K;>HYk`9kp?eSLakkx1t-n?f@hh$o3_Z2u%T0 z)3G^sutGw;g^1078S6U(1GnCVTg_Iyrolc;)l%tim!@vRZU8S-0Z-%rQsmOMRRf5# zh4@x3Qp*%~DrfnGd{N3tP<8A3rFYMZdt9%SzPzZy_lP%Z&jCZUeiwaCP2|!pdKG90 zEh2)Qw}ebXWv##nxZ8+!Hw71)+xujhz@7gwV?*k~X5i5&08$2B-#8N}J}8D;x{t?S<_kc3?j?%nX}dpuSNOYl`PhsN#r0Qo~wbk;-yGD@*WzIoWX zv*JL#_y62M^N%UhdV1Og_rrhn`DGk|z08mTLk^`l7^%a%^#KYex`$ zXbWS%bv)3C_OAv->+~wP%JCH%5{%G-xmsC<_Q+db@XcB3QY0=#&TZB#MZZjtyb)^L zo8d=*BI;zHcMEjhD#`45K+)gimTD6e#%BTh{9AjfI)l1H>^s}?q)HhwP_WX6XDGNv z4%TQQT&%hhr#teiJukJ7fk44hjb%>?T;)tm(a?9?`spJK_^gAv%#|4*VbF>aB{%m;$uZaFH?u9EpLM$ z0cNU!om*LpKMS9w%$l|FJx73I?aq&FfM~C-2R6{xpxw4@Eimw{P`{Gj@k|$8*%rFu zM{5k~aAN%U9){~QcSLt$=Hs6>xFt(&HYC-w54{z2M655Yc4cngIqc=leQQJgR;gJ& zr1Q=0I;&({NyOws%do_^igs`3&j(8Wm*4(h9{bhbCMfi;oHZ{{uBt?Z((x7W=|^DP ze7@NYZ3VeQ@i#p7yXI=EZ1_j?=0=?%X~{KHU3_H?xJ$2ga~F`ErxRH7b1mw;Oybws zXFpI9=k?dJwR1=8^Z4pWsxtDe({^Pqa`TbZ(lMOMqfhGwcdY*iVSf$9F0IQIv%*`n zt(ibv@j$v7Z7wO9oEuV?edKF{YuHqrRy7#}S>}xDI8g5j0t@KliH^jX$aVE}zmG^x zE8nrjqZ6X&kU8s|fk)osDmnM|SmT&HJ@DDE5ijT{EejYk_^;8L!0J;7NH?mdk+5%@ zP`jMBb-&hX12B9L@@6^^cU7TP;GZ4P`xbHuX!7SSzRXpL8`KXA?R93%O+@T<{{6|@XW{8`|Ur;740J97+ z>LG1ygRaSx{$>cL{l~JRE>wed)ulRDmfJVHt?lRM)r(yz+7t9YJv#eF+bQhWY`y-` zrvq*B_omdJ(Ltk(XzX zPltQbbu|t>n#JPFX4n^jZi~D|JT8JR)@SVf2&{hD;>y1b@BqvVa}wBCRo!m2Zq!{j z_-Ztvwpq{an$9ZkcIyG$Qg=H0D`1@upv`*K3<2{Z)IvS#mF%kgtrQTL<3SQTPRxNh$+(4u0{&S>nlJppemydYKjF7q-rgQS;aLIOmsB0LqR7RwJrktFxDY9W`Iy zV3va25u#s^mEQNuk}oPhxX28cooCpi)4&thvGm~$xH-a^Wz&!E=8>YZ*i6%@EhrDz z*tdpOgF6fL9+_`ymVnNuk*ob5dGZzCK3C7*=a)0xfMS#Li?Vj)6ewWbd#Yf~su?a- ztIq92054GE-2iQNYOPG~9xtJ)vzJZa1YoLLdu*6^-A&T4>K3)po6C_{mU!Hfuec5~ z{0LkBS0_Z&B`i00TvKL#wQWpdQ(wjf)rfrp;`}fzr_ceQQ#R#7i&gMf^D{}@esrDD zyMEMp{Wa`qZm){uGIx`tcmJ09@WoWU5ug2Jp!w!Zj~Aj=BVT?v?@a^w0OdE5u_T4D zrD9TNQ`*BmX;U#ZTo7p0%$-$HgE*@->^HL}0gU)Re|$T0Kn8hPXdmu62`@P~rSj6u z@6=tQ56~S2J$)HK2ERgc>jt!qAmSspR2WaIX=y%Nh7=vvaWCDniI>{j9TI%rq`oX)cexOnZ~@#!zE<;ahrzEab) znUK$(wW_#fP>@fL^)?LP-@JEfo!9o3zw=iYHIyi9KV))Y#C=Vgi}=n0oVp{7_NPY? 
zj_FTGnAygK%4W={t;jy(fS0arslrqtB;h_LUpkIfwl~q&Jg6a<)KJZ#tKj+#LEgV5 z$OUVJu|+;?>U7(xH6^e{X&|r{LfIG0K$a@61U7j09Z&5l+CE63{PTLf?*|&Vc2-}7 z-WY5lM*(Sk_+UVq`Zn5jBH~sOa7{ye=fBW2SszhHf8r7@@uWU`0|y*y2#%p^DCkuk zMV&TB$+9{eFvrB|EJhG?=OKnmA%>_|@W1EbjvdswF{t#}7_=%>D9>AaI+Ko!hZRqC+H)Ly+gUD?^*mDe%4}{Iq@67s-#aCRXqWAQ@L$yt z{bckS8p&{Kv1jF3nXa_9O;~pCrYBOeNP`f0H zGM?F!4aX_O!M#3abRGt40gR~4S2u`P16k%sEg+eq@o-xFMwQdYteaFECyn)myY~dMq*Hfcpj5o&ytW)6&wVQOhBp^`~cQq*eRJtJ-~U_|h!pi(#R< z5ta|6`-pz7Jcy3<#mmup(FIn)Wj7+$M z!p=VAIgS=S≠~9cmDEkfY2b%DK5=ao>xc;CzlSC*9qKU@L{f;=&7yE`AfL@)BfM zS8~jqG#rY)YP1kN$2k@8F?8zIz>#YLi;8e;iX6yytL@ z37dXBD*GJqq!B;2|JRtf2D(4GC?iW=hY#7RpWijSE75~@dCM=KWeOf&Gc6vggb0mG z_WUObD21Qf>j&qpF(>UVuex+jgx(jp-^z*}l)cj+GLP+8`KY{3Mp>YEGkj?X(uMzK z&pS;wn#8(#k*!_}$<3Xh0g^q4izZ?$V+l}ei;>27Cw!jp7?B(ZwALYVicF3#^^h)M zPkj_bAaAW2>1&ocH*?h$4lnDK&-35Fp&3~Y7W7>;tq%ik+S=Aw$BvPsVltg6S5Mi? zHwtSLd{JoL(#?q~dW4ZP{Z5V~YLpDq)9b*{=Ko<`KmlMX@#gya1|*5K}7V8XGdomDhU z&2TvSFd!npO&pq>?f=ue0ahmE)XRHbdQbg*UM--`)fe104)huGj9O;zh@KjDU{4s_ z=pci6LhEzxaiI^v<|0_#NG{x+WUQAQlhiQ&KumnM-39+PVx@ykOYqKJ0gt|SfUf#S z`LFb_Uua8>EFy8qHF!FOD9YKG?f`{P4|R6=5J3GXNq=-+?$TJOaU|J+k`!9By=_B&9MhP+}-GRB6N>+U$@n7+Z+?&wd~Rh#WoRH zF@mM;?v#3BM+47okXbc+n3f;X<3W}r2%K8%&apTH;C#PO*yuogRSq`22bxB;Uw0V!w$Bv;YkhKOUe8Oj-h^GmC^?;Y>DL) z`}QbQzi=!<@o83|e7_VgvU@^;wWEUe_|>FW4J zU4=*eTeE1ia$ddlVY;FJ{cH#CtRL%ys=ZLObz1l)73t!2>2Yzb9O?(rmC#-@>gj5= zh2Nt?XZHcs9m)Ukyju%`GDECiRekp??8Z|@o&Jl2wfik9s~juK-URgcbcRa1gFhdv zuK157K=rOS=vABrY= zU4X&A0gn#~{xR;080x|T^NVg9ww(14tdW;sIu$^(M1ZY$FjU@gYlYO?d7Xn>CH!o2 z#`n-8JN-|1qs%dP`hQKQ3Ab==7KFl~N%4JW5p#}f{bPt(lkkCp@v(OHd;ns{pxy1l z#CT>NA(l#~`(rF>vMM>d%t44?F4?JEm@~aIxUh$FuzvEv(t#JewQ!>P{D8!R7f?T2I``+UD;P- z_f^W0vRqe$V8F`%HdJEqeCtoJzq(YOG#OP!-M79X+_D2!h~6ERWdT1G9hzgxqrJ4?Sab9!v`5(pT zgm#zRn5Zj{&95WdyRC<#xqGss{lcOatDK-AB|}eNtYmTN9{p(2UwgH4_wQUlk9H5+ zbs$ATaOkE{po-RBCgh(W{z#UUvUg%CmI^kHio*$xsCFX!UeFwr22W@II}KC+r_mZMx40E_t{U$fv8ci$d*PQ4>yS98eIFmxYi4UnYA*;e%z~zF3sv9k_ zg>}Fz_(SASSW>Mfhb#k32C~$je7iC4S_lC!sirN1fr;N_D4UaSdW}r}=RtEC!N+U= z9X~enyp}3{ylY)SZ$=B&{EDg@`!#kJiG1F#IGZLL$X)vdH1Bq@-WEqcN@rbiD%T@0A0@R<8hAm)vne>n55RX_Z+Vt?gA6`$g6fd z4f(oS4(fC&newj-woXS_eR<~&Mo@Q476VL_K;&`yCQt4}4mx#%@OcSAQ6fLwIm9~u z!ZWsJgXfreuCXSYy=1u)CT4?n!SKj~TB7i~dR_Uq0bm6t?1Fu9QPkT`Vt;f8LM@xGz4zwo^tJII%FGrZm-2x@hRbRPS z5{s8k$jUpB5gF)I;*}S*CmU}v#yAbtBfx;7{ZNeU_7{{(jsWK5=189=1m9`9x8$Hx zCb_dQ#q)sCLP?ZRXKEf;S^eo53b%D5{} z1fmzK{Px7s6@H;d$S*^7+1k$0hl%`p**?XKF7P?eK3Vs3w9P&R?uy=(ypNul)RYtk z+lOY3eNj1C%CmPNd%sM;Kr`SNq9JVbjr~hmGdfjFM`byT{>^K3F<9P{d|B z4$=Y1r8ww^9vZwBlk_iFtIh-7e8}k#=H8>bhh#=K|NDzwJ5Y1JhGXk}YwbeJ>og&6)*A8{}CpbQ_VIAF`G^ATkOqym)_43uX?{E!_2klj#g5woY07Oh_>B|w)>a1JJy7+wOw=M?Ip($hq8rX>)#46r3lS3ZrtzOLv70PMzj-Wrh+qL zf$&j9NroHxo&$6Hk988WkWra3 zkTbz>t)6Yu%34>>+!GujUsW+)OTdn z@-`1z_=xTi8Nk$UU`A)>0SegN4ycaWUzoKDc{j! 
z>PGdI+?gX1)kPr~fl25Li{7a1b2CMQ2A+nJJp|eCOei`%Ds2CdmUFPi$(uU@9U7ITbxuP^VAK*RWQ@*-K6&kEuIda!PZ(n_k@W`G^j{ z(tK|yQ0b@(zLiqo^cQ|g4WfFufiny=fsXcgBI585VqE?O4T@&cVui$Tpp~Sr(1qde z@Iyn$5G!o@;hng?@^ge5ya|+X&hDSUHyV%nK>n*nj9Lt~2Af0S^Q$j) zhp5~br>9cw%9p(SQUPsNGF>{*|L@FA$n+N|me(Pcz9(GYRJi8hanG-TR6$%H1`a`^ z!wT`_Jyv0%I0}dcXq>2|k;^0kD24e~z2Tb4j^v)smiLPzS`tlB(qSj>L5sL4dc`J{ zpSXDD+__pu;^}`;$L=me!L5)a1O~kF0=g7|K2up zr+z5FZ)8V5grE*Hlu!;UqrZtI5NZg!EZ-U|RKSVdYT&VLo zs8S-JKTz<(6cab;5~&{w6gxR7bzek?YV$j|X1y6g{~X_@`_iLi$CS}RinN~@t{?d< z!F75JcuIiDv3hHZ0#n6%K>rVM?;Y1W+#aAnaIBse?*yGdea9klsOw ziVy-qL^{|DQU*~_DN&J55(9=1S}Y_Pq-0b`=ura6KnRf%0trdp^Wf}vM+Iif@AG;6 zQ_XYQ-OhdP`?~C*@a&!`MZ+L-uuL>br`zG(9m33R-$Fi^B0m0?M#ELE5B(eT> z%n<&&fY+QZyv~J4iZh$?y)@3zGZp6s)z-6oW#ZU#%B%tzdf5Adxd)K9HAlQ0oWLMf@15n8s+>JnSLMNR@fn zUvZBKLH{EVMZ-If^<|txvbGt|WK=pL=;KLa))Hz*Hhy>U1GI(d8{jgaN~?u#&WlSs zd>PzXPJ5>rr4o+9%9jz=pV;F3lh6IPeR}PYrx2U0CyqGPTXU6Q9LiBiNmUah;-K6p z^S?UUR6MlEEc~OeH;+P`8=OO}tyWgYP@P!!h%cVlBQpB)?pAn+8>sh)q5WffG)8v; znFF<4zvVBKk_1buTi}Whf{ntWTKv(C-9>v(C%J37Wm0H6?>Q!Q?v#NsK< zn{;pB;9PWr1)I{8?1*#|HS(H{{ZD(?^CB$ZJzX@0LoRbwt61;vhl;tGU$2`-!RVhl z{JeVWxx>L~#87pLy63p^^Z!XSfGXVDT$1PvK4$-fjxz`b{qsFtlqzCv(yJEFK+{LS zmIKA|Sttbs_Vibf(3|h-4lV*=E9vSRe&qPUdK|v(qP8csr0rId5j#x^G$vc+BUpOH zCv}911edd|jWj=UjiDx`B$(JBKL>wDLn9eIhMa%yc~MGGRZZ znn(ZNq=5W79|$*29gs^WKFdwjw_etIt3xFdRDuwB`i4F#nuW)51~y=N2qdBo=%>1B zLW?`IdZa5zR_Ew6d(sb?mDhhEZQcqhD6hWgrgofqs=I@|;Q*{=M?`wfQ|}~0jbNJ* zSa4tE!yIK(|6p{6K@-3t(T-1bVHM%w`q$7vIL|lz;BB{7-Z!gy!`&Y4j{%_IvCkO5JPkA3UtKt<9~VJ<^)p>gu^po^(KPvT|yT+_4s2M<5M;hAF3;7QBi=9O-_NifHaSAoT)?th!hD6NtCda74c%GOE+W-4_lZ zVH4yd9)FxIlRP(D?dH0Sx!KAP_nIm)vQ^gJbdDRqxZfb+k`hB~29H~ThLPg>BaLHn z2S7`}mlZu937i)Vxk{`JE^`co&Li0!2L4N2p z`y{JPIQeBYh+N_M>}rF9YH;@WxTIjrxvWl&6XH~Vup9VAouz^}fYppRmvaykpl2taoaF(`$`)^GKmHU~AK)c0vy~X#+XBjn@PJpaod>*8scr=AFsbR2N#;F8 zQ^SyiBgu(+daJAc_TPD#e)gHxtK|yanZa`6%6uMz70*2&p(hW&8G935viG`%*^U@? z0G-m9oJuuu9Wriz3q0nTjauH8U}uL#{$Q50@L)|D^CHm=wYu*-$L#FaWcjs2FaOqa3p*!@s#V%hWDO#J`ugn;2SRckJwcF3qTaC4S9$XmHhv_`SuhR*&2P{PT-6Hp ziVN^%4ccW7N}7}TFsFnAc9EY6LSTyZyIYBbV|_xZL%E27RM*Pj^$|`M@$2(-~jiKce>p3E49J*-i zLf!(AL~kq8V!VS4wJnj~qQ;KgTPf$|VmEWY&}a5H?r2ryhld9T1^gXe5lj9-`zs?9 zbQECNIeA(J*c0=*cCa$Mj(pEg5BAHL)b1QBa`tyrB6f>1g z6)N(NnKLr|CG00KbeD31>TL(fMu1wzZZ0e^vRH6uxnj^!L~{9_D7~tDVzt~E9Zbz= zHGEmBo#j+jt?JZ^a0R(a<8c$rIKbCgWCOo1&+mVC+TmfCo!;v%54GWnE~oFD>=BN+ z;Bgn`6+Gf~K@4J-p=+LKSWrJy4YHPvgW)M z?e@X%qle5~`lp|zEnrPCFQuEF)$~oDGFF%KlAj96n-WFX6Gy-LANiOFG zdah)6%XI`_=2qFWN~*Ly7A_x9?DWtJ*^0bUm+w~+SI~94fqj&Fnm=>bXTPtqe{dO? zqe)0hx55cwxeCN|XGa9AR7nb{>4|z7mr^I7*)B~gnLm!=#*Hv-omq;!3Rx7;)0%pfyXjOY5)ZtwxE8SM$H48gV*VL9PF zH$f}Sq8h;oM(^k-KW*r$mEt$%L~h#vpt8mfvFR;+!ch2_hC#|8(~g~3p%=W9?p7Pk z97k+3jF4aFp0Kk|TDu{y$l&TleVx1oOf*3ZYTR$Zj_z4QI`=g%jvkLI!DT!)YxBp?>^=STc1@z8=L)(( z3jStBA|OQ9z&oQ#<2~FxT( zkq_A-Z|K_m^hG{7M33Ry98>&rprj!(Bv{2RxwxMlq`iwVQhGH%JOym%HYd_4-rcXZ zEFgx}Jy7q`V_)eiXSwNJf{+JN$l&0JkiE9l#;~lQE||qEqLHi1Z;tz`x0RqAwvi9r z6hswlE=dhIiTS%pCMi+ip-}d=E9jvJzK!rqr5i0#TWQr=x?!r!@~Gnix86Y?QF*4F zC{Aq3)P_!Trp_{&KcF=8h#z?)sf7%@z9=AclWV`?0W#wC!j+rPi4OY>St}dtlTk+A z!}gd8-4P~o6Ib|CUk;>LQaQigA4G>;*J>r;j79OY z{u?ZXQMxgyOO7s-Fdobmw$MnoH4=?H+qpN?S zyeYAI-TsG9k z?BUdnNPOh*3xs*zwxCxV6+4Lph&h=1AYz&ZI|y~I{zp4_PjmJ54HZta-JVX#WDl{p zCla~;hpdn&aWt;fs;cm z@(rnv$D1FDO7OOxozqgYEYpLa9L;xugatPYEn0RSX)iW$ud$SSR<6qE&44WuL7uUt zCze^9L(5g+e^3xR^S8zSO^m!jB$jk+3z+?_e0>>WwM(t4d7iCZ_UM0RP( zpUrHTLXrRjAE<2Ol!`uLr#FC zT5SFTUbqQAtRUCApT|AH1Mfgh(OwarDU0dv$y595vtJB*r2(wYMC+SZH}I+1ng>LR zE9hLOTqNAIgoq3cR$X6qVWH>n|8#q+%EDIOG2Tzi?hn%1t9v69NfcGRUID_8ZodF? 
z=U5C;rQIk`BWF2iIMiGq+VsXalfXqK(04l=Ji)lpS=HTPNs9z(I(gY&RWxc(-C1wW zS&?GR7Ny2@GBatNp_#dj+F5II&ujk)uXiv7t|BZ8deKbdec}>0LblzEl-xHA(xZaoWi+fFW zxW_P+?Gk2|N35Tl!_g;{lk>K!T)6ljpMVxS@xd$lW@g5tGq!1A7-ml;97(9kR}_PX zy)d0jH1*_~EW)d8xa_c$edSMEPjOW^t!YZ|0+Iguve1qHj}GMk!Q~%GQyZq)E3Uya z5)+VdI`US$ifp3TqK%U#C=Rn>0bq0+5I}~m?(4Z9W6iascD3nT%p%4z*!T7c;ETk3 z)n29XOWt|4)!WiX*T_97qlOa(6&Hz^k~9k6DYowzy{8S+mEXY;O&~^Sr_hSSWIYb$ z9&06O8!P45s2c}!0Z%Y4)c*i(;oFwIC)R4K$@}j8e>;(~$8w%|PdImy5V(@m6Buk8 z=0w?vfaY6|;KLlR74!pSwQa)MY}4g`Vs^J3W172_?i8PCh9^B6HJP`Ladd>|P zxzxI(j!TsEb>IE<&-}r$v&L82^80x{a56I+$4N}9M+B#J_J>iLdAekj<8zaUQS)N6 zIwJB!KrZo5qBHz(M5)se@LoU^T_H#*u06lG;eI49*)|f>5ao>dI5pc@+k5uX7*GZG z?AygHmzVG|SDlH{jhj+Cp>^gq@v9Tbem*I6ocfA6aN4l4xuaO&O*om->o+#rCYd>5) zwuf~pwBFlKQaru3#1}>^jXdV$JKek`Jtkv2^KAP3$OJz;hqQvxp^is*l*)=YCY-%l zEiJTmx)s@5KGeL@go9LH4llQva~$j<7Q(~F&S?oq4QA%t=kM6x;S<$fWszEb%E)(d zp^-`^NGJafJ>g)5cwA{&t$EoKH;>=4G!K;c73fA5_?=Gbd?m7$Fh(MNGO)}lZRxBK znzx_RRW3Z6tZlF<0`${=&>fK-Rp7hlPLz3`2wDZ9HGBy<1x#^KEL*5}&KfgcrvEPNXH~?pe>Zzz5@FdyH53umy804E0yE9nZMY{=i&EWMMQFf3Ov0us&bHHdK=_+xY@1Te6IesL)=pM zoG&%p9`9effw5keaP_EDR#PiRGc-5P3m!d)9~6~!ZRN}|qHzI&g;l)Tcddq0X1 zYAtwx+)nn>uG zh-Jdtv4sYCi1hobhN6J>uU!ULDgM`7mcxBq`y(bxV*Piz@kth?kMM$GzYFNabw2j} z?o_XjXn+4gsv$r~f7^u`Y=B(5lLbG@D*hak=c7SV58NUuQSe1JfXk0>MXAH=i@X1K zULG4P~F06ApR}pTlSzb@rLPsPL-8 zqd)RXe3$iQAgp8**___}>Xh`C9c~HRR?v+cKAMUKev)?#OLn7(g5MTz$>X~YnwCmU z4PPa&q6eK>ZqzAHkzB@P)o7(k-a;Af83^{UG2)ka?dePl@fv_0A@@L`M$MkIx>g_~ z>VW2vFY(qOYi6UBbT`j%91)woLZiUhHilB}MhZsm-==>xu{)2d2oE|!QUx#oe`maq zV*019!~6V_?gJb}GVe7k$#^az&ss;?9U$o!mmqF*_?CiF8#~VBMD+7iL&J z{cW)R?e6+&yQtrERev7C7wOFhlL(fExTF_sp@v_AAa;CR`ifey3VG8pZs66XCN0Sc z*vJ-2D>}phrbC1mbkPd{23AamS8dhdU_M!;wvL;F9Brr_p;`E|#7TkFZjJ`6p+BzP zGwF;f9g2uTY`qH4YW0g_TD`+pcZl)z74+0i%nsKRp)oZ5^xe6>y81Hhs3Jk46>XxU zo+8?z5g||5ika#1dK+q0+1x^)4Z*bg`X-RqXQR^dgpp~<1cR$Py<;r5v$|=??j~2? zEE?=#!y_M){%R0LD#W+zG>>rKV`Ta(r`W+1h98W$$FIOh~@<2;-z{6q7s zc;DflXqpn8+hqwsS-Y{?$m@%0-xl+6S2d9K z`G#RrZyhq*$N&IAa_8yNrrwb3o~*lewL;p7SernvFMy4XK7p>1`1eKq9uCF>8lAD) zy=GHvgX0_nmbGD8%WGuu!vjB_uo+>pE5gmK=zdLl`jzT9UKdVBd;E-Qy!%IO)k53M zHmBf2vi5B%ob1fyk7P_}Y8bDX$_@$4{k@P-8&TPTEKBydhM&+&aSw|-92-+%a4LyV zOHh4@reX&p8|i%CQN8QDFt+n8S@xpW`qNnPL&h)sw_;`}cJ7=90h}*x*7B_cV3KZU z!EdsZ=3gkYsq{amqnLm}mQMvA*ilhne{2Q&wV-ZJXT zn4Zpk^)_L&SZ_0TA3m9Nmne8bb7Fe_E*NjB8}ObW`U}@-jyn|#x|gVXa$=;@-OP5Q z8^gZ#h?bta8uSVy{S<%DY_CFVM&AGf+gh;*4)90)?4p@sBi)n$@W!;Qz7A*M?6sDK zFqbgfXLd!5&?&ocHr%nKKrBU1PlsF*U)OP95CK6aBn_ATw5Th4ItNxihZZb8!H)yj z^58{4POj|~X=bp|uy+q}JMOEgJT>XvAV*IXfK%}q#2e&JHb_d6n|oT`+_$2*7>9I5 z+VH{*Tr>N%{M!lcO-&)=>`MCyL+go->QkOYf}@ZI)x8OKgTpE-yOF^SmHA_R9-iXb zr!5)!_6Ryr+oGZt-;aLM5CV&Tw% z?}ep?JZ9?CiSYVC}`5}-}a5B3362e_i?ppZ74GE|le=zRpc}+1SjOL0TrLc?p z)9?BR#0J;=1nkTWfieq8{|Qa)5@pUMnwZ&D>Y>`KXpXAuI$Cm7-*C?u>fo?q#sJKU zhN^>=j5WWxTd;@r(_qcnTu{HNSGmKSP6@N;{#+vsZNA0In4UTnply?67e;qRzdpn& zMruabw1+x{9!|2M(_z>JmCn$T7myxx)NN^HOnVb4VmxP;%TxAt^b)xrG^f(rk>p{1 z{Og;#^^=_mKl1{`{V!8(DFoE0%RG3--kANQz`UpJ2ci&qeB!D$^Q;=K-36^=(}DXj`gV#o+Xv1qDPn8xq@hLUn!nd0zd8dj|)EuR5^H zWzYD%+vh-1Ps&aK>hgD##152f;AOe@N^l>0et0#6u{Ix4brb$KhQWT;sS~Bi<6VAz zNab2Ir%2@66_jE%?ugR}Zta^gZd1&s({(AC!4EQ@V)3VNsrYPGRsLAPfSGOpDZ(ZY zUF`6?wwSkdQ58L@itEUD7moeV*maz7)Q)W|N|iPcjg0y&`P{Xi)laec@vj0NR(@K$Uw?kX;B)GNc_KBvJVP%Q+nnonoTv#e8#A!z_T zIDz=xKgdi zcd||MIb%x9O#6vq{ZoxZ&$TsM38_6*b#1s-yndk(JA=jP&dQ(YyG=P5nB(NN7Ur4k z%BuN0)noW26-}nMEf|pjpw4@ZW_{ytLw?4|0R?H8gG5i~-xX#9jMGqBR$tf{Ks{6! 
z121!EgizHH_p-;*@>U0S315=leq&H!f(YOu8|Og~*I_*J#UJ*le(jNkNc3DC3OfgC za7Brs_?1hxWs#7vG3llX%B4;tc~fI4+~ZC?syNvj*!?V6%fxm}9iT|EHXIr4lE!Fgf6#K}8&aGS;J~(Rw=x^-_4!DFsi_nz0aC&|p1YJ~72G1S|^R zz1(sgq0#q~wa0iZJtpdU^?O3_UjxP|9Gc?Dxoyb|BU`k&sZ-GC80`f^cpcW&wP)^i zV5OBPjYQ^dvH^-G`)Rz70N8PS3j>z%b7kzUa&n57fbhhf?_JXo_Z+(Owr>vilMT?) zglnekPz~7!WR?w-&q3CqE$VhM58SuQLAi39Qy($wSxkdo&~7(qzBZ#?fk$EBs?q|J zp%a(tbJpJc3!HOUrRE&8DWI;WNWb{UWcE9nP#1~ENh z^$JpCMR;=$es>*hLeVgj;j@kEfzI67qMgz?+%-U+t}|AKpm|(a;&K=e1M-lgmn5e2sf1Wl5lT4<@;bM=0S>Lp{OTf5Hrz@GQs*C6uECwg zHTWUk?y0m_F3!(t^=#1dKu}7F0J@+nk9waf^^{*8*t#tjyV$DTObU^$`H|Tfbk|Le z^8S83eXNY#3UkHt=ztM;a8CREw(m1zk~HfI1~rW72hg1!Sv!7T`x}9lccEf;V6CF# zO$f=ug>!OYMd3>)FQrGh*|)@(>?1GjXpK)Ul+=t>K2#w^0M+n>;#tE6OI<(mILN`h z-AbCvAI*LV+YgcpAw56u(ZJksdvYjfDWl`-;;ya9ytc2;4HZmm2u*Uy8Vl4hp!?tr zAHy+VG56-K#IA*0tInk?)YQ{PF9-tnq}F+fg@B`0J;gO)30H%gdxLeopj*dc ztj{uCsC@8=?{QUR?Hu?%h&EnL+c#u?Ml}FkYr4ylvTnh%p5u#by7F5bb=l}Jo!+y3 zn^V%VNWik?u^slMn*CD`Xbk+s))a43pD|vtU?VILb9m+NmwConUUmv6-O?q7wkZ#& zfFtM7jr^#s(h1^SmC6DfmKd(%o(>Rf(JL-_a^zFf8(Z|gkcEoM)iLBTkDN^V1tRg# zjkTlKe0JZvSy$+{qzz{IjbM&H4sw*UZ2)xp7eC>usSpp#Nrg4Ph;MFz@VM>9>v;0m z+qf+^jFtZgHT^MN7?^jq>FW4~7)r~|vmNY`R6U?tED^pdiLaNb)Hd@Nq%I_xn0>)1 z8o}KA)QNDO*-mq;6-%Dk=Z4ZdH97I*V{Xcdadt>0h0h<-)G$wFOxGuwaQvF-de*JI zw>p%flbyrL%<-y31nVvuW8;&g>rHzTo(SwAbyETIM5PFhm z$)*loYx1D2AZV;ELY@UX5l?STzYF6=K-8F2@cUbN48 zfHvHY<{RZEIy!X|SBL+j|Gp{QJOXfs{k+cB){ zP6xeguv$1>JT;Y@XyX?<6rF)_OhV}Af=i1PRRbVUU>As*sDAtc>CCG-cC+Ew8oENZLd#!ziC> z(gocPZ_O05zK=jyL2DV6*gjPXX%g|jrE26bTo!WUC_J*M|kC0W&5q}?FrA}2=~ z>o}-ZJN340Eb2vkGBHujrn&o5nj5Hu!KUrN>GDeB+6HiE%nrO{;Ga!2%j_zd_E5%# zs(Kov#>F`9%0y?i8^i1N|1P1Ks!g|4oP!$gm-<=Ts)hD+jU~?Uxcd8zWPppGqUXow zBU>+84>Edmb{7$@4B0Et&=NiC+y{jx>JiCoY1VPN==vDCb;8JYNMGA^2lr7a8i?e1N1IwNU4CmL!We=_wryw|#Lm*U2<{0|%XrV`B#6rN* zVhtt9G8Iup_}xgZ0t?{njpz~B#89MD3IIx!!eag6X2v-5t_I<(BDn8qB)i7Oz~V|k z57Bmm%0N?sfnIRx@AqW5pH*vs=gW?GUPsi}T}64$o!IRTwMx{1+@*oJ#dQKc^(oG; zR>_iG++c7+5E4g;)Mwz0MQ-Woj17USoO+bnX>U{z7J3$V9Bnoh9g;9Ii&}>t{g=t7 zI*x%{6@?&Vd0+;IV90*>LT^qw>CgkK4)h48J`w#~Hr4{=<^lWM6d;+bY@E0xVXKQ* z*=of9q?hoeId{d`G*;OYuOV*JRj6I$WCl^s3_q3w56&|PR_RvM2)+>#LT5X$rh^)H zAM{E!XVc9LgHymYF)D@*nk$hZ2#GaKv-PEJDO9=xGAcUTI8RMb!b;cB*WXf^{mz!~ zvYc0j{yEXjyD6C}q_v)@__bm^>!=X(b994x!Z}3ZSU%Ui+E%Yp5P6Cl4VsQKD2#=& z(_RMrI5GbhVyB&N>703Rg|e1>cQex><1ej(w*zcnn-(Ih%fF;0yUn#3fmmwVX;Mfu zZb&}G^_`2~TfMYU=2ZWoyCV+``Uv-%^bCVrGy2ctLj7v=cM>AYKDU4T>V_$eQ>}FV zrB385vQ;<#Z0)4?6EBuOkz4DZdkT^LW*R-E!aaoMcqD9tpT-4j>i36pZ2!-bQ>~q9 zyoO?MTPp*?eGV+B4U=47H}2PV7|7>~p(U29RICT%8MWAO_&uw7*R$XE|l16-Rw31KM{C z4$6yz_*YNew8(63DIC0uE{eL_WR%(T-~<|Woa*P?V|*QcYtm>99I8}`7#&f}@&OpX zTJoM*N{mS(8tr)+b=0c`U2H#=w_vPYszwxu2UK{>3MoVig@9(5Z73cJ5b#~HG83rQ zC$e4_6Th6@vR`v`T~IO4MTh>Z_Dn}p)2h^WRd8?&S@Wba^nDI51yIhQzReY5QE>sX z)GvrCq1U%1PfuD3Kmqye`cuk(Iz$EZ)QOcSozZvI^hf|iY$r|U1|kE>C5)S7AS<{! 
z62wlM=V90U=U6tIR?;bDKg%dOw&#>=*#}eb(f|xj!pq%f4wylKKy1w?M4E=pu9% z6m3>zLl-dJ;Mo53f!*hcfZi~39#Yo1Vz~_W*PTT)wwt|&e5tkg$9r?GW{F&eV1aj8 z&Oo1UJa+`U+z{f+)U}Z23L7h-1Jk(%@lBpeSXPDezlfY}9R8`r{!yZKUx1F zD527K$Xw|7W~yFfSh_C0x*Gkbqf%D9nSwuSpG|kIn~l+(<93aKMukNK<}IH&0usQM z3m!Vo4#`@E54AYXH}vrKe8K~_UZ1(!z#=7($P>!$5Up9G{-KdVozYkxj}dROPXO)g zvA?CxwjabuqC>=m@ojZ+<2?zI5jZHxYC6*5Jl~ixE9vxHzi_ob(}UJ_Kx+I88LBZE zF!>3}H8TYKSh@BNHUyK57cfdS$a-1L8i1(LkXPzu%e?q%5 z_bjjQ%tWlu%)i3T_uZ6Y@TY=Q1L$4*+EJBLkl0`(uRa(`2e?>CO$49FA$fOeRiL`0 z&_!Jjn#^t7r~J@_m{2jkZ`j8hcX29)DK!mCLOx`p|HuUMaY{dI)cmRNtIEv2!b5S{ ze|GAw7bKS{afRZ}9#ktd==YUSb^$J&c71QSwRoP`?B6IVW@+GBw8V|8x_cb{$^QM8 z0fl?j4kXE;&-uUq%^F?IW-)=+C#hssPlb3vtV3aFXue6$M>|92$Q(gYN!|49cD0Q8 zSkM36K}^Jre-lSwLVz*8DMdrHJ#xG`QuUgH1X*6OM9sFnZr1rB{orSEC&s$EN%omAF= zC_5Ff(KA9Hi1D%(@`eM2IG`QptuTNOn=y*x`E*J0OqtUb5z zGl?+gXQ3>Ign;PoGQiL-Wy4U>C&H3z2RmtLf8GHB>7$%tW^|?Fr+kB-H!ws&v<$=p z^>eW5f7%rQF-^&RuOkThe6Es?O%b*t2}HRz*A@X76YM2QjepV)0a3NdJ*#S~$oZ7l zO(#OOT-Q?=XO}_0n!8GV4&D#YEefoT8 zvA^kOnWNSfTcM6pTamO~ChCShyh5zsJJvM~sL^U8M-me3DIDnB!^yeM$ftowZyodJ=GZVzMS>fV$F~0$Mx8#Wkb3Uh7(xCQR(y=$i<(<7vCdM70<< zZ=2KHmy@i`yNf?1@8q@FiSPMJTL_?@!pK!PI~q40?Itc9hZ<~@>oBZbCA;|=Sr$6D zzEME;6CWyBWq5@01SL1XbY6DG*)46KWT2wX-%=jk6$G`&40EG4388|id0J!^{IdUO zp?&ilHKbJghxU){hbB6pslB0vDowEvaT~g7JXJQk zt#gB%P9A|Y(W~Gq1AZi&_EZ{?55E7*;-D`zS;_T3nlmkNGgF$w)TUbJN|XG3#Em4V zsY(H&ZSfZ4D@%0c6kA@R#DzCgQhxAB`EC8CGYQ{zXw>Rtu1Tn*_Ahe>^u3aiFprnvU^kLCc>DX>Kv`! ze>l!C)38oh3%0XBM^Y$=_{JTbdr zK5D7C>3wde&j8{qv0*8UBpaGy0OlO<2O(+mj<6f0&Gn*op#5ZDvMbGf>I=Q-SxFZr ze7JC!G^P1hXNX$-d?h45>Ou}lc#wYw=Q$t8)ZC{&xA8BC3S~Ukm0eiHq_}SEW~Vt? z`L1a|V0${uHy#*jKz2>4{|AeZ2>Lrer-WWW$aUS6#qyVMe8Bds1JPB=JxGSStoIOg z6FL*BoGGK=*Z^|!b@AIyB@&if9^A1bPlMs&5TWaaiCkBAO zh;mrw(hv{OCqD8)npj9WSPFL4W0Xe8bZY(R>Wu({p86%d5pohR=??T~_MH)uL818| zdG3iX7%-C$Mni{XwB{U5251vMbnQNGzA_dnOGC-)Y|;OeS5*UumHcIJ!DA`@i;~g= z!tB^CeDCtCa&KiK08l^q#qLmvksvqY(<7-y)kd~f1g9Y(eLsVD`&NwU2B09gREr&` z?{rq`8~!mussh7JtR$UFRpDG9&D@5n59OYuN)Buhw=>67d*b(3C1N3#&M#=OPYP%3 zEMmvl3|TW7T(@;O5S#V8zzY9mnqJ)%m!b<4bMB}AG0cM~Ct))2O$CCP*f>43USdiT z_d~&6Ido>{c|KEW!8tt%36OK<8UCYqS_ujnJ|_?NI<=;jo2r`|`_?851AZs>uCwCh z@_4?*Opx9#eT7e!W(Afu6;bb*B3KvEw{7m}DF_KjxwQCo2{VRF9TwakR`LZ&Rnu^; zVI5#r-+m;a8yY}P3zxw1Foz{swfy+U@DBq}&dfl_u1k&SHXXe)r)aO5C2IS5hN^fe z?*J%H{aolmH}=kSbq~(oQbtWbzUk>J@>UQ^$35o~2i~HDiq&AWXS-@RgqVbwuOmT? 
zfUs-#g=m*Ub15gw&ob~3HERL{c2NZ&Nz_@LB(xBsgYE>S268{-=8Jh^%jxq$j*cpf zFD12!5?}7o($ayQu0Ul#uO6@F(iWJnW7MYwsF=6?mfuZgpyDqDnJV3>`Q#AE7x6@h zSQ=&l7=NP2-t9~Mm<`OBBf%6KmbBNM`;a5A9Eu<%kq`&9(R8cA3pt?jOzN?P=goolJ2OF zfm|i(DVuQ#vSm>tjPo?f34u(=l!)1~V0WrMX-es&m17Wkaj*i zXgRC5(emP_wOCh5eV!Q1#iM3J6y=kv+1p2JOSyKJ!Efa<$=)Hc6}8t400Heo*;qum zx3z}CoD9pTeW-)J>30MYVLIxkzyEc`#fy&~J?bx;Ij(YX>AP9j(ajo(S7#gEt-th5 z=!VS))@>>J%cURQ_P#mGL>RvMwK@GCtF#~d)eoC*ow3$`;slx+HFFIfWyuI1i9_}> zgpSTJjLz_q)|q;thi>c9n5rp^Ng)k$GlXy_A#4Z0km1~SN*ZQU^&(%u)FMvlNU_T~ zB#1R!?!gsV;nmT2MomtqZ_o_0=Qb=na7~rGhR>aAyc?JCKfDmBMZFQdwCK0kwVERv z{$N5DLqk*RxCLPp6IOn%zJT2mhGTUTpr(r6|%_wQ~Df`d8m@h1G&JGTOU@w_P)& z&9m_O_j=1^u1e5LqUU?k6Fc}2Nouo|{M+G-4A$9C_t-;R?C)(Za@{hj!Uc1?}UQ)lOL|tc=Fp(-9czaUqT0y^~VD(}0)LHxPCDA9#!5_#46W;mfcn*EsSS3Tfjf=k$(BjDA<`78@9Y)V`gj3p2eg8zu}6%)*fCEv+2xzUncu{( znDzGsTu$wkAP0Oi}ZjLb)X@g8%STofE1)TPq~oA$k9) zjY8RgUD`aG5S_gpCBdQ#rH{5&cQ5ag^uJV_!Pk)LrSD}@M+H>;;KaEbBcAaSy|eHJ zFsCObGMizNjl^)6W+4>yhPcRh!=&R9LYXMJbUNgOtaKQFQy4gx&7F^vU)0Wj$s@g& zpJyRzHtUmg8NRB9_U9;TO-MZVUL2*4AEoOafiSi$oI3CMu-`eVBlnQo%B^MGT8>N@ zp+J%~%EE=%C9XI~9648H5oHpRe_gw5cR`|iyXjW>bSo|0{ouanm7ZwdJKIscrmD%i zwV6d@#+Q~sVV@j++~Q?%G~6+?~g zECx5aj>Io5Wl}uz*bnDB!{TO0@SjzxH2Lf!7k|1VLXNEZLuqj2dop(BDs_*)tX&)7wYlYeXS#GHos?pgp7)_K;onpRN%S{p?%XquNeKG;pU;r+#fJihdRqrWLbFzOZX)@RdZ@ z!Q(NTk8A(@1d8d-SXEXLj_MURj5~lxr&YgLvArb~ZF(mPNN&QwjW=t=Pu37GHwcc; z4y>^lD-Yj(eAUlSd=mfNkC>B5DahQ*M+N7%4U*_1cg{|7ji*c%4*!aHb@=Vx_WizF z%Pv%;oWwrtcly)-Lq~7hxIwrRUOZ2``|@E_n2FCN^L1cicAoX;RBpU| z3B)%${6F44bMoa&TlB8&YfA9$olx3(JkAWfR(hPaiKQTqyqP>R?tc@%{E(aO{(m{z zMdkMY<~ImG+I~-SdwaIb88G%5ihhw@TecQOy>MCo41IC2e!RBH+MZQv#CYuIWN2AA z>cn0$xqjX1l*{_71BM=FTDg@*yRG?#%lhesk`ebmR^sTyZ*wb`kgx2{d<=M zQ)y2uZ2yWx2N-kc%je{M+sIjvjk?pS-pY%L?CUKWII{;yBe z^rshHGCa+~_B}R6R{CJg!-h?qN5cXqN{}~Jk<()l-NeW(snA97zoT?MifnC)aurPF zQvx=^;oorMI2Eb8TvUA9v#bS{5y5@utU0SXF2XwDMMmwaXEdE^HP&>m`vwk963G5} z$8vJrl`ldFV26I&sjlM1|I6X`5_ZGwU3Y#MgZ0_%F+A0tS&}*UQ?f(1YZ~i1{AhnO zED4?Nh`62KI&-)7s%mKoK{p)ZR7b3pg|+sy8Qc4b?^_fMi({f%?NcYb*=P~XDK;%I zom=%Vyclg*aL&Rf#8%d)PQ8~}I<^99>F>qinFY6p48WdxDHCaC+uxa)%;R1ET!!&m zwE`p8Zb`L=X&I+;TWxL^ZA~e0J+kf2?XInFkJjMyu8A;*ZBODur*2VQ!=q1|Q_;~| zd44g5f{o1rI7a?ttFh1kjT)*|F9GFn_!fZ&Tc1EQ>(29B_jc+XqD_BKg% zD!X2{b3Hb`f$8AtlxtRdV|=eCd0n5}q~{+%NN-kuct z3r+8g)_-U892?6wXgqw+C*C@td3CK}iEkzsuWlXN+;gpjoiyoojcn!hENU(4Rl z>so`?g$<6)hL@#_6a5l!$!oOQc zJrr>hEGd}dq7o%F#d_n}fqq}v_Ab7P1?(wCooL@A4&-4CaLud^S0Vd{=IeKY5n zfBTX5BXn&W&m_$*AQt^612k}X!^Dhg`{3PsI7EqpHV6WD5vYEmPw|0UBS z)1~j)aJS&DV-TN0K@#J+ceQz*!>xmHqq`3TaVZ$FS41HCS`pW(UDHXT9bZ482;-*1 zJo>xjQd$-|>|)25pZ7V;7a>6|UfKGn%Ej%+!;X~rq;YM@Sdp65>_9YVpq=)YjjY4UYHy=nY}tM! 
zJJ5ANn$AzxvIYzvyWF`f9A>c{`) z#Kbn4;DFXz*iLTnHSOZ%vrd_rp6IRCC%*Kt=V&`cjel6XkKAwvy@3!N*=o9X;)frI zY3Dg%UBdmaR#4#h61sflxUK|OA}wp(AhVLa(XQd884O)HF4|cJwX#PW0 zsRrj9nBUaduWmQ-VQKNZ*0OKaTC99vq$_J1|KtfZHo1|!~seNP|5wdTK-+yi;pJ+O;aBl*Ka85NgLIA&Vq`Fn|CwXamU63s65 z$69Y6wVgw6%qpU5WDxQLov+aSp~=H(S|(n6Pgco3=`_MeLhG38lY2|j0RyoEE|F7| z4ksp~ZxFwa5T0T0;&9_ZMlrfFn=W#vMIYx|0yxUj;xNYggZCPUO1D>%(R-2z==$_+ z)KI@%f3Xd<3}4`?9~j#X?#e68jKM=pP<6!={5Tr7aL|sP!FGcgp>>jaOzPu@tMPyp z5%|P7*Vr;#W!80{hF#Z?x3Yr(6W|S8Jb3ASIbuWNX;DV;w z&$!Jv^ShEDQr>`h7k|ow^R4g=#gDrq)Z@Un?<3uR@b8Y~juwD<(;jangfHA3^R6Q% z|KMS_AFX`cx5F*tBEF4!2#q{5M$UK1PU4|juF!g*H8#!z{6zfy%H_cnflaprNG?oT z<2pe+HDkbW2=6060<*8;bY}YzInCr&Fvk%d91vAY+nw06-pc%a>6?H40DS1=?Typ% zYvpcP`nzG{A9_1usRXd(Vnqx>wc4V5ke9Q*$SND|_+>9SI;@tqO*PQ^qiR9#6 z2OAD#>v1$|5BNcTVt0fC;R{fHB!2kfAlcr8kST|%taG{H{E7=J}>+eUv_mr zg|kN8MVT8}=KorgCqIRU117Fq3KS=F8VBQ)fx{;T8*Al@=)<0IEJEk+-c_z zcvqcDb2n@Q#*^#T>#X!1ywRp&hmZ{XFo>xR<_A&sRI&GRun58!$h&nX2T9=+bBc;F z>D9;F2$2Z1C9>@k3ALyg+l{ExHc1x^-D{){GyU5jR991rhX3)puVM=IG@MQ+{J6_7 zcd@nbL$CD}{_TgOba67Uw|yVfXT3HS3jflOjYO4Ib<8*g<1sD6zZBC!vCH_r9b@9F z2lqR0nn~AnGx$45%cY15!f6EIb3DZKD|78hMf(hNVU!t_sKLLpB|UNAXL#kb>jc%#wT|fqP!m*O>gk*y4)X~RY9ib zm_WT<2vs@O)D#F~k2Q*UDgM$P(()j3`fEZ4zJXK$k4kUoopvSo5qYD5o{~#w&^mKY z0jyx7Bv;6)qwfGc^n{4>>klabdM*_qj$5$NY2KFf@9f@A7{F*6T_d?qL)Ykex}{|9 z>DrVR(WP+nTyI?+R(B=G+Ng>aMZj+CMcq|(tg=0WN9=}$c(>{VlBBWOVlr8;qFa&_ zyblkV7#)iA!VPYn0Id zuX!7ECg-3v?gA=jm%$;}z5;cg4IsYFdM-T|vs|+_WJab$T)YQD-Tl8FI1$+;uMEs}=y`d<{j&EM_&^=QCNex|G9A&PKc4+@`%oVO*JWe;2SOYWc3zFf8l=6PNgO zAL&49%z70o_xB+(r59^@R#h^U@Rt8q$;@yB^HV)*1QjMrO&TgE%m3R$6v~9KC00Y) z1x^U-1C__#fA;^!lI|aPr|Nopk(B6Issn+Ve-&<0v5J063ia-W-zF2kJ7uQyec^>CwQ%Zm&9LV=p@ z?zciHw>tM~-?#oR-EH@luD5eah+=yo`K=2)EsomzeQ*0GTEI@l zfj9H8uDNG=b=PsL|NUz_cA*GM6$5^Nbr}7kvj_AYZlWynuTGfMexPzw<1M+Zbn}$O z+mK`l4O-{!T~Ot;D+&4n%&!L77N7N4VNk^0O3s zE_50{QpX05;%kYrqe5vetUm!;_{cwpz%m4So-(2RFdLs<)*#JXuALZ&oD8)?cbKMiX5+Jbh3)i`^1pZ#5#;*N&J1p3RM_OIhwlMikTs@+UF z22Cjp6DDjw;3p*_Z*R0rt|2_qYfiC0dIwQ1F75Vjm*Qi9=GO4_tLB==Mw5TPxR2O4 z@{=eS?O{|Rx7*pQ^U)(otaJ|}k$rtCivMgK{83?icf_V}e_lL6a!al4_jM7i29Ju{ zm;YWuKQqI9_?|ZwEPgL1Qa+M}leJJwg9||@l_0&Zl*O^WcX&wtvs#48Nnq|AB z2Q)e?Z7Ha0u7BA*h8O}p^ALwyvO1tW>gfMzeYRd*G!(9%`!GGr@m6BTR^_MbR8ITU zY;62PCH~W{@UZZpPJK1}TYoGPC=suR|Hi)Xvm`Qta5OfMZ0hKbY5Y<7DbP3kgz|x( zrsPE@C#h$b4(hm$-{MnD_lv&nf_v%&(ARa)ng8y!zHY1A*!M|IXMUChsD107dv#lc zwxd6f#&$BO-m01Vv*heP<)?sbYCT>Tx$Xkdu(XKohkjP@^6SZ=iW;D?Z)iIgb8YGs zogk^~&h5EV8`lf38=t>w@V!yLP0rP}EJB`(+dd0}^}^5m&R-pNxdk-gN=;zvh_5WP zddUYJ-!@uiyrhU;F0WgmheDH0Vi-w-qbt4pXBGb!>1+KYPk!3U=ozn0dF;5#3&^C( z=-9;LsWH;#;mGUD)%{wvtK5M9;CB67mDekF{zH34)oqtdd8dvW>z*uV4qg;D@b<3E z#GIauZ)gizLP`J{#NPr^KaH(hWnKISVlq&6Q++Ee9>XfzR)`%j8-~Saf{cfs%r)4X z=`IDal{y+|JMkID_ByF?NIp0<65Tdwer;*nSVGxK2T*ErAx-p5LzNvK@{jLbZv|dO z0{tn?pnXN9z7Z~}>|I3Vr@&p*ee>Fr_}8>wi?g4P;A+s!Y6f~ zch5>U^?%Y)=#ynx^fKpkE$YGzWlQplokw2UUjot#ms719ehUdJ7d7&p!+fShkxoziVtUy_qM8YPR7lGa13j zbXQXUV*7~FHg2XYTDC9Gr7k_hqGptac>f?pjyF-vRxa2#;phJkf)qF|Pc@HetssER z3NvD+_RxLDDwDE|9htsK?s9j;B z{0DyA&rpfv5?_m*N)f3<6bx|x_k*u|1^3Ea+}n;|hh81aQF(He=hdrK%PiRn^-jsr zQTeW!Is6vkYz3x1Sxu#0`nZ+DA2yv5T}e#OazH&K@IGnIO0k!V#rU+=1_XqB%P zEMEo~<)5Awc_kfqm#?CdCS3s`n_feVYyNp5&^gagGF((V+(ncK-FS`?9Irei%xLA{ zufzi39_bE_FnI+^l_||Nbg7H6$=5GoDdj`uH~n!Jk-A0*a_e~|6Ymv%pNsjD+VY`! 
z+bW1)t1AEFY6vWPm7zsJ-{|R_`{pI@W~KberFV_Zsxs$SRtk12Y&i4!AvG-W}HSGuc zik_VdW3_*2h?|T_49YvNJrbFyceT48>fP)xtIpK00wUpQf@hKu_b2q@|3JQvy=ywg z&+9l2k$*nndjR72V=XAKrVWnTfM-OAE`I?y@0+HcNrY8C>Aw@d+b>SRo1R^`=dIX; zbM#Mg!Hcx;%Yl@bI)xvAx<62%9~4nm*|7iaVyw3Ajj=u~@vJqc+U8xIn(<>@(5TE8 zM4yGF?$)d0&KEaci=w1THmf{!_|L!6`h@i{?9UMz;HK_U$sJXKa3{DIXQTeda2ejQEpeG_;!}NC z%TmyK#_s&p@+x+flEIe-^eXBx&GOgOi7o(GoI+zybQ+fj>D#*U!*R`0>X31aqPn3a zWNJ_LFU2J*kn?pHa~fQc6C1UkY;Aa=TBfOadt;UvE6QyI8Fh^f^)_uJTPX?!(I=p$ z7p^WHfkgsYnaW9j80nbYswf!pqekcp{X=DzFFy~5ES_#2JkuRBAfuuC&iQKb}vpfs6==Ivfr|5^)x6QyQO-<s1gy zhfOqkG#0h~l@}W)|E}pAD!CGB%=eiObxQ`2ZQKmXec@RKseSIuP%2MGN(K1nsmfDk zmn^fTyu~FhPpYB>jt0xFE}`?*o2{BIQ;O5pz89k21EwjT?wy(ro2F$`D}FAgR;9CRocYAt$qp?twl;Z38Qg zLpk3(K^u`F5cez|Ftu0rwK6=u8CEcz{h_{cR~%OMFGKC|(3QaSlrH_yUOuMVoB*VG z4pZ8t%U39{W*^w5RmSG)G1Tpl+jm>bCySl_hF`i?p%>yQ*%iLEeNCDA6ZM*z78m{Z zpZgM7r!Gj{?+9?7_DESdA8G}zvo!qP%3@9(1I7A#m-vGDM<)jpCm~0)YfZnZ$8((| zKTaJ3hGyIhxEB22-OgO z2Y5N?;L@Dh^6wK3cP)Z)q494~UvJ_(9QXU>gh1Dmi;ot-_*L1>6-PsyZ?U1JPO@Ip zdSg@zx#?IL@V2+aLK`f;R}0FA!U%JYDw_!u<@uW3>+XYq*QgJ=mLXL9O9tStgSWtL zLee6%&9U#7qbtNVjq)45NuMl#3Z#v2@5!}Kk_7I0G8p>R#r?ISr2U?p;tkLJX)^c~ zph3vgeXwsm-bLTUM-xLwalc_@&}g_5?VHW(@tnOn!0s4M1eSO|C!z5EQvYvjxQ zrI%wJb40mUiH?8Uv|ZRh>+g41f0I1+edL3y$NNCpoP90R#T{2l(kq$br7D4Jra!)F zpb;16SsGqci!Z>L?qXH#MlixsbuwT<3A*GI)41|k^kC9s&bJ|(y0zL7gwL=Q>wkwjZft1qBoN4`8woj-A8vP_7bzf}D+ZhSq z(SI9{&4KSXt;$Jo=(_J8zL)HutF<4d8t)LOng|`8H)U0+JFB}>L=<0qr0G_NT5@3; zO(Q7O?*M~LDYxO_T}{oGO0AC&MAhu>aSN4`E=)-G+$G&pf8f(`UpoL@9N1#NELyo@ zxd%Yhr`wKPxHjecvT~#3ZojOj17TD(*w|1Kxa9;M;*5>jYh8#^6n|L$WnRaAZ*JJ$ zVRC;J4O?6QtDLvOw}ScP!vXPp1t!aLL$CDj;!}9n3W1uubhKFbNLW+vR))n%@Y}bj z)VsW_(7!-uz_Ea_9nhBLm3qnQ>`a2Pz;GNp^H=P`qxYXKC)iE@bD!3oY)z+a6F+nP zWBWw85hYGzd%&ZvowWl;@gb%bOa6cH)&DZ>`BS>kRsZMe8e#2of}pv=;x2Q+FY*(g z_QIyab0Gz$g2hQQcNMFTV5AqlhZpHpgWv<{a;Di#`_h2ss>0RGl$a#-0`*O_{RrsK z&-O7r2LKt>+0Fe0B~B{t0Q_;ux_|?v#3ry}KG3bV zf_*QTKB5_fjSvLmlsf4Wl8bjWdH?Z1x{1@k@%S4X?z||!x<{kr!10Er6Dp-LY|X&QH)TUXF{V)31~{epSv-Mpddwo&AOQlxXFtwym5>n!I{qYIqkriC^a7@>L38A@_Jo*yl50Morg z!RPMb#vW^d&$;A%UU!1ieXpsusnet9?!AQ=2Sp}jdpf6#{_ps7L(&ZKBJwb2HwSOz6#jBhD>@(abFO?EmeTEzqK&Yx!=yvp#q2{v={ zN)Qn7kSsFyZh1Y_r?*t7hD7au#45h&gKos7oXPn~r=-M5XYa6EmQT&m3Gb5N z_A`QN54e5aoie9eP1Xoq&3KrCP6H!O*-cop=-z{XOJl{sB3Q?Dp`#lKrYc%nD4g{5 z<5zGmnsb!=AmrqXvS`s?1?9qP5?*JyKT$T-48PHD8uY&AO!1`2X2!5E@CN;lp*xvd z;=qm_VS9_7#+KmJTN#5@hf9CDG*FXxzu(3si_Z+Eu_Gc{a7fK2V_4sP(5MZ|*ef!f zETnpf(u?OVFv26+phA|-HkF7A|C3PmKJv-+Kecy#po_j*nkrJK1V0U>=8Pf6TPrLs z6kl)fL1t8_SG!1rbVNz9^;n!vsOy1hTB+Hxl4*>L462?8cxcf8u43<_m&@x;*dvEJ z7BV|+E%+7YADxC?nhP|oX!+`om|2@`m!eb;?x_f1M2ED3AYY8=1Os{3-v_0iUXyiY$*7Dhz0o?)x2 zWS@Hn7rtLbm3Dyx?7}Y>!1l&?i~HVQ(Yt@M%Kuv#G-~#S#RoP=@yitl#|IaZTA-0> zw36IpNepeEHvkcRoiWHta0sM(qI&`?k3DT^#7Vp&RUQ|E0zfj+H}2G}THlfA8KH3}t(9kn3OKrUrv>ZD(rc z`sh(vmP)s33x3H!e386Teds#&z@?v#w5bPX6#d?uQCtV$jr-%U+z^MZMzUUzY2Jt0 z&R@yM_2j3zkgrgN^8BkPM7f`fc8YM1Yz0TP)kz9b|o+{ja9-GCc6&>PPSR4d7B# z>MJAqY*CgI-j*jF5TUR(hf!P;p+z?@8M*g^66ncRzDOYnhO*o}XQb6`A4rR8?-m!M&K;EzR%F5lhK%Q)G@J9KyLC`XCoOuV z(nGfro4tR0#N!(NRwp~9NBE#rjb6_I2Cm;2`2~7JGQeC^&MJc_^o<&K?24OA8+Ejx zxc_6p*TnFPN6^UI_dtzw3L%GG8n43de9vPm>*l~?C-woXgdK0|&f}BcwunqfJA@4z zXG%Y}TgIq6OE;?oA^-ex-ptHa)Mt}}0x3Za>dxEDmv=3Qf#HXIH-GwkSSCIy`dHnU z^>{PAhRDeF&AR)_bFcI{>s0c!#Igm1PqZ;Bb=p*=^xX*gLj5PqqA!1{EP*Wn9!zu_ zGzJidUR3d4$oR0^4IAEpnx@tTD^QB5q31Sk!`u^zcyKF42NldR=ENAyJQ(b zQa!yUa8Wrub+^jz`z@cWSmqk&L+gx_7cSBH_traYQ2Gif@se#+R!g`S62|O_aZ4s5 z6*M*vO9!?&7g8ca;5|m0%=2wRp-GcuWVD?WVn1g`gTkyWqq@ZF9yh+Le*jTor6qES z3E5|Gv7qXYdWaJ2eTM?xIg+Eo<@k>feF1XZNVIGJ)-)%7MwSU)VXwEDkP6=rh!k>N<>+P 
zaQ3xBi6U_WEydK%e$?Ap0KN3bBV!_dZ-8mH}IU} z)unm+KQV|>iSz-UXIlN?>_q!>D!wKq>^pzx5atCKbg;|v z_>7NO?Ty8c8PHaY7`P@EGILDa->0$)WP5C<5a*rc;SvC5T9;x^AbCHCY|fLG`adC% zTJkIe%~scDWyYjCq)jr7$X?Y=*(U*j@oPYbvRC;&*={q?PS(1Kb>+1qXx@x0>XL}$ zLh0^{zTBBaK}F+VY?&iw)~ejM9rIA!zJK30M*Rt*4y_?FUW;kt#UNpQt*wQOx83lR ziuCT7`YTd?FflS`ey`Yx&mAo<^GLMkoYm{%uc#tCeau<%SX&>ht|3(>+M^?^Yv zIrm>#oTXvSE(epkfaEAlutzJ?wFByhAQrY=s8f)=YOum4Xv(#LTS!<$h{!$5;y7NG zK~|W%!olZ0&!Uc83ogh~warg07`R8%k^BSuv*^QXUR5@mW*Y$qHME>{sV)m|PD|@8 z(9z7|Pjp+ zU_)cv2f|Ma7;*PBhXjc#%?;HCh1FbEX~?I$TrUFEEn*%6^y7{)-Gea1#GV zD=&4JKD>4@chi@W%`=SD$g;mn{ZqD;{u+ilqS8Z3(qH%pcM0Qw_*9Sq0S27ie_4+C zue0%=2K_PS&1s;2=i{Zy9QM-n&W`VU_e--cjC|^9>bp#E*FxhI_gBqbO^WDy zGh>~sDkH0K5VbMk7@+s=$4c`1TH$|oYpO0I{%oaU6uEr9hJ2}JX1p;@oH(1%cNG}s zU5T6y)J=a<4Wp_J|9={$7%2z@hM6Xzw$BfvD&056!7e`oFopBID>Te{g?N?5gs9)t zC5m3J|3OtV?M8DZ>M~SEG0I~8*UMSTpMi?I5o#p;OjmAh%z)e{S-G=s#@$Ns0xd&B z0gOtOT=~vtPu{w0UTnT`V;>2e3 zp_b12VvTqB5XNj0`ZWyT&OYxmUAasa7aV^Qr9y~wqSR!Q^QHa;ED!(+0a}o#gU|pH zYYi|rOkBIYU&L6h=BBqN=QgjN;gHwdooi;z*#l6IGi#1M7rr9Q;jbX3-&OhFm1f_2 zw2leC$!+xM?Pw*a?{a9^57}LT)3c6;BSzS^)b&uYwG%FGgY-nbnTytt-{(=1pI|xt z6-6Dy^xp)l-DD>{So;IrmljYv#>F9+`GJ-L8`VMaDqIr4mQyiVtSypvtVY|cA^R`q z{FNlp-`$AiR`$+4CM38=_(j&gJfC_k28LnFUR^66}ZO}u)MwpE72oY{I5v#?Hp~<-iJFCHD!z5 zfsWqSs^2MOh}+mQQo9Cn`F$&62gJ-dK0Eey^~&`nOD+^puHsz(d9$ZK{98tR8zn(s zsD)&t*xh4s#jZ@(`oqHCVL>An*HKklZM4%B-T4{IGxL)bN8|~gpO_cMy}2(bz%#W9 z+LocA*-6~&@<|Al8E!hK!hFry7JSW`k(cYA_qog&H#^b5>JP8D`w?>WsXd1Ec$`{Y z!t$A?Pq9IK73sR^z?6Po=ohi z;7uFU_om;Q+8#R7r#^J`-_)v(p1BAd%5L;L@)nHNva-#4XU8s1YfpG~wS_NDU@7X1 zVkX5+c-)qrvKW7D1?t z6>hpz$6x*nw}D*LD`#*?04ecaYyCyAXOE~?R4u^jTX)i5t9hU&ZwaS=m~(*Hl>(-Q zR*RW9VWeC#TytCHD}JLM09tsi37#9KE9e!ZEj>45=qLaYs?}%oL>E}_!|(8S?~o7g zqiJfC>m~K~lml@qbn-dfci%j{6m=I`B?74fJ@IiHJ*;~?`t*ZVs8nW^Op{}17XdD5 zQ<<09{j?S3-l|?_VgPFcc$%1%n;v5(%g(BqZRvR!qnCW&tXF+#W>5UHbkxR)9vAkM z3V3)5`9g_&v=lPUkTv0AdY;h9I_Mm89sAU<#xC>-&RU^zv`^;BH_i#rb*nm*(1~C5 zWbF@U3Eu*#L;&S<>s_fFxP<~Cb%A%yp^Q3Jzilf)X4W+P$z#Hz#igg4odstGJ!&-l zFfD*(SjjTIerB}K+y%8awZVEHQ5~A~TAVu-bb}7(&-e4lz;lSSa?J99JccDl1W7@p z_?CPX=2kMn>p9wSvL9*KJ6b<+qHlclNXlZ)%bO6{^Hox7`9U2h{k(yl<_5YaO>UjH zKgwB!bo@-ya%~Un;-6gsk|EVrN-(y~Ou9fj7!=j?8>iz@CjTbznxJ^^-mRE?3O^#%WZ@8!lt6=2kXMqNIjGM`(B9T&$dyNHTspKtZL}B>o`kmblooDpv&Bo8TvhA_vIMHR%PLKEH`6uhVrW02JOa%WVMS@sLKH zobG9*k8-s{dGG#N!(ypjhf(Zg*5!GCqP*S83G?sgOIVLv=YFJsehu`OGt<&lS}aemfWZ5(TYAhs4`IaD z`u1Ey*MMecf-P(T(DYQF6#FK<*NpYgSzqpC*WP~1cR27s^qa&rK9fExd5J$Ib_*sEOwz3Wxhy1OfZj?CKFZ$0|Dd~TF%I!M?^_67(BgjUf8 z3BmWO^+lCrvd}C);>#8p|AGOW*@~PX;=jqtkX@J+e-18YQj zjNIkkedEQNkK2Vmb3;toMyTVG$_J88N@5VYg?zm}vfIVeHQpigaU1z=bs4vW6Slmkc{9df00e$~i;giGl{LY9c z8DVP!U$?wAcHQ)pEJ^R2xAG~0<$H3gHB78Nzcp-Y`#@^i=4;3NSExnJ6&z*FqR}DYja3R;NJJ*$^#}S#|x} zsg<04MY2qwf4a4Pux8m#Rsc}V|F*xDAagu_#~(pF1jE=9^`6O&=hb_%Eu+*1GhXtg zgHaCOVA9nHjePbLN8-)h;K?@vavUzWjt+5cWX~=u|6p9B_OX^lz)t|wFQ4Uv{&oWA zhgyc$%9_3(9yAu*dT&_wv79w54ztXezFQXWVG{J6n~k}`2Ou;k>FcC4Of5kW7h~M> zQC2H_6TzEYwFmg*8o{(cW-iDaxS3{*_vh}Ji){82Mo47Z2Y_1r=r@-X=k|`e!pg#M z29k6KUmMX)3fC5}Oq6y9CqCmjG$ z2(2xqBlq+0NX78|{x#nxXB}U&N27Hx0eybgpoVB)^k&NsUW(h&6G>1foXa?gshvsp%6076GjU_ko~hkoLC>?~wE?}Q zFthAxH&(cAWkAaL=43w0Sr;a^&DoY`J^FM5Y&}W$O-Q4iuqVYc+wz4VLtn!d_HIBdTMG+OVcq8oY zyo2Qj+RPMj-7Otw9+c!L0RBypwm5dH>%ei2&b<`;K=S#G&|uUepci|KyRR*FO3j`a z3h)Ny$vpDwH>GP!fU7u3aV$s5Mq~h7YV`Z# z0C|Xt+)BnJd7ckn=zf-50vwWk*fh4sLszW+;U17zpJ#n?Rm-T8lXLYQD#bdohx;#w z>OdgjDgF;2Brp@;PP=C)vMFjk-R$wldSXsrNRZ?*SiGkyg)(l=u4W@b8C<(2W1_@* zDW?5NCt33$1S-C8o*ok7w@7xU5|xH2D0;S0xSK_dtCI z?S7v&nb;i-M+|Nq>uiY(#u?BqL74N-v?RQlrFN-A#TrzY*Kz}Y|B~XvQ6Tg1XB*{5 
z#1V!v04j>0S?0kUd#;Oyx1)D9?E9_>K#g-DK`Ho9Q&XUAzEmSl=d7wNdLBRho-q-K zR-n?#NQY`4Tt%!JI}s=!^Ugf0o!<%Ng#wt+lfU+}tRT>L`@<{#Pou}U!B`WdsOW8{ z+R(>PZSPXH;m&!^puS-o^&IrK7WVWFy!a?mbw;a9h#?3M&fO!*YOl3{uMtgYJQtgj z^` z^i=8FTdFt`_jx$-x7*ez@)z^6@V(d`w_NIWE3VlGpR}mQBRt0h0lCFo=K4cJXH-U} z-k$XvoNzymQvfa)Cwhg#+h)B=X0~AxLNsB+2*$y)H@Wie9cB(1(@zhZ=Y`m_0^vXh z^@4%Rc?v-Ff$^d~d#ud)p0VKiRJJRxGgHIcTV?H}BM5IC2>Ea< zK&56NM}X}k$R{}cH5g`mQkgw)Y@=53K_MTCdg7)Q;EjM}?K5)Ttg~$e$1$|BLgt-6 zOB?smyTs5~2TwO31Pt-L_(n;vBS-+V^4ToNUP@WcbPLb}u!gz|n-o_BFRglTR*(UQU>B3^vMHA2x z^kK>T`R3VVQ6HUKXz=-sP}QmTeG`rMk1F0}@vF8`?Xq8+Ld|Y9LDhz0H=~};?_3RP z1>~fAcQY0>uB`*x@x{jFOZzF5L391mP1;C9fG~_5UOvHfx&j0ThUBJuxd7o?UaT5C z<#})j@k`7LU?td{0bRM$1I(uhDy}SG6}0wYSF?;j#VH%y{7o;mdK>tvY{~c{+pO%} zBy0pJSq7?5h+K~Q-&R5MAtfi=l<5FP)XUCYAZd+3=Q-V;q-rVVd7gG$4fSruR zPsAc*%6tTOfA8c!ZKV92ZG%^WzqZ9K55}pE~?x9uoLLomk=V5(XUD(qOIi^rW z(v=qVKgtRP|`tOw6iIL&|2{@0hU72^*U6Aen80E{pJ^`@X0lhee zVt{S|Nqm6ECo*|Zogs*Lnra-EEoK`J`T!`fJ?Ank{6*^k4F~oXw`M7ZuZWtwgmJ`* zscSG>U0>%%X$V6+u|QLKyRG4 z?2OD+A{$1E$@;_1-%t?$3Jd0KLQ69pAo^)F5#56nnv?ML?dL+WJJ{mgyCh~OZGhg< zN7>=QDMhVu!QFXNYQRC6Gn$<8E1AgJ#mL=<+F5F(l~o2yCJ-tpcf4-$#?|Z<4!%uz znRbc4hlL~Xbx&J+=Ix373k{anw6*l_j$9Of{BHXNDT?@x?FjJl1S7ul+-mr7-AV#O2QhP71dsxe$Cul{E!o!mM2*c&*07E6AmTl1GU?F62mZP^KG2KI z)84<75|tuG=>r##F+A%A-V5M*f~LX2d;NK(;vEE#pcAC)h21FI&YAJ%LBChtp23G+ zBL;{w@`l~LRFlf`g1^iZZw|$LmeEl)`u@;Y{gYhR<_!3gxDkg}U}hMZsd0&a1rI*| z&39j__?z0-t{gx3sU4Z_4HNqqtQQ^{V1S*KFoQdNx0hD>gxV(yyIc`^W_+IuC_@V5F%STE6W3P{Ncy~!a^NmEr0OKw3ke&s1t%1zw2WY^&pbYL=54dkf%u7BHL8P5;z8{%eg5*9(AyFV5 z+=ZIC#WL#YV*pfJTKz+fDm_nM(_!?26@u0Ol$6G7)UrfHPq%nFg2e#$OR6pSK)(V8SvkRyCW%Xy6A93hH#PfsVD$c1MlF@EQ2!h=JdaUH2qYXB(TV$5v= zS_&`u;Rji1kdCwk%X+;+i}@l|>!|@V=N%Q4fppJ?Ek0cU#(t62zb=1P2Y^t8?DJ3~ zQL*wFQ(LSqJb_(@pyi$!PJ_el^c4!Nncc?0a7MxGi&TE)V4Ng&1tAI6?(xD4T3t@q z4i}i{nGhFQ;W$usk%C7Aas=;8Kwb;!IJt0P)3jdMAGlJX)nPB0*Q|_zsLG!;kXl3` zy&hl6ElKzoT*){H@~%W9upa-aj3t#=cQw<8w_>&0fNq!rY+e7C1oR zwbf@WEsjTr&TJB#9*G^#cxb%$+y;Qc3x*-wxe)4b{!Y_d?K8S~hm*Q-N3@~kv*pa% zcm5BC=K)2Og*uSbEiJ$PQtoBHzv`g&#h~VnjKFhoUi71mrupzsEQ@%;O1K0RBD0G6swZHU8;vl^TpJ%#LBUvlT3%6kwbL}S; zzbgLnqW_NCXHNHqUPZ|4F>(a_O{zgx^% z_vtzC+lszJH#r^H+;X*lQh)Z8_BW~b)|TOa`ufY8XM)S~QiG3%9Nx3r^r(|_6+_cW zGi3Gc$Q@~VjdxuS?2B|cTd=*r;pkhtkTck02vZz_;Dn%+zUp6 z_p)*%k*#7MwKc0L+8|~9N zdCPQ+<>vjxYdkr}Yth7k*JYyn`p)+^eDAU7i!Z*|?;A`21;r*a++#=VUYD3!1D}2I z#f%GVA9=~}mEYYtr|s{ZGH|-T1o-qPD%%Uz_ErN!dfR>Tcjr*l{C5?F+~{Dn&gHjB zrBBih{^oV}l$z@EC_I^a_qDO6|7*&&+xh(I`R|U~6C`ZS`|3OVPb3;#`Y71Niu^ zA(^XLWqEj@%u-%(!7WQq_1&LP{}&WoGJr-66QyiephGx(tcW+wj%A@|=}LO-$w!uP z*y-OMqG#q@jL8iHZ~Yp#Z%2lwkF1(;TG--!&oaGcvx&Vde}|y_>hdRX;kis+V`U4H zv*b}t!{}=+4p!$LaebP8^XT-xJtJ88@8_Kt^uzzehR!x-U5Zoxqpf&S*Elfhc_-aJ z`8w=0(Id38bE&}j{*vGCox*4@Y#CmSEjC}qb?T36$mTde=TOEwh*xSm&rO3~(pEI* zpHjPZ=r>L9!dB_&!15pCHc)n{+vE{2*;6TIdEV;Lh;{0e399>m`1G(vsreVrfo%~m zg*LlAdGTzw-stV;_ZDQ``*pjflgt*$cRXqY-{0if$DisI&knZqO8@Aq*|Fr2z=AG@(XgPb-jhP_&tUHubl6d(G+r>dHZtC+(*ch7#pReN^h zNv7qk$-3n0+c&+U-XgIA9ggb@_t~1$ZvZ${!!dZ?8AMf-Zn;Ce+KJ}1))y(@=5K`Q z{9;zVn_(U)Rj47a(f8+Iua_B^C7OpT=e(sU%AK6RKa>-sa03q}QYg@&qfdk#)p&v} zI|FkXD691mWqakitC3r`jo;?`V=UuwW3UO<7|r)gU9Gj&b9xxG`Ss-m?${elQ+a>O zBMPS8+^~0Vve^vH+)%*u-NSJ_70Y^kn)Ev4OoEydC;gf-iAkOtRW9qFcp2~3TZ+9k zbsg^Y93l{Q)nNUT92!0S##m5z?9L7QE>-2Vc?Ho8Ft5ACE4v9T2oQz#=?Fbgu;=bK3JewxiIcH53h|=f0$zu170&G32iSebsEwNVU1~AXa3yMK}p|5^6R`9#1z~FYkw3xbO`d z9F`N_2SFJW^}8@n#>RoArz`Nht+tj4xZ=AE%aYFRJ4o3S@64T1s4*WVd+J!@4p-^v z;O0vA;NGLH=#*OjtSq)=|CAlY4-Xdl#6ibE#M_MwO2I4KO{Gg~L^*=K1Pm>m0JD|b zFCItYj<$j~CgDU~^1?G`AgHhrOy)X$VYWbEhcV;Sh>G2GE7%Z&Rz)&8&4Q6#OaGB} 
z;lkUQje5mrzC|OCX7_n6P!qY?Ej8w?{6?*Wi8)DNJ^c|%R}NZl)>yNOT@>(i^BWQ? zC%-R5t$$WvdbHTo)!H@zfuqg8T+iy4y1)aWiI#4LaB09;#4wv|JmJ-SG6$ZWINCJ{`MJ>Y02(aS zc9jeVZXMSVp0akIxs^Mo9f5=JVy4Xv?@vVd*@q4XqNX#fZ3mQYGnx3fe>%ZK`ca(8 zOSB?|np;%{uIuOiT645q%7%1_%e?&c0wa z^m{;|WwrlI$q{+yxSC@h(IHXclbv`ae-d(3u%^JrXbusApZhifZR(fpdem?N-dBu0 zVf=fhX~qH-t4lvWZ(@t9Jxk7Tw!(t@@L4}Wzh14S3Aam=iu-`rL*m{;ax(1tnf z3oa4HxTX$mL4&3>ZwF(t_SBAxxSE;D>&vC9vqaBPBTA4fQg~gNo%zs9y1Kv*`_B{* zKDHp&Kd88Wpu+8;x#@k=h-~MGdivc4%QEdE!4r>ZwzwY0o)$3d*UQa#q}K(4ZPV!6 zm6@CM)|J#33w|q!Ss&=8>x%4RA^Xk|(P*4QqoCPV+a$`WKZ7!T>{?tSE^s%m2G>Nf zJTc6U+Ji?jjp3A1G1M(&H52bMlkM<20});_JB;SUHcs7YeNxa$9~)R2+OW|Q z3*Nq|INTVE&$<^ux0kMA7B$!4wAMr8vps+BW0qy`fj!wRWOs8i6aOhgWOP$1W3LaK zNVcHkDOVfEK)sbkY)fVBt+?ZO<2i7DeRFr661+-q;kXsue5;H$C_Ga-+6^|1sJqj& z9n?Arc9Ro4uaC_XWIsQN(sgCZcy^5$=0&fpRx_JrS~&mi!;P9~HMe8aiVpD9EIZ1b zx4$tqBL14?G3`oru`{K(+uA-Jcet_9rPa88X)?M;Dl9&8!*+qkENW3$7~D9;WXIMt z>h@rlfuFlEhu77NWsxAfTP>ag!f>LmcA!hE3A1}DmJ&NDbu(r#3xWL;IxRLJI;7OH z@X|8lRmOev>&D>G-yYRepeKw`@dMDhZ0YiTQ;~v`DFn^zAPcN*yVISGjrB+ZtF#L# zs8(0jGkIv*nY6cR)1cho^b7Quh%7v60K&y;thKTLJzvF`qXMKczO)t)3x(ajiosGSlb8za+!sd&?eK0`;z9kd#ORcX6`C!Wk zJa{Cy*`}FUs!s-#%6=pk3Q!`Eam}$!0Ct4^z2;}i=Q&sRWQX={|Wg^(H zpJ(5rH5(OW=%?+!13O917Qa@uP!fw8W^T9YqmabW$ysClRv<8Ub^6;o&5dn=z%tvv zI0&l#hX%hnbctXhM=N|f0Xm(DOPmDCUab*^hPQLD72U|$!{l%JYi~{2=iornK~fTZ zDQ|pZfnSz(grO_k9jbcod&G8 zCF-a@>_W-_N4X1SC6AAX%pwTS%#KXOUsWD&K@Qb}#>B2O4C8Y!R5h>TnV?0=jX!F$pmkVP9r5ZOi)up`W zeZQ!zybX6J^P#46wY|g|GDl|Dz0Sp1*N~^P*%^Uz3O4AtGi(;#fGo^z$^>HukoERb z+SwsQ13Y`5yM5?PCkziy@yAgOXT82k1L}xH9&pJolxmbN94?n|;tgIC{gO!ZQ4?KP zj1cbC#A@t_jRC!VgH&h>L(cEhA93_e0&fq6Lyu;!t3R~RT@8;6+iaL(-?P_e$Jbv0 z6V2vbXMtv?d~%Os;K>9le~$ZVX5p)TxRdc#kYRPzZ#wjoV*abE(vR~@@w9L89+{e& z{6Cbvc|g+X_CHQFR#Q_>Skrsh_YyMnu^$&{0%rsU4ja;piqN^@5# zL&c38L!}g2M9PgR7feYK6UsG21O?>xrn$fG=T&10_eNwBb_AVECOJB!H-D%;gefY(sdF1cG?M1s%@Q86_uu~CIw!n_fYkw3|j3{W& zAcU57kVT{$)D`D#PK=Qy`xuGy6ZHD*{V!?9J4F7vtww;d_gw@BVX7A%mL|NnO`C~5;0aM)} zon&?1zyiyWvuwZp;b>>}P{SICH5B4AzW@kXJSj~JhK9cxmcsZDgW_>^J(RmB69rs< z5qGj{gcZYJ*nef5^X8OuYc#PS&Pnau*0SjVo!iJnBg?Mm65&07BliY7X>I_JjdEwM zDD&sLXBt%n#+4hK3?|d(*|h^Ew04 zxkX(rD~6x&ZsAlX+LayE7W=KSRhL~U7YRH(a*k<;sh)HA-uWm~(j@DTE_iPN3DSjV zW+uBrV@UgfsAH`yy6@?Zsi?7mUEmx_Jv7>~YlY&_S1PitPG)rASm|)j@;`FA#w|?6U71G^L(kio8szj)kufW?Yr;|xncM!2%*JGe3DSCpSQgV|ayTt< zj^uW9L__I^rYKcP5#srM^Rx4kaq{u>5scR$A^CVpBgaylOTNN<^`@BCz#CW*ZO5sf zbJ(q^jj6$CwL}~U8Vd`mPKJ-^mi9b_JK@2K_46%qm!s-*{vKA`81f%e@NwyC_*W}z z;0J{*vs%*3rXojjS`s&FmPpcGIWV7xa;=QE|32dQL2CDGhq!=gDtQ?IL6wOPo>lLx z)A9KYfn=U72JKX%+>|}0jZ*c@Hr;2sj=Rk^I+9Ql8Y4;k#R0;p>9fr)&a^QlL}81KTJN;d zWxPh(3N+quelsU>>cbw}2(9)lo>Fl%be`vr!~hPtz<|tqslAU^Lof6f;!7*`~ET*OCk9wYMy>JQqgbc^>6tp1mMF6)ZZt~=+6sP+z*AohFuQW6q4b?V#3*f*Br{?%r0x-S)4neEmKbAo zZNvsq7TT1s>~!Rl-3?K+Yq@`HqWoiuX&iBHLD%^iGIb=U%dD9H6J0Gj(9v`e?4bl* z9^a!gS+)RkHWvR|x&Z5bGdn$^ZruCA?FhEVA+a{>9cVOPR z=z+}%Q`7hJxx~Ug9`zqn7Nhr_{gBP25^6_2#reuEwE8y!Ti?OPW}GzuJeYU)dm63# z0-0KgcV;&+;djq8drH8pH-DTlaY1$UffxB%b5Fn8gBsc@gIYr55WV0MvLF?MG(Vnc zk#3EY>x_4YmU3l*K&(E&2lsW10uH7lc&t#;_!E6O5xoKv2}?bamNHL)QF^G#;H|6% zUY)oyW`UXaVC%g+TmTZYJrbT>b0sax9KZtImio2XILY6*3xA8@#TfPV&$rahF)0G0u9U%V zkjM@Acza%80J?R^k`%Otu|rgbY%CR{9_Ygtph}Xu@7f^q_$J%w)B*d5h?(r3v-h& z=Z=iy*)dlXexgm~)SvOkQYjJ%**83}BbY%<-d)TOISJ-k%_+QS6xl|jR{rhKrk=#z z6nR<4(eD|~qpwF`sa&S{d)wSBL|Ak*#Hh7BFU&G(yR}WvI|M!4X9FkI==@CC~JfL|VR`rnb;_G}Uir$XB?y+L2STz055|B$#7XaDb=2tRNIyBDk!a>QuOF z?^n8ON#}D)8U@IP0QfWXjqA&eX(Mo_k$Ite)wE^eE`dFv7|sS;s$0ZIS_M6&Gv+Z( zh4ZeHcGfH4i7gRaidT4Gfv&(8Q+$3vc()V_N1JxyYZnoz`a<;1>`FRu?u3gTkmQh9 z;YRQG@eOtYN}HAa4pTRX2)soZ 
zu_|qB*NKrDhlWNx0dY)5DZ3{?>9@UT9+tYBJ2}2t7jWOZj|BsszR^yRuD0dB$!Z^H zMH4q>-TEs2{ZLSs_RC<_cd!l(PT#4QyG3#$m7C_0A|U0g=Z7k=?TK`mZAGr$d{Zy# z-hC^^&f*8Vv4kn2?&7{nfv}08$GSJ3T25`7?H*5LdB%X7{pHK4ip}tEs#}3<9n)Eq zWRJO`djLX>(o8m%%}>O-5`=f_scxFA>&&qBc{M@u&C5vp<6q~VZ(3SeOX=h#XZOx+ z+Jrq(vGH}5-y=JWde+Xf@cggS@87O;c4DFY5-P-D}&sq)|1Bxegk8v*q z&3f)tkt!={4~bC3)ohsSM}@NfA7y=M15amnUaD6LH)*HuNR>NteVn4ww@2mPUiT0! ze{nJ@`=~UR6?)JOUXZ(8R);$n0PJn287g}w5eL5d0_Lkc{GH_sY?^o6_Pdg?B29$sOa*kvmR(~Rovv45<630p$ zBM13WvG1pX6*Sm)Kn+l3wdi%$Y-7Ex0qFZ zRt?09W4U9i`v=W6t8#`4h{k5UG@RO54VA{`s_OV`p=#snFLH^uy8=1U1)g+*mgnj| zC^EF_=*=J>`7ubG$z+hJAjtN_qD2euAf2%Dr4QT9na<-nth%Y%-IU#7kT15K{HQ$r zP&arhjwsQE9xTdQ%E>;^tH(u>6{OHq{01qXGQaYmM~W8-uD^&b3GG@1(QekR;txd` z8t>>Oc?dvP5Bv`z(EdF%V6|OzaP+M>1kJ(G&{s++#zE7o&tk`9WSDu_MSRN8>b^tt zG8{6MBMy+Novv3&u51h(YD_+{TM$pO)Ps0{9q7Rbf<~nKIX=qg_PyIF0^6!#Op}a# zrI-fPa=QF|@p5;ZYV&*Xxpjo38+|{I^`6XbL1!6jiHry#TbSoO%x%^#7JCEBUHHx; z+R$-MQw*1vHXY89$Qso_SzoW*xh1Pi1rUA9*8++ad)^ks*k(vg$` z%MmltNp7Ow`i9k%fB=XO@S;2nWxNA#TQO~0kr86Z-%2eGkc?+{lSVf;h>-83Lwi^T zQk?W#zK-J5RI#VbzMqMrQrCd)sO+Cmi8~#6MvX0y)%0SG0I8D+h;H83iaTPc_OR#2 z8)mMZ0=Qe+Fw>+V#L8gGq9dX$Fn6zJ zG|PCPeq)YfvS?G&<0h2siCt4W*eO&j=Y%V}=AT&5_bTfIUtC$0NlOd8x^btLSQF>q zIMx`QN*0U^Fj2ve*pd?!zinRFOZ{Ctg*gZn7_0_vlu9sl*3|*L8*P6ZYNO>2B=Q)y zp9oGJJ4|0Fl#LTs-86*hLy`Bg8eS&h0dAJcCpC+P;V4?bjIq+d3&lN5pkTQ&aUO-X z1O1}1u|y@9=3022K4J(nj4o*G*HEBpNIgs4bvKmXE*0=Yx$OEDSIwgR?rdS zXOnKDS2x(j%VQR-q62fCZah$gVe!tLl(xh4QO9OC_^|9e1@SAa3D>?(Y#qO484#}n z;WSJs=qt=#CW3v!vCs##1x1b40X}^n`$!-W1vZaq04BKjoI_M{rJsV$;_pZhc&G0OnKJ9b7 zpcI1ES)fqWxRvnBDBLpz2j*wRS$PNA&rXmD^J`Iq^HQ9Owt;;A2b`;e zojwA{)SPOQHIsDJAB2PR7E=78P49VJyx&uMgFwycZ)$}zvKLa|Q2t|yIeO4Uc#K;Q zhkqfd2N?!i0C4p!f7;H<(?T@<)75}QYsf?V&>$d#lcm|^^EDteHUXl(5Ut5pkMw6_0RM? z?FS&+sn$ua;T^vykq#-RXV0Wc3-@Ih8X~U(53=VP4bjbs$aNj$v|!-H_eEWx@R#H` zwYcbuaQdM~Vh?kE4M#HSC5`Addy^|tD9frV@!dd_1I_mtm5V9moic4@K4_!!$Aed# zh5Zj?#854Vay}1h5~-a6FU1|+P6lZJ+#u;^*Ybz32$NBLT7A}ZpD^VH0Kj44UA(F15;AelNw^Y3 z{G5B;EW~3*bW+^W%Rp!Msv-b)v;s@HT!1WjSVY=+$;^rKfPw(07n3F}w9__-)r+Cq zZt{AV66{vAx(>GTRg!CTa!Dwc1&tBgMj8GVc`XT_Uu|o4w2WN3FbgQ221Glc=7{pp zcsytNylrmV6F$%=wDscW{2^-tq(AD`0uY&;RKc1%j+it$Yo6xfN3||y^`HR2SPXSi z>o-vw_%qUF&=v!R4Fd(RSku($!G<*clNJ);_d(arBf6Q_lf!?gwSvSOZGR7RYit`~ z7{zu$#mV+Tjx`3tT>wxp*@@+e?-M0f`@?lX#-F3u*%9B`z&WdL?0%NzT7{5%}i4@YbpXrA&`ycUd2wFvPf)&NyBX z^xZS;WTxS+&}%7T#(*^DS&wZC#EHVA;tQoMe=QDuoA<*ap8|9Od=HBGoXY)VE4U!n zs)(AMwjL5oQPp;!GdET6)55~@DP{*+FW#e^zj|eGR4XIP=ETW~tzABvYvlbz`9d5; z?U}71y$wKsjRuhxH;|60T>h^WZ?zeBy5iYD+IK)UPt5flL+y-ya9x~zMOv>bPYxst z>I(~e0RE&u?nD1ZAT=I!(3YUQz60I)yn{=5jLC4djt2^c{3VR@Hb=93WZV^K%be>~ zK^k2n9~q>l5&M00?R@MVJ9nh}868ri?j(eAF`b@|Ik8%}(97c{yy!L*}C zccC#$1{DIHb}Y8z9WA>@y4SZrm_N^4Cyr7;z@(m9S>UN>wMt-d5@+kjN7!jDQx%@v zER;MN@_pn_AHD}Pqn@Rzh^TTi15SHfEF#=aKa%DKtnJ7qj)Yr?^Wa;1?XAO0eJy(9 zb>R_wT(nUJQ8*iY!f3??<`d7VeGJOHw*blC#2^G6-q2uyK;!BxL6!6^ z0UgD-6i~PxfOa}Nnzyy6Q7Bxz8T(>_&W_)M>#(lx>>@YRN%~=_W8IQQ34WxoY{47d zTJ?STfFCwmVbrRb9d=k&u(eGhxLvqt$75a?tj+g0LiYk zhgGfbieCe9eMaR4L1q1jf%1(i;d``2eeTkE(Mx4(?t-eop)3Eq6MH-Ps#b=%&q}2Z zc(y6SvnIN#lMXZY@iW>9l$w_tq8S1r%iK4!`$Zt{y||)m zAXgG}#A8p`!J@-s=wsQuW9c-Wfiyok!jZlG_*xXzXgSEU{Ih;#rByZG3PD?aQ3dKs zU$56*1zjB=*odT;C;_P_6c)JC*>I_;rq&43?cE?(KHqud+4Gi&$AK-ZUye);0_F@z4)JX*6J7Imj1L*Z$l_&Ffz&F~1KKjSf@-l9z zuO4C`;M$8&d19ggF7JoKy)_r@ZQ|$us%Fy=17rxN&vFBua&P&iwsp*5gZkdVhOaW3z{F0`5t8Q?iO~7{iQ>teH?SNW_hxw|<`kIH<5bVa)+R2hWbE z$W4v5tVU$|!(4}@t|odlC#}8i23`;N)7YV_R=bMTb=X+cP+i6gH@$2r{1%k__Vb*{ z*={Y;eq{svX_P8&oNz3{QL74l&YXbl1$v8Mk6vI-p#9AE@{|Pttjmi`M)TwxZTA<*F2_T#8sy9jg z_U|^y@|3s2Zars||5%X<1^}qx(H%&{+r3=jpVE 
zCsSBEl->C@;LnMlyKB?kAScndiRLdOEN91cFB*;*_OyBqN*fEgQ|p8Ob+kL4J#?qz z$qP{~-~cE>Jqgm<$kbh6KvDxu5LF-Ap}#vck$*+7oBUA3PDhgA08h*AMpksMjRCeK zzLSz1oeW1;H0GvlXgIQ^p`~dBQv5IA@(a+zViln9`ktJb@k)dxvX+2;;;2wn-C)qY z+c!0rAj>hDUjHD^37Dzjd6fk$^}X;dky%`yY-dPjp! zr>fWo@KIG`7w+k1pgtgn%PC(QgTq&#zXb|X#rl9eprG`@f-GH@R`H0{iE-4MH^cn4 zavPA%Ar+bhd|TD{$4-Ve$VvNG&EPxV!6r0%ssAEGJ2ErO9+zsR!=sZkuftt$*=RMG z2k^J;$XJh512vC*4imtqRZVsLWE;KY&n(lYMwpJRVW&8KxEurHG-wjWQRCRi+;)S* zIy)jW_eHW>PZebp+Bksxf zHXor}LNF_5Q7!0Az%Wc##sBtAm(KHK{arFEvLexOEj8gJjO|Tk7(S;(;rCz>tC9Q;o?|p;p2GCqTy`CLY3FU*^!N!4Xu4k@8YeYbs-Vl zxO2DwuGF^;pl@fPeV>ux_mhi)C~&N#zZ#2@GzTZMhI!+hxW+j$2@)u}06EjhP0Z0Z zz5o7zEeTiyXr{l8>;NCEn=2qCcFu=IB`;UJl`7uLEGS9dkjfs}r;|=fVY$3JPd0j5 zVeUB+N;I*#HNlHO`CeX>4pZpCtS2XKt&k@l_#80H&qZk zU-qO~f4=gG3rqfZc&5F07hgH2^;1BO!?@q&aqOH4AKyEfchsRBuNjW{bV#f1%IdS* z3H^E7%61b&`Kz8)JZADYA&R z9-j<$?c8AsioAupjKJLnV-UmMX<-oY!EQp+gvbW0)p|vGSD~L_5f5};2k3A}%vbo! zwYiPh(!70urYhesy)MwrORP%1jy4rH_ZGf){Zu00^y@`P4LLOs%q`7q`- z+ldhl6u)D>h&o!C=&ou-!zq_5ZsvK0zz%j2mR1=J@bZm1T6ws8PT5`PNn76lMIB*& z70T|o>9mP(-5vIZC4e_9tNP5j6l%^5ZZw-O)^1N!j+&hgoPZo?9!T^Wj4KG|3JM`&nAOLU=O}N}!MtWwN zh46~2`JRRM;%qM>#w~V`e7kIHFTTve$O9!TWGfW8#mQuqnP`L4WVaAmSFaCx>+u;= zwDFqLJjZHYOpDR8m)iBsSqs2%cThqZ~X;rfu^ChBWL?Wg>OhnT^ID=Vm5r9c~KVDD!Wkg5uM zWxQcG%);sU z8Q1n5{`9B-z5Kj}${;q1jy60+TGbyrP7?M4nHkW-ca8GqX}iEP!xv-Lgbc~&g0D@c zr|q-;y6+j@T;mUtpq5fQwsrf(@v1Jmtal=F3a?tjU-UA(aQ z{j^lfLEDJ)n9t&XuYp{jWU7Pr&T9_OML4OWCPw~vRqM_td_EnjqKIj8qTW3AgLINQ zD>1Y0MP`uggG!UNF`1jdP9lQMSj*>h`rYi`tG%4sSdR4k1T=uZR8h0k-6c)#Atk(P zf0O86>regw0RyGDJ|!aPPh$V9=sNuQJgN%M7hP48EY}&e9blam7W4wY_3-6v+F|CF z#cn6ePc@oDr$ugUR?SfU?XTPGA+elZG$T`rf*<9roVg7vl)u|5-F%vSq7i`cb}I)c z|JGN2%SZlNJ*{ieXtla*u^33W+s-pHfjuVlSXA8|8^V^p^T8>ZX3Bc>AON zS&n?5)V~DabD*&9Ez?poCGIZ1QW8W&ehS#YU#L3!4N{kt(zKP>AciDhz49|-$#kb+ zD}PAf{P|t!;quo)>rc@P1X96hGT?heI_vR^@A2(_t!8kUzO*s$XWee=y+qm9nLpz( zX4zxRhR66!xdTG;!!$B)jgR8ZZ(E{Y))mJ}T1)u9qM}e!hrUi7j7t~eL{IRtd#N9W zppjD+c)P&&S*s5ru4GUZ>c@dk4SnsWY7{RUQoHyAENW2eQs9(+nrLkZvi5gtn(Bp^ z!jngYR$qBaA;BvaJElLhpIJbV- zOemJLaJ?+Z-aptE*q`_@AMq(g158{+dq{H~=gu5o05`*4OjT2!ifvTm!j8k^DRw|! 
zdQZJJIQVN<6+v4pC=oU;hH-v`D@`_}%Z@WQ_k4ky9SmH&H+hlCD*tR+|19+L3}TXa zszK5A);@{ju2h~s%K|vD?Z2U;0iJQAw)y@1=C2a?;b|ql9p*<%lQ}#7T2J~y=FfAM z!$$whrg}+D;b-*3sJCl3t-e_tWuG*4TD&3>qFA$Hjx3uSl2NgWSC_*u{O-fk9)Qyu zD2{ta;NzD~BtBeR22BPx0>sP~C2!Ao#{a#TE!K`t@8WBkq4WNy_{%A#O{Zsrz5#)Le^qM0vOUk%PGM4~cti7pO3`inIX}_3+iRtM~rcQEgRGJ{s|JG6Z+}G=x z-3=iq{SiwqY4&t?EbVlolKY7D0Dv$}uDJ`@5z1lTE{4qAoPk9vZ(sOGTVfOOa_+Xv zG?@auaC#g+6fNnEe-$H{*NL z81pFh$$Xf1avV9VKUpz+Y(6#FxhJu*@sk#~lK8S1y`QEdw=BLoj?M6F<^!LpzyF4) zt&frN;@kLF`(PNO8sK({k5zDJ3a=0qT&68PkglaAjJWWv6+IN-UqK8mh50}x!=6dx zmrXe+sw#`mC6{&`p_4ypUTan!Op>URBIQ zJ@HY9bAF`(F6GS2hClj!(1XIW4%+>~M>npJ3#|j?nU_h}dGnW1yNt{01K_fUUx4l?Y^+f4iPX!kM4ICoidXtY) z_T)t6_gH=gI3ZyUX_`q?USEA=^fosMxFX~uf;WDAu9xG} z^xiSAB$SpoGlp;Ey~mRtEXtzbGzAd%PLFKBC%%4vk)a1^qb*wr9Bg+8^Y)x`5>t+c zC{n|j6>}X6NEYXG<3d?bj>zS}TqquhmH1^qWw_;rXI5yecwnDY7nSEfP(r_ke6hN^u<|0+pW8L(D zC=B-8j&c7S2*`7{XiW$~gVTgH5xaOvM;&}b-vxAI?u zmvt!#^I7i+>*`+Cg+YsF8`M$z>kT_IB4B~Q>3FdH#i7xVrJWCnhjWIu2DlB#wi6NfnRap!ANyb*uN#AN9>W5g@qX(AK5C1wi00SGk2^m&PIhXpF-m0Yi2eL1frf9 zJkY52GX2u8S_cdBpG%)AL1TNMk_h(90g+jpGEm;^P$KJZ5B_1qX$HReGtqY?JsO}c zM~zumPEHHuERHNfX+EHb4YDqM^y!(X0Ej}(M^_Kct^DJtiP0C3v%ejPC;!1YOhXGl z@Y$bs>B~Af*O`WMle>+m8kVKFao|#=|D^0;&A_V|z;8C}2hRwSf7Jng62zJ6a(k8B zIBQqD9bY{e<=-G{{qe?yF9>>cPE3uO)w?hIgRe&wytUl~fb^ej*0dS7%e#V4)8G1y ziZPQ%_fRz;#h;0M*jT#?s|=c=@$$mL_@jR{r8WbC%*;KBmPQqd=JP7Oya#v6WY`+ zlNP7Vo=Mir5)OUp3NF_9-mO1>PqS2>Fc&YDY(5SAa!7&Fr@!IctH-}WD)`3>uFDz^ zsm9hhB`?%(#!ies^fCs{D2O`E!HVDJjVf_=Q^0v;Jq);h>OW(<(^{+h6oOIG)sYX{ zq?8M2D^q&#`J<+{$u!aer_OssHEcfAi<}>gPaT3-9~H0OqXjI~Y>w|JvuN*R(Og=y zUo-Z6W;uoA^CJAK#0K4Qm#Ajf4>5x;UpuPl2soLw7=9X1Jk>BUe5oPOE=j=_P&qlu zkR_P{P})rnb%D|WR(tWf!XO&(<&|6Hq_)W$jUCF*U_{<19&z_n%{uF0|j z$^tjlm40nz+K(ke+GC7MRC|i#1ZAY3r1+3a^97Q^3L!i`4X8OWMgf*=opd7cd3>|` z7ta31c@Y4VA|dVBFhl9CcZ@9AdRgw$)5UeAlaXrc$LNn6Hi@e;LIkxzlPkS2a53(u*|^q8vm;9ACp=bSzT0*=)g?4QOC@t*RwECOH1|Nss<>kkT z?puKNjC4Hq1aMW?zp~*sZhpj!9{cU|9po?`NUl2_k}LS*H8}#%-Alo!Q}b!Rd4*U3 ziy3pHZ?V8)l%g|JdfF0x^sZ3ILW|FFnHEO(OY(ayhcXL-gwP|7n$N4;s}_9vG;Wx< zA;wwfkX0_krEwc+YPc!JlbW3SVvSXO4}lSP_KTRR^&dN6o7HOLkO#sk4^F-99D#w) zQm)PaY>gsvqi>Pbt)%+ZF$cU$&gRzog%M1L~ma(LQx z)c&l)`$Zyyl*7x~d6lOz<;kdPEW`kVb4b5h57E&JMRjdU(9fE!4Skm}NTW!n_FBh} zW20eXjjCQwhZG2fvT8whv*m}#!Dp)s^Zj)=Zg+XWny>-XnkFDs27KWsY-H1XQ3-#h z5<3>VC+=(3bccv3yQ5L%r1iU(g0jb4ZM&j5j!wX%JoP~|Y zr>;0l?qNle48LP)R;7ho93G9u(b;vU(?O>vrsS6|A7+lubsnDEfr82WG2i`-05gKk zOgzwc+Dv$+npI9R?^x%wYboa~L!HDIj{f|Bxhy64{`8Vr)u#lk%R6^Y?!RPk)k`Bi zQ2qOq>z1^2ldC%xts9W5Ysai_0MH{=XCfja#`o2H7;ht{gHy=lZuFWB@xYjnHt?t3J zBIwro9}(<6T<@55jTewZ3Uei2P4@2E07^fa67|!6I|jpCqp{7s%N|_!J4H_}k5l^4>LmG^}yIfn^x+}35(Vqa!PX``3299Z6B}fkZ z8MNCww|*%Z>q_GgZ|%VDM74B-_-+Gf=W(4?Jn{3gTB|P!^Z=*p6>~dn9hV#(Fuk`p>t6Mqup{F0bFW zLO&|vTHiuC9o?_+fmcR*hH5~lM~XN^SyNBoQoI`*6W&2FpRm*?NoqkivsqLD0Bf^@ zV426J2bZP?%fCQ&5)?fp#R`{a=f_~dNO&$;4bfX_tx@HsuZDO%5v~%!M$e0+kkis~ zCjPzbl&7b0||Za26c?``nlS#VC$_ZX4!z8Q^JH8a z(W?0!AtVB+Th1!;Z7TZtlnVrLv6d_FV*6KMGm#jr%9^N~16AfX!dz<%{$=izhLB5; zp3zX$pl70*5$y%Mb|gLr)#;+l$|c@@rWzEG|5X7i0lFsG+H2 zcZu81X4jbb4kiW+8b%jx#!lrAAa3b%T%S=Ooh=$|Ty>;puWBcqzU>-XYc&q8Xk%gLu=>V?i)lE!$Qdd8OD1(9t(gaN6`&BCCZLrC)yg4MZveM@U7+_!4Xnm6bhj0u$T zC1F|?tjo0$8_0&5V{h9Rt*2Ii!tat4bIf;h6E&g#>+AS_*FaBJu7 zpw*?Tz!f1I>@J;e4_tCGV$l%Ybrg!2mImgJ8$emRv-fG+Qr>jj8f~1j9}yPVWx$*g zNuAy0<93AE<~yP1cV9-z6EVdxq+_U8p4EBDc)muj)k;cN+O*Vyf?OY90g=R+-~;^A zz0Q9ehLr{0#q?H^^hC;3-VH#5JOh9E4i?zKFw@*x{d6O3YP417M*Pm03dzGDd^m7$ zz*E%)!0H(j!@h{A(hG~;}G+m8Ftib zPuv=yzw_^ig8F&)-#kcah6mG#=a20CjmSDrT!gN=U=_(cn3Hurq$Ug8i#TVt$CO^? 
ze}AoEqT8#(NY5G?y}X{tHTRrq2TlotD6Y*)G(?*bb05z5Au-2N$;iw->L(%;k9sWA zh;irAXlJ747wdV2`>b>OR5~#L`n1v0;Y|f?`kn&YnFjCl z{K0ed?%qp$lG|Cg2l5E4PT2E*lx>9D&iSJ9 z3Rqc7bN=Yzv&Rn|bpzUU*p|L+gXJvGazSW?OS+^fn`eFT{+z8ixq=}6=ey)1^lt)q zon0k7xkva^Z!7>t`yJQX$_c*rA$O}(lC7-OeQwxRi^FKJl5hf;&(r^gkGnKC;8~Sn z4&Lrg?XLG@rAm*{m798-%N|*s0U}(0ErhM zO+$W;Tp(}+d;c4Hj<8nl-+9+q(+LHPDI9=>bC3r!*+1SGeF0r}qUM(d7(4%up0b6+ zjsYpyt59^@80;Qx-Wz}Q1*W=s#NYY*E<20^wajxNAXLt<-K^B#L9yT8B(y8dzVpoJ1Gb5-1JkC>?1YZVd``GZcAe?l@nAkz}Q>RnoO4 zQ6-ZC2=Px_s38%sLd_qc+|+4g#oJAc4!RoUAkTIT#luyD<07Jafty5#H3wanG`7ej zU|yF+dH~L=zSsFp|)Jj4f|itHf98F|-I(gyvoi(wVOjViZlvY93(45R&@ zCeA_bmyKd79@m<^@6y}>#T8Q_b+D>gwJ!EjqPF;WPvQ!2++;r$f4Xat&1HGs$i-)? zA!4Vi-vRhvF<%u_H%6J<)NOE()6up)sBHVf6q{Zs*xHwgwPj(9y{-D9EXzK zhEp7>ojWt~cHQpG8@KQsjIW>|_F1x=63vd55XMOZ8#Q&xC4o-jhVTsm`$#?F%i%gz z&D*#0gFGhzNWOYKC0x6T0^zj=-q%kH4=Ww0@HHDv(&@Ew7nGN%GI-^1M$d@;X0PEs zZEZg8LB;p);IKynj0A&Kf7CXbS;V{A)*?|5EA_>u$4oD&ZH2zn&y;g{bGxL`5_!9I z^@L^ba*+Rj-LP7#b_s8lO~utcUjM+svvDVhYMV1Xd-AVC6LuXp<&fv^bes$KdBL_U zGU_uKZ9b!KO!kRL#jTmx{DzHfdm*nLXn_@M_Ny_6BJo5gGmjnYQ`+wBLF6NZ*^CHWVT<(3XAlDW0#|5;l};Utni}#H z7@7XE#iAS?yv;=w^-*2|U(V3ZaFHdYaW16?TNtW6vy9*wk8T96Kh5WywW8`10GlkY zP0a`>Xb`Xt)aIJNK@T@u;(HNu&DkpIV*MXT-@b?CKVHGD<~9<~UopFS%H*G&z;LQz zKx?~YPH^NoBj0^tz1weyiab{qSi}Yy@D7mIl-3}o9IhuH?bC3)U=q=6iM~P2f$8sW zle&^uXAkg;6`nG0jC~1r=>x5b&a45k<%@f&Xs&YV<-Y9a(7^BJ)F!hI)JAR4zTmaQ zp78yuDlgEB&Goj;_8K?Xi22 za>T@WA#Rx8hZ!T=Qh^p&Q6M(D(9r8{J*EfsFBULAj*?aj>sm4z;#@V_(KQJ!&o}?H z>x(iQF}~|E=xe6a*v#u)`WI!dP3~ZHX<`m|9Ixol9K;(YW|V2&@^2>bW;~ELZlA$B zv39UvYYk?+BQ6jS;qdY}C*!Ez;JbUV6)tzr&?>B;*)#4!=t7XE{2M10qde}Br<6T@ zcds~LUXY4g?a4b#jQu-}Y*&;wnG9R^ zEAc99K`(eS^PW*(hbFyG;~SS}U?{I`VXsw9trc>zG-l_uhH#xb?88Xgj@q~iNZsDc zz;3fL!Yz)w=o!=;aTWocz52bTzTx=fMa1+)m{MKOXU*q%j{F_a4kxCXi$M@+nka%{$nG^@)}B`@7J;QaXwr++HyM;8ISV9PjKTJ&VXpatW=# zZyjCuOS+UmT!WDFI3ujbW`CQ^*f8<$%0MPKRcFofz9W@q3aap5KR zf!A)|{@=DA!H&_(6D&lwmc2PiGG+j4zPd8c1R2TmmrX8($G|rO9hB zWD@;r;!8;dv%klo?hJ`&^~iIu5b1A0Gnh6gS@_no>);1W3cAdetOvH+MRu+ITVH}V zTj{D<=rw3q(X7Y|2QD`6jL?^WvvGJZCL-F3pzn3h)v&9AQtj6xf>#%1!Z1PM#X6vG zR2yvMcw$F{9%}%`hIcd*)5cyD&=I11AK-OqNG@->GNfjwrTFDdXLJyNP^D7>6FlUv ztrk47RND`&U{WyI;P}2b?mtHLTJi_1o~&u)ZPIFWsslBIP*bm{2X;%f@SxSN$740Kg9Q!eaW8yTQ!a_uCNN|jEKRH9V#8@ zuOa&tR`xh1;?ZpPGArBggz=!pY;I4jYe`NH_@}=RYT&_Vynf^_%i?6eUGZB4SVaNp!ifK`qh(DNHhn~_Z|hnpVY4yYeGb7@&8kuJ!=n;J$MU# z3x#?28P&}1;rN5Z>5BxWN6A31dx<+tDP|ro>0OfR=m$aPw%LlP=1BgDd49clh0Cc) zRIBkJ@*0>9bYT%D|HhEDU#Za~%335J7@haO+ItXQX1s+Nv}eK$X>>)9r(55RZrmtF#aE;vtjquDV zWSIbc)fcO$^__)cuaN0PP5p8)?)JcPCp2IU$b;D?{?syASkkGJQ)O9W7NZ0HSgT@r z)R-vuFJsspdlVgLvxCV2j}t;5+K?~0Pb5PUsQU%BeYD1iAS%JkBjUlbO-Ryor?5vK;2ut9j4 zRekcyd;j6!_P6n`glh*Z6uM700PtOd+aGXaLFP;V`&=MXvL>LSYoSvE&i}TG`x&rP zHI!u3yG8GaECQy_eWb;agRj7cj4iZ$K)&qp-SH5-|MMBp1C-R_4$v{hw z)Wj+_Bggp1JWI94YDaA)f61vy3x#)(3;ut3>l6ERohT-qw*b=et~uK%aGT|!s#!S^ zaB4FiN<9xCT__@j=QxSBluNcA$Ang7=pt&&TE7(UgR*@e?bZ0LnH@T2n4hmx!VSoO z5E*C(O8!E$rxc>zxsmfSkq%jf$v1%%=)$+9HEtgiJEw6iQV3iP1;opLZ5C$uH-QCE z`v3C_VFJLrFm-!RZLD;GajY(jbi|rgW+Qkygn4{Ywh>1kjeKEbI6~*&wyI~nIfh=q zG_hT<9PkcOmaVlQaQ4@6>5A?ibcp9PlbF0~G{_TBUA99ciO|H9 z0kr#R3z|uH`+w?tDsViQoEI9wn`-ZW!6sT6de$5{)k!u3@|qZcdcu?Y9oKWOm>s}WhB~uvVXU?POV`yrEW<=`?#veehPlZ10PgcQ3q(qIo9 zfU*6qQbwU0o!wLr#KXK>e62?;`x2f2eNKH}bQO0y>5b(ZwPWOn?U4m-+6mR1_x>`Z zV&FRaaqaawVt8#uhDNw4lrgj?vTY+JqOU7?Y`*@UNDj2e{OS0m;24m3buY@B^iu5q z@vVQ)A5`yGR+4RrM-WWQHYMP*Fla zh{#Ohs8WGIRrZRCjIa>{hLN}+3M5esgdHV7lnpV25R&}P8|}TVg8j7p{`~9S$Gzlz z&g-1lJg?XDsk`5dP3=YymJSnc5yaiIWk#QieC)qpWhagHMdUWvm21bqIgf$6<#SRA zFz^n--eU>mk`{4W?k9l*tvpHKDGM_MA)$nk_pTMiS}EFPTJ 
z24U~tzbZdzsB_o(7vl}i7Q9pdb=eJUZkyLBz`Ej{te}(UnYqSr2Qi*$!12DQE986r zlhfKt^vs0+$G%*ZJ_C5^cDFUq21F@oF3S3e$u^|=!OrQE|M1t5k>8wAZKKlUZgTEj zD_60l@4)mFZG_8(=S9~@(h_zh*2KH0?#}D?(WYA0_Wl#0P1I8fTkamzsGeOf4R5Ae z%Gq03`Ym7H#_r?TE~gDeCKR*sD?tDTER(~cP7-K$@nA(bjm-M6gBocp>!FmX=9F&G zHgnGV^4 z{L&$1iq8XQzC6W5?)qL@7qfc?0bpQMFh#oc#}V06am8<{{^7=sxvGMV_5QrY|0#K` zho|KmtR-~YL@7k?!L>ywFY}$oNfg-&#jL{1|3rfEy8BPq|9TfMx-jKB3HybU+<6#R zbHLZ89e;gNTg7e+b?K;b`mSVzu>8cQ*U8^dIl!)a zHJb{?Hf|yOCqsQp0Me}aqzhBQjOJF1azgwQwem(jaF*NZU4ZNB(g0l_lloQXilQF? z+<@}!#EISdR_5rLnQq?KLDH?~Z1*1z9?O~4JPv(tB4C8Azb61MxUX{ZZ$%W5OAPXM z7A&Wb&@+|h_4XXld!cPlH}@`hSa_r}77tZP9Z7@D$n{tPPT5;kD}Bc>LS_8NfzrO| zdGGDg;WMNJdOni!q_z4mDgCpBpDm$Rj8Dc7?Y3oEoi`lHL0d(_1wQj$L#JpZM&yId z+A}09=tGE~k}W*Ng(O>U^_jT9qSQvUG5y$5uTidK4%ZkG01o^| zwQ2VXLF?Mq?T#_pu|qB@{M7O@?&SmqrNu8}Mo0J30E@%T&x~o*f35BFa^Yu1P|ND@}?|QV(%JQDVrub`Uh@HGGrA)F0mLe zf7qqFYBYRbFs`D4ziu96Qez&g@6*IR`RnZXQ->rvfyKbST)*Hm=bx`98y4c-QN`%_ zDZXp4vg+Yp%gtMo(tUn4bWgFe54F^Y^@CYe{9x%@OKBqnjT4eUP4b*WQZFbPm#p(% z-zrET;#;*_#}k;00aq7Os`s|R+qZK>xm@fr8S0`%@lT%Vgdz>0mt=l-Wt-n)m(BMD zIysLUge`-$<$-+>U)zkOwmk-JfnwAc0EOl;WXS6IG&Vk{M_%%lp3;yj_N+_Q`5q3!l z^cDe@jIgyu=L1-OJ*!Pp{RzM@8I%|ToLWbQk%)JLDgo$&c0&VRwDNU^*OZ;9IoIiqXWb7;btRpdx+tDqfhUsH+U~UHDPrYfF?V$l zG|>=cPT`yi@`J|QJc6FSYBR0dhtUM+YML^BRa9^&`~Kumn&tShzSj%u*B{^`_inbS zI{_Y-@wYnDNvy+77lLu@O_e5jw=>dx<(r9)ljj3`q)L>M!R_m)UlIflTXesNiI+qy z*wuW))9tc&#7pr-?{d3P z_-~h5ZdGv#!F`1twwD>^B%Q7f*Z4_gDA#$nJUqxKyWwB94R>I+NSdpSWBoErcaBKC z-c@t^@v+1f*5^Bm7fu5CXDbOIG%L!UIQjN~;@W9kfJyiCtN8iZqo^pl@n+Fy=X%>& zk2~k@e6vf5+Z=LGS#njm(0$}3N?xXgKLhh;wyXN%7H>9LM!mIPn=~)jLvjgu(Iq#Z zfK42dS8rVpo{@EYRe&6(TT0JQ` z^1VT>`fBMg$!KtS;(1@6BzOIsm_$N7JjNVWwXISl4&ahj?IYbGG1r zQ2-sRPek&Hm=@v4?X`kTPXi8`}yV^AyqtPU#4fyGo;ctZ_{tpGZ?r) z^aTaQ_U;+gP3`bSq!!E!5O%PbQ&sRK8TRqgCjEKWmCb0thcyxZuqy9ATa|4&ocOfW z>+9@UJ+c8TIbQXGrJM_AJzyNWkZ@~kU7OU^CiJN-2K0)6YX}CsB=#;k$*3}-@iW`S z7MCTeZ8Aek8^(K8y_dW!x(5c%)}!^@!CWCX=37;hWsuA~XpqeQbak#ajQ`1O2WrI? 
zRn0uB3OiSg`Y8{Wds$63^~+SaXJdgrPpfeKvVX@%Ycq^;1&CXJVz0Fw6LfyNayIn`<>Vg+!Y?^^oX$|QOiTF8 z=b)au|EutlUkdxcol9>zUJ?raQ%)dvtcHYI8N}71cEdi8SR~!a98dPI4r6Lz_sPkS z_ls9D_3QY%H>5`cmwPg=fqSoy`u(3+mH!%IyTBmLynFvHaMus?Xz0ubK=b)7=9aI5 z3w|x|x(L^BDm2fLiv;$SgyCuz8OA+3jytO5H%SozwKLP5ua&B~$s)>_A$QU_& zHzUQ@vGJ3Vs%J~D*-!-C(lF45??dyU%shFmGZiiGuHp@!1d-Y(b4JN=ZvBD35EZno zJvz!K)Y_Mq3oZozYJ1mV)Xw$C2fNvyT8EX`jRLt*=&}o2I972cTk(=upT>5($g$6U zG}JYr%0Z(`2SzDAe&Nv}u`de8}48xnbT9#k-Ch_Z%#S%8aP zLHBwPuIaOACP#NH^pG)-%Q4+yE?6d$!s$x|k+~4&7YAIfX{Hlgc zz`}uGB8DKO$vG7No6z~iwiStR?@N8euj*~qU~)S{;bFOV1;}EW?fGse*KW<+zKaCc z&m-K3G0^;t0IfL?P@32O&0xTo>v`iva+tqy3D=N zhNyDX-`^f|bmtYEG%_2o4I(FxZS~Yz%+a(C$;(*of5Uj%B3>dhNSx^|hJ7;re|{^a z8@S-U!-pP(tC+QJue4(xRw>C_K%F$qD$eWLOJ3R>RJ7mjCl zdlX|>%j#$^DBKF$+Wcw6D{Xl)k+N?#{#?4L*qHC=M8&6bhzw#IhSh+FY?>H%^rmJa4U$~K4P5>qjx{4NDv-ue6MLd zTY3Y6Noa3kQRdnKe!!_@cU5om31|TPBS;@`VaHc@-LAAiP;O>%5>_D3`cINNT?zLp z9x|xTd6A6Uo|bJY&>>J%hQia+@H-05f#}>jk6x9$#jmjTP^5`qmydzlr%wff1oV6> z5ze{cf1MXpx%39zv;xAsJLdNlG%^1%xJ@K+HnW0gy)h%`CQV*urxUzPb5EUM=UR>V zD?w3_I8*H+zw#~mv0mA5dE0;qo-s?|T06!tdeXcX3pAowb+c5nal93Fh5;sT0LB7@ z3TMVphwuWQ{tk=afA|ASm$wOh`Gxs7biH1&v7Fh1*%7GNQMZgEfNNbM_9jX))O*+o zGra=eTKOasj(s|i4Jlb*4`hQjNw1KM?`FREWO(pu7LRUvEk;o%Ms9n7_sQuX781KF z`~(RA{U(=hn<s{EV3Eh<)P zXR`X&bToZgK~FiH+tWlzcOD=P@WO^ngA4|+6mR8E-za8)TI*cIub?oCsO^t%Hv-}U zFre2zAujz~y=>(5w3Tm8tu3Me&d?FU|JJf9Y0h$G(zt*=y9xbeh(| zX1N3{=RD>T`fL-L5pe^rskVVAChtPD;g^!x)-=J5`p{y=)Ase}!;L>}NRkPq9e3F$ zY$qXUcWW0-wxSxhVk1gpwZKf}!onu5JU|iFvc9}K;H6lkicdkNxMU1%wI6aiF{ZDG zRyH~7i@CU(du_K*0rC_G@2W0knl}er>taN%Dlg_`293|u0xkxP0!Sn547Glk^6 z^OO3RRT9=3AJ=Qfn4{+@lc%3$&nrXaJlY|Ik!hR_A(nr)Ff7teC*XxjJMU|jze;)3 zz=Pc`quI8yR|nJ0a|a&_P`}mbPX*^?mt+Q{HwxA?w9jVI&{kg{#)#i?v-HlE44KeKvi72F1aT=c88J-M zBa2&to5kwzzUhWpJ1|iy%S(+|S>+DyPJIx21&lY|m7BpzLrpkGjGVk{$w}FT5kKwqWz`*yBO?~U_pq~;eglRQlO6i?H zXLiOidj6vqfj9qZ+wv{uk$5o(`~qWi>%;PRl+;(Ln2L)}4$vO8h+k8ig^^$NO`kY*SUDi9 zwC*yZ)Ppv8fjCZKB3^doWH?$%;Gf)Wotv~G;Ocl@jp^wc2<$a~pf`RlaW4K%ZzK(I zEbRt5qe%_B$QGlMvA(4Z+9sNh3ozbfTw|96CP}T&CkK5xc#D{N5Vb1V zh~M{I^b|8avBOlO3)q%~6YROEyXIOOjjuZ`5doN>9bE~7RK1U|nXJp`j@cf>tEtsT zoNejts$5;g(wlCLgF~(v14XlX=3({ewFD$nm`>t38Tugdg}|Q9NW4kBGG;>&vTC4r z0*L}l>Bv!jw;p2fKVZguhvR%SY%$4O&B(5FlUX!DDfCy$Ess^1wF_SANxY^(`q-Fo9 zQ0>`8LTT?^it?$YNHF~n5-Z!ptaa4#=-nJ zIzHXq79nz!yQK6({LB{=Fj9XZ^pU|&sfNMD$$DZ|O4_iJIhP}nwH@3uQ1yeXN$>66 zQr})&W?r;xWEN?XF!~u_K3LjT8gG@lMpxOKhtGZ=F~BBKZ@7m1Y6Wb-jL<<~Iai%m za@5jfyFk*hJ6D0xj5`&?``V6-5|H98HhAN@5lD4;U0DKeoZ3wC-_zaOOv{;^+|yWF z02?}G6daI4CsfBlogxofBK;PRIRGZJ?rz!Ti2LF5_`2gN-un@l7YYD=&Wm!y*uF>t zVd+l*1kme#j-C3OjP(`Rm>QP_z*Tp9;4rNRP8H*(S|WSliuo>jA@F?I52_ANJAQQF zUPIN*Ic?)^PDzVbGEaYn=={QW6}NM+L2XDui&lrHcF+7+uc~S$=MzqrHHYd@0f&r~ z8yI=jOPtt=yaX+m{@&`D}yw=w&z5U`7$9@w? 
zEsR07ntFUgfw{0 z`@12uMvZ&>Z9>O#xz0UiB-SHpZvV~x*x`5mj}h_x^q8uFxuD(}n4Kjlh}#>VDhfja zTdcB78E<&PU(_9{l~)%}tJ?${MHdl*0~!{!#3NN2b77IQkFc8FOKyijki7v~-%l$Z z7?14gV#rt8yMRpCZn~>^ddI;L;a=mAcC5M&C9Jr!Er{*W4Qu2qf+7s*&@8UsA^qyM z_RIc&FE}VYme_DnjmZCW@ zf@;R;`%Idsze^fbcsEPR<0m8M+AysC@9Tpm(TTeQwguPTr?%BH;G9ZZRO&+$Y+D9p z(t8l&o_??HNMk*f8%G#CSijVyvMrPprUGMa1yk-CghfMO0C*dCW>d)5;}LQzzU=`} ztux1-ceW!STtwe#eg=eg+s?aaTmdKvY#19FqFbkpZgsNWmb*wp6Ph3(xP{=2g}!eK z^-K}Sgn*;44Na`}TmqvDQ_`997dbP&Mk4n2Y5o z?=i3;m_LBY5;mfyO_|3NG+tLT1yCma(JYN92tRWKG;n`g77$p@y-YAQUER()S0$kh z-JjwKyC;$miztjE^4g&P-J}!geMuMDDQ>r>D#?+qZ}$F39~@5ruq4LvifYN@*>CyN zyrXvA>&Vh>L|U5nQxTO>C$5NI5j0-d9lFgwfLZrkgm!wL_e37<@xhFZ;O#Re%i*Yz z@r|GwNS{&ZBgZzDGgDyW+%{lN(;E>$Fvy~m=&#!P2SgPB0|W(>$9L1`%J}rEw9kP% zZg=JoB2>w2TcHWlse!6Ni%?lKbfwc#G5o?#JtpY`s?FFS^e2$UC)k zJPC^e82xV_WGRop?q@9NQ_9X@h%ogaC>OyN1u47<#_*=$9hE-E-p$ zi0?$1JSyfMfcPBZf@=vUqj(#H zj(es#sMcsWH`QnlO8=p26pVKh44&~^C^mJO;gMf^d8`-^(Ke7TqKg=XEE1UAHFvWxsNtUV|b z4eNi0i!Q8}SO2W*zHa570=c9bIlq-ci$yn^8g zdhJ~@L+pfN0k{;z#{94fludcGvQ8fI-&OkNf^)x@#bh1v5`dMM^zBJ@UnKZ4_!1&( z%0`hO#tkQ#F5|hUR(NT(rIZZJ#FIdPj`{2rF9f~UBw*wL`C&IV^eJ4+BoYuQK@$0p zegH@Wk)DWw2c6Qnx0ZO&uqsS}7gm^o($8c~c;6mWj_FFTqv!m*E^JnUkXv>rD~95c zJhsO4>>J2900=U8kfrbZQyvA-<|dDw!kX5ze2442?J_0GgxO=kZ5|D^vzKD6UROR-|9?WF#1?@Ef3h;xcg7yMoMSLuQQl zaQ$aHshBlG5U_2OW9{4Hb8Vpi^(QaJ%4z*#Wnovu0IxpFzj{pmfY5y&$Mj?Rhxw#- zr(4C%+J>Y+`0|aFBn4_pi(&~z;)`skGDu#{6yNTvoDg&A`+Vb6{YNuIkU~7ID-Gf# z-m5D&K_4C$9DNsrKh1nWifETcntQm9p=-YzYs!zHQ;Rn0aC>Q2G;X#!!C1R<2e%T7 z->W{C?lWd21copXBh|P<-KER^ynK-wU*Okc;wK(B`amjMcm@y@#6j@38%<^O1D7}5?EA3@)79s|In(`I@P@7tDJ4dKKD5cr zkW0qrh#qsDcDrBcN++N!a9#V}CnKn|AS;pH*^Nx&!p?@dIvv*b9cQmJ+GkeH3a-Z= zycvW~_fdA?nFzddT&22|uBNSe$1N*kH_S|%b7p6{h0U}MU=T|m1BO`d)@rS~MTIf~ ztfwPi$b%@B0D36+_wNXR@2yz81Msaqd;|*XDY4^M7+rtlqkvvr6o_9rN1$3}r#S?J z{lv`9`_uJtx|eEUm@(gV%Ml=-*U)34p^Y|Bi&Ig-6ixp12#-KzjQP7AUg5sD-Th){ z_l(GMNTDGTj0DD3C!^TdJwx|Y{b?x}a?nsdld9) z@;Ncp19fp%$UcLliu0Lk0H7e&!e*;~K<-qN#=Vg}w5sT?hXTCeG8il=1;y@F@7X~T z$o;QALFB#Z3M;@Ri?aCT=9BP9Ww@Y-w%>PTZE)NP|16hYZgIoYy0+dl#LdqB41Dz} zINstsWVnjQuQ+pxQ>7YnP9!eTVKJMvE(H5>`nj;3%^aN!;6VF%P2~toLnT!Nari@W zgKNEDkzIYGppIO{D95yJ&+EKlvB5``L8??GGaCv`<8{-ifW#4{iO?^E4~DNaRX$LU zd?jMkCBa|??HMq~`lJMtxqCV9bQKC(5Dhvg@1p;D?cClMa6$6HOc(6RFoD3mH_} zl@{2x*h>_(56~7Ex=t7Q;OK9hFlrF{zDm2L<7l>{L7HqP|Ubbmlh11wWOv))$m zm&ZNRnda$_wTIRGiY$q6Z!ov7Ng;JJzZ|zsPpP*+GH1!GwLgFWOH<2U{mBNM=2vM( z@O%a}9xJGN8PDp7gf#(V_ zVQZU@y7X`{u{pJE&Zy!#0xYxX2(IRb9rM^HU`RsPoVi2U1y!rd8^0t9C(x3#D;M2- zf848z{h2I4E0;IVI&``ca#&5ekH^KGHOC_3?IeE$zlW-Az04CQoAnHl7wv5(r%}2N z?LXiykvRrUPX(i0mbh>7%MY*D>ivZ%9jC^6#1m@mx-&phvRhtRIiI8h=W#gzRoYKm zmrDqQ+kvdTOoEQ|MAvhyXv9g=80th&siS}G>MH%Vyyz8?k)Ga#m2eb?Q~BPFO=hHI zM05^5tMaHmtBtjYjgA?p-Lbb;fqJ|5^C6>s5yPgNUNlTi`bOgO`ntYoqwwEwS^)%t zw-Um3%6T6R@I~I5O*J$#R90373C^U@nLSC2kz#KQ8vobNjZWG{dX^F)|ywI4CFk0R-5cJ&0>0`r7GFX1p5Jn5Af`T?YXzv z`P~oo4w=V+@%x5h(AZ6!%=GomZVOQATCeJF+1;z2X?|t6Vmo6k#-uVOjmNyF0tyH6 zUAwq#ZR`k8Q2D*(o8(j{6mED=T(XHC4#!v-6YTNcTO;%IJF;wwh_qS1F$S1!D!CY`~3|)_&(U@-%(my_2J2S0%qOntOu_H(fAJf_v zQaj;Z<|;-Ic(nsYv=#%>Bmzll%SvP9NP-5Rc|9_kVI_@i1XTe%iP!Iqh?mblOL!>q ziVJ`bliw~<6`*Q|KNZx5p7Eb2=zs#Xd7O3wmrWpKk)1CuRzDz@KP8h665k}6F8lac z(;w>XN9hl1U{1kdWfsqJB*mqn6-;Vg+;Q8673%)}*V_WtyVORfuc^|9alOh3T(6*6 z_eTusIyf&&L4W^1eqZkG<|@1o`veH?VM+5T4gQf*j74hlw|V1A?}Sm$U?fFU^F!214XqSlC0QMBPwjOB%O?}N0=-8Bul zt$e(Kd3{oDgRaIH4_?M48rs8AzIKtNi=9__I3P#5$~}o?%tB0Dm5}o~-8oW~*UB!| zbEHhEcI1W(xqj@B0sN1Ap;et{UEZHHVNn5~`%vKn8nF~z=Ldp#nU{BaKw>|j@M=Gs z|1euD>(VOxIE?u$zX>x;lP?l#H1GbiZZo9{=+RSHy2O#|yMGwG`B+v$-#P6(qFLRm6vYfL5(z%qgGuM>0ex23=~MtmVK9e#xJIHBpxFTr>$< 
z8uB&c=1?R~{N*&nly=Mzd_#shdX-WqI9<=vOhzn04cGVsqRA_}wLZEy9y0v7w~A~7 zRj2>cx8%k#qEbL<32ChGR;4e*3HQ>VM|{2u(pN9im)}Da4S+S$hMW6G-XUOwS9oU0 zW#Yn-(Q-NV<-4nt`{xTxpXhuHJmilZ7j36(>V`N)4UHzs-Niiu)O~OJ9nUGc ztjTy7;IIq~1%4C;E@_Tg!I+kgs@3IT%sVk9k`uxA0~}fQ^OXvjb770cnoOrXfcV5U z6FYIa5Ic1mFayq0&d9udGbvIwnkRzrnR^NPRKnk4D>~s^!a^IJwRoeu(}CsqXCGVJ zaiGnd%&iYv^_i$RMv)!fBV^O9(aFUq4?D;U%QlepB~jtiV4 zuX0^cv2p#7-k)heMggGBDxc?+2-;MumK7ot_7TO~8z735I}k>I46 zbG%I9Ap=aMA>$BHEuO7j!S0j*Hpl+;&{BTmiLNw8byZXuU+k*5hc2!PcW^Aafd-u? zTc`$dL$wQ>eSi7hGdn%04;d=9<9YMdZW$TgVs`C_C*@`!b;2lpwhN-=l9@dVo-cwW z8>H%y`>yTAE+Fhb@1~`%?rOfn?k=Ng=+4V@_5=kjj-X7J=@f%mVu2+LH7x!po0BL$&0Vkr9$s>><<5raehjVT( z^l;6^myaIlA`72DM9bZnnAsP~5}pn}k!}7Vb$Bot8Wubt?u7_#)WO^eK4PZ)xlg~G zR;?|-*#QGnorFnP$qeXfTwp2>vTrgsJbaOPA1L{U-S&X1=m7reJfgLp92l8+W{G12 z*2jZ_@&aQf(2vT9Z4rha+$;f4zuRC)0Ep>|W0#(*803~vZqE38_ji9T1VDFh=Fh|6KpItp?dooEi|73u^-9&NMUwb{+|76Z4pZZW)e1O*1o>=eEqcIxj8p`z|SWK zV+NRXF-EFx*{6g43^S(EDj9n~^nP|)sK@7*=e7kH z^T7PY@ew|3Q@*zSU%0WoW88pyHgU(ZzA1NCf0e-M23}d)x z{1Ei>T_a*Jmk0_&3(O@#vRL5I0(|ygg(e%XbCxpDy#rhz-=O(cA@N8m_P&iEt%PzwzI!4|A5g#K148?&p=>i zSHAzPQZy&sjPa%lJ(K+xPY^qc6?5_#;NWhHD0R!h2QzkwYC0t_ahAwb>#wbg7&gY! zROTbV`#jVFEW_kqVHkZMZbAW<{p<`EE6$Decq9n;h|^#}i~kA`6;10GqAAd@Ce-FE zbFLa1^fzO0A%4f>BhF;(|5YE{%g8*qAa0ge`t5vcsmxF{#Ke}xFa^VKPuq!+(vOL8 zzy}vN$U0A?hsH?{?bo8p(k*uLHT9txK>e`6IN2lg`Ti&nObqYBXe_{SwTKRBIVp;w z_pk+8y`k6ViKnfpSRr2!o7LnYUc-9Qt^p0ceaMr$;M(W_+M4Y3On{^Wi(jwtNSt3> z<17RLU?hSi+Lb@aGf#}l|>cTt&%?UIKAxWH87)h0aK9CNf?z!TnnOCCrROIA4e%lPu=g2`OX+p4yz3M*MwV4OWF}ih5ul@%nM_Y+OVfj) z%?x(vP>e-g3NKeWO8bSn=bhEQV3DW!ckr7CbH7gyAreY3f+~D;N#5-T1%{-u*F)Na zz606^WX}D7VswRIhTJ=Hzk}#tKOnE8mIL-<2kwaxHOWZTHi%iY12XXUJ3J8=*dk&G zP4d-E7mRS@9b|yr*dIno@zI0|hx!vZ9E&);S6E02yz?vLJav$E^`!$%93B6#NdJ?hlD zsTz;}JO`b3GEa`x`$i-=FxmschlAhq{eD2#tF)r)WZLZLFNzR1Hq;D1&-p6TE1vea ziug)2G|?-4mfRQ@D7~KSQ&&iMnDrK>D5}L2~{t*c8#LqDX;EF(oH2-0cM)stAR)j6AfEMrV zNtv7J`*QC!QWKxn>{suNh57Y+kf}LlC_8?F!XW)8h??ILHUdLY^7qYqF;)= zB3`0&e=pY?p5g$AzxO)!MQ6L?&y+XCEM4iLec-Qd$WN=!fn+pP(&1#RqbN#mjWE$E z%z&)s4iCGMgSgVE?(2;7jxy?EZ!^8u!#63oOnx0M;Ny zT>K3R%7*9IBQ*l$O-b`*HY#0mh7S4S3~6)#|FBYf1usJSQm?}dp=t`<*f&Wo{zB>9 zTMdq3V+RNF^-RMa-A&l%n;Xc!)kHjb$QVN4@=`bLl)WQFXGlGgqAfS=bC zVkV*va05YKq>48r?QiR$V(7`t$rkB^c-PsF#my_4@uMK?-YRY@hl1!Rh;5IH70v2n z1Z|Iy%97W@z0-hLn}c`^ey+?gVmr|X9&teY(#+(nB(*imZFECh*6~)I5--M+X_wYI z@v?MPM@TXU<&qFKoI)eP?I62!<+=#a@_hQtCawt&_FgPmp)Xft2R95P8#^v=Cc9}I z=@jk}R!n1?NOfPTI)c5tEGnaTBe9HXmUKP*9SuDJ=&^5PA~(w}&X<=R0u|_=heZMh zIv*^`+@2Z+an5w^l-(cGa-&KW9}h7aIA> z%r)k}_Gq581uWwnS*gi+-@pfz-C-lZfyKbu&$OEIcSp0p|IgsabdhMD%3dPdjgf3{ zGFB8E$c%z~@9pJH#Qgu7pI!(&2>g59R9>WOm-R<2tjk2Z%>Yyj!7IR3tT`M2;eN^( z-!Z-TE`*kWp9>CW&hwkUWlKd^>>;(^$t5oc;*OS?4N^y}Ow9Yef!^tq z%m?Kk+9P8DR&u&4$~1f?fWnW6npeEN(@l_T0YqfqU_Vy`KTuQQV;iAI^s!Sr45dGK z>{+6ZQ$!Nc$C+Ogq0WT;RiQzPvOE}%Sdn;LRh{*FEds3U;D5WJMZMEz9af<03CQ0I zR=KQRig=IS2XEnY?)@urgq--xgtzx9tzo}i8Y#Zc^nBG|$xx(JTGw*n$Lw!+W`CP+ zaK01Ax@of^qqFJ8-Ta-MZ{?2vdSv^KlY4BcQ=@*J_%7_*M{8DJ{`re*-*5l!-wpqM ze8zj<=a#>SDWhM|^SLEN9`mU2aAe_-BN0n1oHoI;hxyYZ6~p>Pv-YFB4Ev^igSWlx zWKEO&9qEw8zkkQiyyRw|9qgrTeyIJ%%I(YBASxd4ujbel(xqsiMtQf|90TbG>+?5Z z)O$)Ypn&V#@s1?def5N(`#IK=1-tz$))s;3PUx@BK3$DkA@|}1Uh;Mr8SU&*58EM^ z9X?ZjrsmxhNILEkv{EjwaY^#t`of_yU7NCXfUOJsVf!}oW(~W;1!YEZL~=CBQuP3T z!u!qH0Vqb6QE#g3S5UP~{9N-=v}5a0#4S&acNl#xGg)1`%39m)Y{zj{*V+$1cb{9w$#5*q z_fEMyE{JE3lXqB)&pkT!*VlbNxYjznrmH7h`O+{gw71k`^k;OQ=Su6ctmyyiE_CZD zaVYnn!2(nUC~ZadXWoinbi;M~pB-=9|KjpcOf?YRGUQ)9L2J0W0l>pexhP-Lhjyy) z+{oP+!>He7Q-Hf6m?VQ=7COZ|%%bWcC~%g3i%f8k>G_fZy2ST{G`rrVx%#*y)2_w~**v8mp?Tnjqi*p5Vrij)P_aOj_7|li8iWRcfkkJ=47kfC*l#+{UsFEKoYFVpj`v 
zea(M$-R;if?!EQEX2NZT3k|sc)1Q2mbxKN05q>Sq{aW^A4&v{Xm$!9Y`TP6z#91`f zH+EuahaZFscRB^^0Ccx|x~?Je4eVFlTdV4BA60zi0uf?20g9`6#LNGtm+hqiLO$O< z17)jS5T$HhrN&*tT)Scwm_gM$zGkV`^0p>0_HJlX_T{S1Pit%#H&bva{R35he^J^K zT=O&fdb2(2A_1fs5X_(55yHbMy9F*t6+Ceb9?wp z^#_0JW)-@9dupbZ^nK1-IqSI;#u^;%2rXy!ILSIYf!_Kg@Uij9zGlRk(d%&U1VTwz za&6S6La-1pWI!p8lneq=BE+r7liN^l`2xWd%uKf=1l8*+Ib1JK&?dzrx{_NkZig6D z5iXTY?Kj{B65St(;*YR|2JtR5MWMpJF`N+?KYRK8r%wcUB&+?!rF|LKqiu}moT}Ym zvNv*_PM_YleBHL%U#_xtKDWBiFVKu2bD^@09g{jsMZ2 z{N>Bp=;QlhHNMpCTyEv--dbvvl|9@|U59l&xWoeY#sy9@ygehA_}Hmw`wBPWSfZXT zeOp|Jo8V@X*c|+~eI=sy2(|GmL-1S302HMKr4V6a^Trt|c$%`lb9yt}+dQ$T{3%1b zOyh6qtE)%3-@ojiO$c1!#}Ier{F$|y0!3%_NnM^&0UnO-`IR>T*rzTJ;ki@eoQN=b zEqkS$4{BqYNmz->0B_2`j-tTK%Ex#Q2*urY6wSce;jZ{@cB$K3ulEG3k57;2-Pt3m zoTNF28B0y3pQ82$k_`@A;r%e>BUHttRuH4#Tje0j?-b-=ZO9;^@s`Mbopi z+<-U1(YzY6?i4KX@fRc2pY|t<*a}D6?wtdum6@eKWVx!p87k~AN7k#_s}HI9!R%#* zqO}Cbgskh%R{V6AUdCpY-`BzT8TXYrgJ;7e+WTt^dikRV$JnoMf?tP$Sr;piq|eY_oPGYcjLNW@y9M&6 z=idp`G*S{uZbrN8TmB=dtX0lq#IeOx(b+GpDc9Rhw9Ag=vxQ-rds3cSQ^I#0^VpSp zbL>q?xp33jeI}o8yBukmV9*v^pGP<25AQl7>qfRxC@8hvI<`6tSHf%L{_wVXNzKWR zVv!`*d5Gq73U;gR98}GX(IU^xE@aH_ksmfO?T(u9QTt{OgdOgJ#b;8qk427gFR%Tw z)tR^=X1Gz?|JMDS>Ek=``)H%S8E~cX>NXS*i*8h`a!ZIh9+j!v=fnzTbIIt4o&!CO z1s?nO9(`QH(&t1|Sgy+0O3$uE@2%y)x9PVRb6F*YKbC*x{_|690tVMzrG?SOy1d#U zP^{1oi)$fXUWa9dX%wGJSaCi59xKQO8Czn&oAaJFiVH!Du8Cyb;G+jmQ7T0t8-?Wc z;U-O4)C%Z)rySOLbZFO*fK z4-S9Wxq62%4F{`QJzTPeT3sp5N1Fs{vVwaC?^$hOuNlV+ZZMeKFGkW9Q*{Dcv! zqWk$Ju2i{@FtPXPvn>a62XR5tO4Ni?kVh+H!0WIEvOIBm7{0nnu)(?C)iG`lKlMVV zY->P~k#zg=t6*h_Bh@=dRvxb219;heg74#kk)o@j;s}#{eOVlnz#(VZ=fndz)MsaD zAE~r@jwh6p$sw9P$KO$)Rjy<8)OOThY8-=h)tFEAbsnkVL zf|oZId|!Gd6KfHsrGKQ|mSw|NENmdvPnL{V_e<6mS;bzzz@LzsZ5|)y4QRU|mU?nfkXzndBdvKUq5&F0l9purCbv8UbmUbhU=oca+R){4F>UOHzZ<^SnZ{j zGi^&KcaS$7w_MoLH60Bz84d z=escr_6}I3L4$A;0o}-{Zp#m}ykD|dj`zJrB4QD%&9c9-ehXvx&~ilYj(E8E4tcM6 zR#oY}YcblE{K9Re_IDg}A~l0eY-s#jq!?$o2UUS9Pc_8D`G|hWzEZb0S^XyHKwP3} z&F{|=HdZ2>%CZW>1KQFz;prxSTZRd?ORu-wN)t4B+f#~4L$AdP>a+WH8b@jC5tFL* z{0qEZ{Q6n@TBZ=d4e)zHwn4Yw4BWAZoHJHa#4e`st}~eiZNyQtQQj34sbskFspva_ z;vI+HYy#t}MU9F~Zcb1!*F-?_xU2F(j{5#+6B_r&Z@v`k`bA&?zD2iW*eTs8x1sm$ zQ-sA4B>{;S4lhn+S)+6B(#3YS8o;138=WMwvMN8*6i^ zaqc}usv(HvaB5B7tqM4WPpGBL3tLhfiJK+bzo4%u`D{PI^_;>kpH0Jdra8zS*T*Wj z9Sr^Bd$yNwF0V~(aH=Y^_xfsSvC>626qCz#nCX(oCC%a}_o^1-j>pD*ZQ_jj@)C`P zG_KBDa-*0UxvnoXZ){C)9LY*iw{qCM=W3`49)h^q&0v}j3+!^1^ zB4E%{N@#7av2u8vW?aaS$q_Eb@&6*bq(wq>x24gxVU~Fyp*XF3%x^2>>Wb)nF{*(h zDU&!dnl}8k#@u4T#;Qshcii;i_3o-ayN`>= z)X7YJHBs)z+WS{y4ockBKOtO9Z9rkOUU#Vj>_ zskt9H}f3O^K8cZJH;oOldRCH7j!m&BR27np9j;$^}$ZN>maQ zMiEdD{Vue9dEe)K{`tP|pZ+Mi@9RF-xz72WbAIPsQX6YA;$dpKsBF|s#t%!IGHOaF zY1HAy61N*25F^G%EbhNZaS#wC&%~*o>+lAv)vIR=X{%+wuz!kIG1^Wt^7^uGnN3ce zJ(1&O`+5FM8C2)5Q^&ZxxQE9(R^KxXx=1Q1zhnS)v9lJdp+crB(s3ap$1ilAzh5y8 zmsWmXVvx%wcM8fN5MwH{6jl})i`^rRVN5K%(FODOVi-FJ+#cv8kK66T(4#LSt40XJ zkX6_~h(4PJmHPf~3*_H$K2Y}R{ugox{DXV$cul@;UcS?%NSDsqr`DHpsp!V$frT$h zCC{f0v&^Ko?$q7@mdSX}a177w>2rBySZn6KT!ppbT54{0&=Sfd40;2zjoxn}N{u-$GA2+@* zwqj%N+_>Y%|2T2M{r6z!{CIknVLvARA8}hm&|1=(xkM;~G@UCX@yK+SX(#`t9HA!@ z4M}#7wBd!56e(wQzm_rhibsClxWKUJIM2nZHC~!Zee|7**7~m?-x(*hz@Xxvs82n? z*+B4wNrU;WR;|@Aw>c_{i@$>Ll+NS}hZjsvhPjEXaOJBoY*%nQ)#mt>2@F;_K8Y5GSNII? 
z;e9V(tYRAQapK^5|KRd`Ft|+#29<)utigPm-MrmD9ra6OKPXxnf0DmGoOa-e`Q2G0 zsT6?vwNE4RmxT?Hm3dPSI!D|nprGdbe}4Jmonm%Q(?(FBJ!}7o0^H1UE^M03pFe)+ zLg+u=Xa7lx(sW2RpE2nGGGp=0AgfRetwbnfK_mgsw2Tze6K*Q?P6>i(NgSs98$tF*!oIc?2sK>o%EjQ;5Ztn|+zX2v$2pP>d0J z)ib6w{6x|RXbL%Ym;+(u5jP)2uyg*9GwK>Q3W7P{RmG!kCvm@0W7@0aKX z5Gi+V6kv8%OGdssT@dKW>q5~hV}}nLlt%`2541WvSpM0F&0ES%4ei;gN%Bh#NHq|m z`f9Hllp@4OGl#l@mUJkd*9#JorDpv%oC5=ygSNIENhS?LT8j(If;@PWBKq`F7SWr; zg>}_5ag^_H>OHqWf1J6fT-Ch8eDn_!P>DIP`5U*3vNyF9T%CPj$V-c82k)`H&{Tw=ao${sN>4&qw^U~$L$%2RCgXD2@xWOHVx%J@8 zf%$R0Nec|juK0Bg>xahUxHmbcW{N_>jysDzTUIU4S2f@JBwV~pa4Hm>{P#H{Wub7}g7~v}zu{)eHv=vl?b3|GfWo zaXR*2)JGHcL8SKStah!u|McaoUcTrYldW!d~Cf(n@(Ou^h@IrGDPb62*iGxXQ4@?W%la}Z2)XD&_R+_)vx-Z|_ z*7g-VVtzLfGHN)x1;0_nRax~Zf5)`vPCI2M&QyE;yXQ_>O%G3Rf61%Z=%K8tzHUC0 zFPYZOvLcUGhCFl1e)QizdcAt(WVVHYbeXhFXWD+IM7-gj>0>3b^lySvCQ{UEgKh8h z9{R6Uhh+gY{|0v;5EJ7$R5`SY-YM@A;&#U=jl)UPumdN@I_S{;A2x@cbilQ*>u=9= zEXtcH)%nGM`m^7`LdfftbHRZ+H@t2xpA$el66{fAw8=fxboY}p*OchsSDgkm)F(B$>Fq3-c0lWSZhaa!fYB0{91U_9uM zAJ6N**U!`G6dtc~pJL2fLXgg7Ugs_ZkA8VlH0RMt7rfBGMa|8ptmK_m>l zL(>K3(?YjAl#)O{ivsAno?q}K4Y_(qsI@&6r6uct5GIGwl;8%1Y_p=<7UGZNr>n8Uzs|JCpy=- zZU0bH#xKgdf`rew6+XfT#Z6F!w7^QfQ|GEkxi{^BQ{|NDCLvXhU99wHx22I62eDL< zz|-yFA#TQud-2fTiYO{M_?GxKE6S8D`VPC^%QA*Q&6h+wDRFdAKWxSfX46pT5-d4~ zv85H&TaO4{P8t-nwP!kUZ-4$!(u#b^}sE4JYPz0Ct$j;i9W z5syec-Fc!^S%ZA5W2dHW?^n=eP~ieXsq`7m$}w){P)d8VEaCAx`@(ybD7!}oI^k`0 z>_o`WtdW1uGa0%x=|)2Ot8K45m_OiFvpnANfKJ~02gq={*l$l4pH?>tyl(M*$vnVy zQt>(Qsg1LhK8l{qw@|+Xau+HpHeoM3`j=0s1AN__-Tn!i>M`Nq`37U7OJwNWmi$wd z_kSP^tlZrQt%n~(9Yd*~SH_p5i*IXlZ*`(hd4BS!;kDdX0B`47Dc`bDRZ%o=b*=adc_VlipHySgKJ^jst>1%z7pJIS^wcL{(dm0`6j|Z0#eV`m-?z)n_ zn~Y$ZMiI^%-Bmf*-Xxr!A}>)QVlFGiwb$_4E{&PX zQzq&H-W59EzAlGg!7Z8BD~;ZtF0XW`ol*+(c{S=su^WS%e(E;~?2;C7Hf(%PQ7zxC z)=$oNZgml%2RARyow}s1Z8?x#uri0S=@lw>qH|p4pJspCr`uuJ9#9ZxUb;nH@!$iW zX)CUZh7A2WIa!*g)XRKpMQOf{QuQgeS2^f+w9&T-#$MV@8C=0B zUwFGJ4;DW9rv*owEm{3Q=+qbXeiPs36@A>Y@1*cI2gGga%$XoFP9phZv6{|dt3C6@ zD_s{7^bT0vA4v9avs<1|yeqlC3j~_R?{c7TkD*H_l~t;JVH#Mg_%|;WQbxAU8@)^< zD^?$2XV#9)#>+}-?E{O{bLhQKG=QG&F;kx&H3iHSWO5#2kqODCpvG=b0ehnv1zh|g z>C61Qg?-z7cY_yPU4)qyD1oRP@U(YzzG8KaTGJOTt{UncLHT*oD>eG@nHVN$Q~{`@NK7?GK#|MdL}8;Gp@ z0!I4o;zM3AF(`W9cjTnFR2U6xZOfDgJ}{Y>m~w_u-{TW-^*sLdZC8*5Kr%i+=&OIwNPg} zXPO2jB8@)b-bP)r0PXCAFsT{SytBzA*DU^+1G#2Q%^*5I>mih$Jsc4{LSAesC6u%! 
zX0%qsQs|D!9`d#l2oSk#R!G6ReR1Iv@9}m+W~XFashV3_Nc|d{y+1+qg?cVa8jPy- z(fN)}2N8N(jL-jq&VOMX>2{Uy!tI_H^U7Kumi8mVxlait)xK@SO2NQ{=DcqROOGh(Np=!LS6>}_a5+>GmQ2666Nvv@glM(C>RSKX0S){p_ygb&O5!3Cq7@ooe*FgBT`#>*F?3 z`0%)UNE0PaiWu+GJ#WiY-`7It^7(ukr|XsmM^>|E#3C6`Lw+eacCThf5{lj%t{tPn z`p3+7dM^`QkJvGZYoofbDpU%WZ)RSa+nTQ9dK zw&Kvn5z60bI(X4h9H)GILfpu{pZ9yau^%&$^zAW*>+@4k(TyQj*ht9hG<5aOW9b^s z=~)^r8_lNC1T-tHBv!A(fXhJ*K~C3-zf+zZy%jgI?!QCrUpqN~W_%JjnzO3`2~S_W z)+K%K5c9a+y=sj7QjBi(4q?loKD`%i70+b8hu|8yk=A0ojDgfUz85Foamqk;s=AAG zh=p606YR+L7N>&s{8QGJ_>KErhlqp@VondE* zf(804h@T*LVB8+}hd2fuUt=%U{3VO8xA<5O_1@X6F*QR?*2dgcCa zk|;~ryfHT-ww+kup12?oT5VQ(ps0X&C2OpJsZP|mo)a8P5SPy{%)Qcazs916Nc>mV zzHV4!S1uKyd3OvNX>>nBW+D?@MN2bd$Pj+Y29?`8{BbxGc~`#c8yMk8%`4mzd-``Fqnl>VJY-i;dabNBzc}5 zg=A>Mw3tznMOGWY7Ab||P@wqsY$gB+{qWMx4`KtQp*?%_wOm2{canNy3t3u%7dHIy zjZeZ?&W^58pxv$)%hu>*#pRTd=O+bTs>(HZCE~zq+L}qi+@QN#k7W+a9+5EQnupbV zG9d_1;c)^FJ$|{jKj2jajdfp#V*4k@-Tj9>Wn!#I+6HM5IExreIQUO)}+GW zzRawohA?Vzr$ZI3B^tyYd)2GxI;dDHyvvw#7E1ceO(=cU{P@~Ogd@P<-hZgZlPxVG z^}aMePo)^Ob?N>Hx8EE^`EG~QttO?Mnk6HX_${wBSf?Y=fah&zJYccT=)PgK84=#X z|K{<1Wtz?Kqcq#h0e@^k9$ja}knW_4*uxxQOD>&IC*RvtxCL4kP=`wu4qoOjdRf@< zWkt8~^>SO=;`YYR0;!erRnHX)0&PU0;S#K5K?U_~tVsc$fjeae5 zUsIu08lncVY!OcKO2L9`u;zYy7Wp*^K7K^0a*cH)4-DGWdU>u@TV@bhcrfXtSZx_p zyG5PTK2H-_2e*!ORNNupUVGAWP=lSt1hGs~FihE4wwIsJZ+-N8?1@v|^hHJl$6p|$ zqCs;uL7_ojp|a&F$VGl}xrbOn>gI$eyUYxJ_H&O5M<(%G;#DP%C^CBByR*m#U)k#l zu+oxyg|ErlT4P``?6qvty#{6ogz%z*X}sQvI|O%S@AbTllMRSD7`r;NzF=)wooNmv)wNq37-SK(uxZ6d0L^e2JxgzZfC^;6Rod4Bq!w+OeLVvx`NJHNLo+ zlJoDp?@Q8n>itvJ*%KQ6)3(y^Ev_rK9&Uk)i>6A(IzWS@zmHPfePK7lhg0C+lNklemdeIxH^&o-=_Ue&b{EW&5~x!`9eq-_DL_$LM z4Da)Hi*3MlT^UbOIHLCT1U>7SNYsK^2t>K!6sfDc8Q`c#QFxtYqMs;xz1_r~Zr*o; zQYkno2Z|MjKWkU;%Z+2v43$Fp*Py@RZ(+zUcTVa4#E%O_?%xM+F)a)hrnI=0a$ z=q^H40T9%EGddxd|MRHhvQU6BQG9IsOI%Hw{GdO<*uO$l5?zywh9+DCP6>HGq`>`P zTG9s*`pow|n~;={CcZF-;#|wePgw2*Z4)D^jjJ`xA`jwh z#!@-$<9C|93Xs#$H9yf$*iAZjp(eWSd(d(wD-Z=9@}3KF_TFmuJ4XUlqCXD|m9SqW z=GFdK(Z6$TPwcKhb~fR1MMzHv@#=c6{#q(pd%wI& zdZa^B*^~8=eocJ*rnp(YWr{ibnB8@Q9g~&qR>ByXM)ib7nZvrKWfMgtOvq(3id|!R z|A>9~h`i#M=jFzc@wCWv8jW-HQqbzn2ik}?D{c~rzRGCVRTv8M~O ztI4q@Ux!vs@=d9a27Qwujwfr&`ic&S`FAe$;Lh(a*^n8WS40SE1nn7uYh3LR%bJxT z5G2;k={0_IoP>I&upur{IrT;Kpwh~`xx75*dZ^0d(Jy9GwpEsi&ZXSFeAo)uiE7h$ z)kRx>I$bxnVGVaFt*mJ79&}tT4Bl;72M0|}8vKJ8L5^@ZT#?G-ww8fwqB60Ga=S`( zmBrD>1|ye>cZt9^{o}vlHs3;CDg5^FtGl12_>=}@Y@lgDNZc>S6rg=hZK@UaFQx@7 z9r3Iya}RAPPpf=>EuWA|TfB4u= zeqC+^aBL#H|JCO=DZ7BDb6DLgk@Z$v!~-)jLyP4whi zUYr~!OLfjZg?}lGRyhbeaOp03=@FI3r)MIYk4slGq0L~9R>rXxg$}WQL$!o9^8<6; za~E#v>G$cMc6H?G)|fd66Z)-O4o(gc^Nv>k)|au-e(~ zqAvkHm23yyq1gX@3B;GK&NEAhsv^#hwb?(Sul}tqT{kY6FPMI_f`W3XQOrI6DYTSi z-?NX2*5?bb%elds+(hBBqp6QnhA3Y-25H-{qq(i&+d$cC@mJ8{HcQu5RIYHjh5AK) z+@>qD2A#a4%qdNe*KjN3o5C@#-jycnr#8}F7_rH69U6H}Q683f+B=CG3P|sxY0`pX z0cBsXw#nGk$kgVk8uOxWzM_^O=`mBg`%+MsoN>veWI;62f4QJH%;cd@0=Q!USJZz8 zHduCfHFJGaKUo6rR^Z{?S1Q@yltUXj5!s^GEE-x`>4;Lrc~R7vsmmS*elcbT?^V_v zQzK-9rOIIvABFvm2lp6c$r$(NUjp5K77@R{cxcZ`JEbNFDIz}n;V@iPW3zye$lRJz zQxsU+qZ*4|SQ6!oqdD#IZZpV5H?~>u1sUe)8Pd6DpZ@VhMFh#F&#L<~N1#2$>vpHh zRsZgD$r@Joj&~kCzRO5mW+M%HZ4QlEu^Tm=!L*1&TT3Vdn9w?AbsM>q)&%8K8d0Vo zPZ{oAB3bY_@XNM$_H#dE><;%kDYH(-`_xoT9J4)2kLS#fg3phvf8Ng6G+G_JXr=?) 
zAnNJC!bAK=8Qi3j#yr`vTp!CF+9eSMST_8kh*0jPa{6;*@tzpYPeC=6b$@DT0uJdr zRr|J?pRb~50~i0Qn#ixE1a=h7q;Q(L~Qp4m2%|&QO*^l-6(7L2WuBTQ%fLZ5Q`roJ- zR&)-mdJu|X-=(;>QpBo1+q!^@>zxb^Xra+2q``vrpxTO9lIQ~WCzv0SlX|I*isvrJOcn(t zTTo7Ut&2nBu*-8qNLI2(D>^nwzE*El76ZXS}|syjXPK3$%Pz@NmRNTD&|!8sgm$I!LevTe*Hzfu<$t%(wiYuDPOz3GCAVWwnLMl$L+d$ z*b{!1oi|1EqLO6nlIHYvhTr4&i$_N6(>F!XyK2%gP!~dcOue(yJJQ9B33At-meQ-) zf8F(%hL8#}WN|Oh+RkZD6U$7N=*fggqhnjri4&<0_)>&)i`x#Ir>&Pkv^0 z&U?(|kE=%B9k8nKk7!ffP>&s;F3*XG{xcohgEyXPK!_IHOUg4=d+P3NvCaEIIMg-0 ztbZl5Y;SOyK4mXTClB*LYu&{bbZ?qnkqLMLy&uuq7P>#2>ibMr&c(G(Y>r4rQGFBo zurTo|L@oVlJB=n55AsZEuDF+;uP+I4#MpdVn>p<`RXJK{X)>o!^Rv_WNBuc;0Ze7| zDHtOWv#j4TZ{lCUtn7K|y2eF*{@q23Sq425B6q)-Q@&r7&hx7!PuP z0e3(1*@+K)hF#bhlj;|9271s%2T!&~u5ng{*xFZy`Zt0_p_`bl;#KS2`cR+1F%a_P@bxM&Sgkf)ZjNW*?pK-MxYO=F_nxX1I?u&6uIeTI?k?%S7Zl}B>VcKLjvMJqr9CFl${AnhZss~>9%5netF&iIycgV-x**7VUot%(#9d7sX-_r z5o>u?x1);Qcnv6c6ou|y7Y?WA9T6ZizF6Ei3%wDkx>|1Wy~@DnMLK8o{8S!n^OV!o z1>171p0A%i*C|zL#nYyA;rExqU#jz$#o|;bKTc@pj%?o&XHcd-nuLT$ML=w}PRMRt zDDumV%y~K%11m~0&~gCqWTf{WY6|gub}Fntg1&crMtB_ zF6ml`f^T15BRX7MiG?H>GKovL89ul|x;b`1KzZh5*JZ=jaXbC~ih+Ay7gK+k##!VS zq@rrfq)y!MKPb$ct-{`c;oC4uIa5^w{90<`JIX z2^bi$#JpG}FU!X&QpnmPSfgXfK;hd!i4-)`YZoW*FB$IO=E>sg zx%XtP1NpY=Hq=NLY7vf3OLt-%D>zHdoo$`@U%lUFI~Xl>zT^yT5HE8U6% zTcmCl1j@VvNWRQeM+d(rldDDBf#u424KN1}k^PDs zE@$v3eW^>egP3O3h$YOe-82Yh_?iNRQJt(D$BDcFH7UJ4vRm=GJV-N>X(B|hhGWlY=Z{fT^HNDBC^pE$!C{ZL9xl=#{S80*}yZ=4xw~7S|>*I@FJWnA>4y` z=EsKR|Hd3AxZk~o+gwS&p~{+Zp&&LtGxmD*r?3YM?W(61FP=~=Qn~5-8P2>9`pzt? zcuQ%EQt3MS)e*X6ZLb`3bp8wJ!7Tu5l?Y$C-xVh0H(SQw!tLtjn7herd!8#UArc5L zHVF5y(I@(F|42u@8qs=7Xz^*RL_-@$0tB?arng!$NO4Js+RBV3XhAv~QQYJ$xybL6 zuM;|0FgC>L#aaoA$S_e_d<5i%sl$&((V8JaJ6g*MtQ%9MoAHB7X|cBqB#51EMz9E? z{UAC>e%Y_b07fGj`KA*htg|auf?*v!_`Ssj{)DJYmB`gb%qyZXjA$HPP(GDt*@*hB zC`iod~v2 zs#5sdXQ*r2(I;fSLVqH|OZ01wFvH#2vQ<=c^~?{M10k4hi%53GfkxH^#i-Vld6SOZ zcoJtsY=FDysLRI_Nh^J+dzrEWQ2a5kyGVA5jX_jD-JIw=+5mufl5)eTfQrScR=tmc)9~VFT%5 zKUlAYZmSkXRa4vV9sw~(xp6@xJA)I3o+vj64-dD($M{l7Xi+!@Y9;7|Ec0*5M$(fp zBH3Y9WoAtQcB?1_iG6jyOEp3Mh*WtJV;5&xFUqEv-PbCO)Zx1CW2N=2>@A|0je@)K zUJ3g#8fCB)>Q+KFBCv!V4E#1Nrz}N^SBy^LMCL|8>m(vG7ag}|Ivf1e$Q!jy?OdR< zte-Pc+;R{n@(Q*zioWv|L*7RfRkJyr5%az~tNU3bd>*7xCSEyewbal|6u62rR0Lt* zNg94wru#E(1t)Hlj8j09P$*DQS+hbSg;Ob0`t)ITUWC4cpHzGg7UwEHv2YNm zU%781sx*~KCg5;nNi(`S{@rkZw>;0U@W4PadK>zQxz*SpHv77qHZOAG4;QV3>R@PZ zI8ycAD4B3%YVx0hV&ES5*lhzg(vnkjUmds6lll?^DD?>2Hxf(-(2c6OCN;%xs{H=N z^QlL&!YkIv1N2u}5Aj_mFe{_xCkE%9=%3&HoDa5T& zMNDujV+{l|P9~G|@A?vD$L-Ef)A1k`^9AnB->q{I z>N>Z@mcQ}NBpMCua!bF~4)l@xyp}Uc+2N~WGLVtwjI)t$kqq*fud(e}vdi72&B3_Q z3aC6qZmzaZa2>xdlezmMCrS`4#dBe9?Y~t7bqZ(|cq5^wTQeZk`J#inC=$z-Sm+S(D#d=eIrj@7S7O zrQ7|nY%YI|Y6Mvpi+SFbb)*Tc?NuF&RMoPTZPX2p7aSgP*05$L=17s*S`OPoZ_0Nl zYzBtRX{Mp9aAdDWvX*DabRT4@lBZr!tln`2@qFd>`z|S^K?c1L)3yyUid6d5eh8Rp zmg4WAJg5%?wSoiKQwgt=f_~43Dx!uQhnS}$tb6@$T)s$CKpJSQ%NMg=psfYK2UwN?b7(g}QMGqEpR z!&+wSI}L1pdNYEhW(zqa@;>yLJe2)&b4I4SOLVT!l4V_*wu2chVEpEXY{&orrSYYf ztQ}EdfxIL}XDLa36bLJa zB8IaGl`)Fe5&hty=ZXt(TOx9u@Yb2*-pxQT(KR>v#jfdFB2U}+>KmTw(ZbewS4~+j zjLl@EEv$sV34Y6IjPZ^oTxaG6?I2I0Fa;z=hO&A=F}JXxs30Ch$Ehn#A3G^tuSx@k$_> z$z%^Vtx~Rp5r*NeL%EML5Y=>YWNci;9OnEXq9pMnJnnZy^^n%0d&ft2z z{0(fI477E2?pr5u2J_cMMl#4Ue)uuQs^oq?%Y>U)dHhr?RGKmxhTcl6>LFQKU!LeH zyN1$u$`zN{&&WHWPg_vnXd1Tx)Hi2K*n_H@y=Z#2$pZ1gT*w`0Rup|xM&@&{a> z%O~vKZNV^xSeZKEyjXJ%=m;k>Ttq&(N1B?^@!uZLBAVdui;=#9+&>V+6 z`NgmNVF@+Q`OD;SX4LWGr#TSosVM|TsEOy)!pq3G6UWn*n|i>ldA(|-x4Ro+O@Vse*i57}bf zr&Rrp^$QpW#G-kYBMQ5DqAcQe)ctp4ZEbCW+@?{=<*#T2f)-JEy0Hl@7YgXQxs=LY=>1} zkw&J<33y_-OTxD`BAh|-_ zNXP%Q13uoy@Te1o*RW&52pr1>?lG2|q)0g`R`f 
zcnVa$S~Z{JZ<#zYDqc*2!gQnw->+tn7E#xv^z^%f%AC73n`}iJNMxT|wz@`-ct6VM(h%o8T*hlt5HSO5aIRW^Q2{O! zJ(Sw3fbzIxKjpjacIhFDN6DI=o=KM9QZAv}j%L}}lHL@~{;JgF{_3g|&6i-57f$t- zrYzBkvh|wDR>X>V#@)1>8CMpk6bh#}Qy);~PBLbpx2&btYh0Xdes?DI@_u<2(RV5y z9{~jMI&ol>FltSjmjrCW~_ku0CtoBtfiEiodNplu|>8*EQ+s z1Yrn~*55ib**|DH+edtl(O*Hf|1)o4rTpSU#`!_(x--XATllG0b!ms3Q-siY64om(G}^IY94oFSqnZXoP?VWdJ(QGPLEI8+1TW1aQ27vs!* zxFFH~=H8)xWTDJNX@6Oi7ukg93$$Gx+mUgzaAeb1=OKT8P`DbF2=NlVI)4mG7RC!J z=l6*D^5bHvKSYfY6#heYeM;8x?^P3|hgm%s6433|{F2<>JnW`#VW+&Bk^NJUJA8R> z=7>l?I=V}Lci71fcBmqYQ^i#2ALcIB_{qTngdXPCfsxl5Nz}K;L6lZ=vMUAZ<*Zu~ zzh1QOyWM)pX>$GGQPH;~)t=cg0)cpNC7V=ube(Ex7w|#|tDV==6!*MT3hXNk{a#|{ zrq0or+hr$eGBH%fP#@7(W>24+%E$aYLG~p|@_$Q_<=!aM_~?u%eozyca@UUeiatrO zZVk*ckfz+pZoj>k%S09Wzi|D3IQ&08H`nv`lzw!5$V@n6bd}@w@zaBtkjczflnY^u z2Zw&FsMAqV{etr^@qtfU))>o=Yd+@n2Nh{&t2S3aa^BVEcYZJUU=yVh<}J6*afVW^ zUK?I?8+w@|5w_{?X9@E#N~~x-QbB-*xjHc{7muNOoQ6+xGWP!UwZWnB?_XOnUN-+t zUm`xy8U~9whn3H&%y}T!Ez*Hr@cuH?|976PkWB8E2oGc1i48;J@ln>tp~OLYS2`=; z*IT(=Kh*l4ZGwWzMm--hx_+oNBlkl~i|Q{4!Da<~pzq_6hkc$yPVDo$TgwaIDH@U% zt5s`3qR8sFSZgR7o`3Km!N@Sw-c>s0a+bP}wI4XT=4`rj!ff6SJ^d^Jia$G9O}DR;v6{aTT;UyQgLL{VNKv1BWm2e#r~0+OSvTF z+^!~{R`V`Zf9&|H`^yhQ@nlO3rs&h2pvp=f<=2+c@(H%}6*A?v=bhPB+mwBd3?EEZ z{FdtGAufdt{EPoK*AZ%0?;M6&u136b{7J03{Yeygik1XUB3Gfan@i7FYq?S>xEYZ+ zmJyFqT~m~zGfH)gukpj@KB=u1_kbpuj9$w^pzo=F@5B3)s{*rHNeQpwzROO?=Knk> zF}xOS6=nygFMX9L=_AR1FxF`tLZ<;Uq7PsdHP+~7JOS1@Q+|7SRjQZD^@^vdD!&RwL0}$fv6r*g{2>kR?1HG)$92-*d%9PC?%k$f75zcNk0Y2 zU@3plaa$VeP9LtE?Eco^MSOV+ZZc#mh^^7b^!-;JP^S>U?slv5mT{#Ny3JMk zwAnkF*yrl~Q9X^_>L|7nR`&hy<=VY%y^&t+hsa;2D5r;wE9NkW2geew#=8>3UeT`-&Fb^@PR(QDpgn(1Dy?7Mn>dov!e|>-*`AkYMg1V0 zH$v)Q(0)>@d#hOdX%Az3?Nn-Iv!A14#dq=g3iWbhCo^$JdhaLyEMLoYD<>z>_{tL& zb*kPXH7A{Wm3wT~pO2af?smN%EqKOrvWFlo(%@j~X z2{p|Oa1puRv=g;%4-`@M|{rk@P*0{=M2tA!Vu>^L6$cIgmak5WeMo`n0;w4n4vnh!m-)nUu`ax) zk&gtDbT2%+Hj zs3Dj_J2LD&2@X20r0PMQ@)+dd(vbN&+QDVo4T6b;KLm@$Otg%SRbpd55sb z0PIa1X=pQ$DHh1MnKoZ;Ym+3&92tM4isns8Nf z16GPfV>jY_UGX55??N>xMg4lBFy5ukfz+5>`dGmXSwjh%K)crWh`hvV``9r?q{yL; z_3;*=Q#_t-r&IwQ!4VU2;&C3w#?k zAyJaFKICD@_`O=z^r`d(0 zo1JUXk`OboIKcO9o*3rCK%j-1+3A=Q9Wy7zY~6jguh>b*ItPdx*qkMH$XaX~624wi z|GC&3igF;R>y;Nk4lEB6)_2BnEJvZiu`(Kh+w<>=w^`6@%MTT<)+w*L-z;L>E>9bC zEFg0BxZt4cnycf4B4>|~J}JL>g9 zpGfw-MyT8UnR+|_hpLCvMLB$jN}VYL)Zi?fd;}{*6ZhFE@u-grMZW)>amolQK{v^- zUQy@|$DYMCT=Hczxg8IDyre}h0?Y@&YhF|ktUVt6(eT-1>PmR4ifv5?rWJo{vV;5( z`MQ!3aQllD?r^(5(y$J#J|45rKo|8hiWlGk5AGWdu1t^kZFqZTJ|yR0+n;vjP<~%_ zcv!K~lqPPB6Bk@uMBd(DqxLL2X@TwYh8CmB5O zaGg%UwTPFbOoB=8)Z;2is`*cvGQSClGg#d#85E8KDVDb6=ymllS#E8EB!JEz63QVvLJ;~DDIGZLC z#=_3Ocq3?K468(-7i~JvGCax-2%IW>0GHHRWFkGkkni1U!^ zw%+b{LiS>9^dp_@DU>RTH?P|tiaPXF&kWRw)iHN_gsG>Bu4Zk@x@>4@NE8($0?xZQ zWsru|PUsmoJ_4fpI3|0`uaem7YtB5U3tBx|Yetg}pcQU>W7Swz9jLpRBBd5V=$|<9 zWH9=JuBd5AYiZZbq744759S^hX`;F~kVAQ03rp^~m*BC8d4gFV{Ae@?5L%6kRwfYd z2@y#wEdYVKpHl3*1NAt+redgEj~C7LEwk#Ov3IUH@l;Lc!N8t4Fs)nTTIdYv&%B(rgE9<8Jedj%(L=ZI#nyV6aUp_6uL)}b53 zi@8!nsW&!K9F=kNGOF)_^m7i;&s*|U)?44W<&yX06FICj7x>0QZP*R^g(L33W)*#i z7V5S);ko3C6WB?jfQG-N2gr-ZtpMPx=9Gw~Bi9%E@St Date: Sat, 14 Jun 2025 17:35:23 +0800 Subject: [PATCH 15/76] fix Prerequisites --- README.md | 16 +++++++++------- README_en.md | 8 +++++--- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 5a56f28c8..1ef98bed3 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ vLLM MindSpore --- *最新消息* 🔥 -- [Coming Soon🏃] 适配vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3),新增支持vLLM V1架构、Qwen3大模型。 +- [2025/06] 适配vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3),新增支持vLLM 
 - [2025/04] 完成vLLM [v0.7.3](https://github.com/vllm-project/vllm/releases/tag/v0.7.3)适配,新增支持Automatic Prefix Caching、Chunked Prefill、Multi-step Scheduling、MTP等特性。联合openEuler社区和上海交通大学,实现DeepSeek全栈开源单机推理部署,你可以在[这里](https://www.openeuler.org/zh/news/openEuler/20240421-jd/20240421-jd.html)阅读详细报道。
 - [2025/03] 完成vLLM [v0.6.6.post1](https://github.com/vllm-project/vllm/releases/tag/v0.6.6.post1)适配,支持采用`vllm.entrypoints`部署基于MindSpore的DeepSeek-V3/R1、Qwen2.5等大模型推理服务。联合openEuler社区和北京大学,发布全栈开源DeepSeek推理方案,你可以在[这里](https://news.pku.edu.cn/xwzh/e13046c47d03471c8cebb950bd1f4598.htm)阅读详细报道。
 - [2025/02] MindSpore社区正式创建了[mindspore/vllm-mindspore](https://gitee.com/mindspore/vllm-mindspore)代码,旨在将MindSpore大模型推理能力接入vLLM。
@@ -22,15 +22,15 @@ vLLM MindSpore
 
 # 简介
 
-vLLM Mindspore插件(`vllm-mindspore`)是一个由[MindSpore社区](https://www.mindspore.cn/)孵化的vLLM后端插件。其旨在将基于Mindspore构建的大模型推理能力接入[vLLM](https://github.com/vllm-project/vllm),从而有机整合Mindspore和vLLM的技术长板,提供全栈开源、高性能、易用的大模型推理解决方案。
+vLLM Mindspore插件(`vllm-mindspore`)是一个由[MindSpore社区](https://www.mindspore.cn/)孵化的vLLM后端插件。其将基于MindSpore构建的大模型推理能力接入[vLLM](https://github.com/vllm-project/vllm),从而有机整合MindSpore和vLLM的技术优势,提供全栈开源、高性能、易用的大模型推理解决方案。
 
-vLLM MindSpore插件以将Mindspore大模型接入vLLM,并实现服务化部署为功能目标。其遵循以下设计原则:
+vLLM MindSpore插件以将MindSpore大模型接入vLLM,并实现服务化部署为功能目标。其遵循以下设计原则:
 
 - 接口兼容:支持vLLM原生的API和服务部署接口,避免新增配置文件或接口,降低用户学习成本和确保易用性。
 - 最小化侵入式修改:尽可能避免侵入式修改vLLM代码,以保障系统的可维护性和可演进性。
 - 组件解耦:最小化和规范化MindSpore大模型组件和vLLM服务组件的耦合面,以利于多种MindSpore大模型套件接入。
 
-基于上述设计原则,vLLM MindSpore采用如下图所示的系统架构,分组件类别实现vLLM与Mindspore的对接:
+基于上述设计原则,vLLM MindSpore采用如下图所示的系统架构,分组件类别实现vLLM与MindSpore的对接:
 
 - 服务化组件:通过将LLM Engine、Scheduler等服务化组件中的PyTorch API调用映射至MindSpore能力调用,继承支持包括Continuous Batching、PagedAttention在内的服务化功能。
 - 大模型组件:通过注册或替换模型、网络层、自定义算子等组件,将MindSpore Transformers、MindSpore One等MindSpore大模型套件和自定义大模型接入vLLM。
@@ -39,7 +39,7 @@ vLLM MindSpore插件以将Mindspore大模型接入vLLM,并实现服务化部
 Description
 
-vLLM MindSpore采用vLLM社区推荐的插件机制,实现能力注册。未来期望遵循[[RPC] Multi-framework support for vllm](https://gitee.com/mindspore/vllm-mindspore/issues/IBTNRG)所述原则。
+vLLM MindSpore采用vLLM社区推荐的插件机制,实现能力注册。未来期望遵循[RPC Multi-framework support for vllm](https://gitee.com/mindspore/vllm-mindspore/issues/IBTNRG)所述原则。
 
 # 环境准备
 
@@ -48,8 +48,10 @@
 - 软件:
   - Python >= 3.9, < 3.12
   - CANN >= 8.0.0.beta1
-  - MindSpore (与vllm-mindspore版本配套)
-  - vLLM (与vllm-mindspore版本配套)
+  - MindSpore
+  - vLLM
+
+注:请参考[版本配套](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_zh_cn/getting_started/installation/installation.md),以获取详细的软件版本配套信息。
 
 # 快速体验

diff --git a/README_en.md b/README_en.md
index f99e67fc5..e402fc66a 100644
--- a/README_en.md
+++ b/README_en.md
@@ -13,7 +13,7 @@ vLLM MindSpore
 ---
 *Latest News* 🔥
 
-- [Coming Soon🏃] Adaptation for vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3), support for vLLM V1 architecture and the Qwen3 large model.
+- [2025/06] Adaptation for vLLM [v0.8.3](https://github.com/vllm-project/vllm/releases/tag/v0.8.3), support for vLLM V1 architecture and the Qwen3 large model.
 - [2025/04] Adaptation for vLLM [v0.7.3](https://github.com/vllm-project/vllm/releases/tag/v0.7.3), support Automatic Prefix Caching, Chunked Prefill, Multi-step Scheduling, and MTP. In collaboration with the openEuler community and Shanghai Jiao Tong University, we achieved full-stack open-source single-machine inference deployment for DeepSeek. You can read the detailed report [here](https://news.pku.edu.cn/xwzh/e13046c47d03471c8cebb950bd1f4598.htm).
 - [2025/03] Adaptation for vLLM [v0.6.6.post1](https://github.com/vllm-project/vllm/releases/tag/v0.6.6.post1) supporting the deployment of inference services for large models such as DeepSeek-V3/R1 and Qwen2.5 based on MindSpore using `vllm.entrypoints`. In collaboration with the openEuler community and Peking University, we released a full-stack open-source DeepSeek inference solution. You can read the detailed report [here](https://news.pku.edu.cn/xwzh/e13046c47d03471c8cebb950bd1f4598.htm).
 - [2025/02] The MindSpore community officially created the [mindspore/vllm-mindspore](https://gitee.com/mindspore/vllm-mindspore) repository, aiming to integrate MindSpore's large model inference capabilities into vLLM.
@@ -48,8 +48,10 @@ vLLM MindSpore uses the plugin mechanism recommended by the vLLM community to re
 - Software:
   - Python >= 3.9, < 3.12
   - CANN >= 8.0.0.beta1
-  - MindSpore (matched with the vllm-mindspore version)
-  - vLLM (matched with the vllm-mindspore version)
+  - MindSpore
+  - vLLM
+
+Note: Please refer to [Version Compatibility](https://gitee.com/mindspore/docs/blob/master/docs/vllm_mindspore/docs/source_en/getting_started/installation/installation.md) for more details about version compatibility information.
 
 # Getting Started

-- Gitee
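As background for the Quick Start section that the README change above points at, the snippet below sketches the offline-inference flow through vLLM's standard Python API. It is illustrative only and not part of the patch: it assumes vllm-mindspore is installed so the MindSpore backend plugin is registered automatically, and the model name and prompt are placeholders.

    # Minimal offline-inference sketch (assumption: vllm-mindspore is installed
    # and the placeholder model below is available locally or from the hub).
    from vllm import LLM, SamplingParams

    if __name__ == "__main__":
        llm = LLM(model="Qwen/Qwen2.5-7B-Instruct")  # placeholder model name
        sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=64)
        outputs = llm.generate(["Introduce MindSpore in one sentence."], sampling_params)
        for output in outputs:
            print(output.outputs[0].text)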
From 26ec377ad58c7b935ce691276be937ac880a6dc2 Mon Sep 17 00:00:00 2001
From: zlq2020
Date: Fri, 6 Jun 2025 16:46:35 +0800
Subject: [PATCH 16/76] refactor native qwen

---
 .../model_executor/layers/layernorm.py        |   6 +-
 .../models/mf_models/deepseek_v3.py           |   4 +-
 .../models/mf_models/mf_model_base.py         |  77 +-----
 .../model_executor/models/model_base.py       | 234 +++++++++++++++---
 vllm_mindspore/model_executor/models/qwen2.py | 188 ++------------
 vllm_mindspore/model_executor/models/utils.py |  24 --
 6 files changed, 231 insertions(+), 302 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/layernorm.py b/vllm_mindspore/model_executor/layers/layernorm.py
index 53050ae56..3e0251cbd 100644
--- a/vllm_mindspore/model_executor/layers/layernorm.py
+++ b/vllm_mindspore/model_executor/layers/layernorm.py
@@ -21,12 +21,12 @@ from typing import Optional, Tuple, Union, Any
 from mindspore import Parameter, Tensor, mint, ops
 from mindspore.common import dtype as mstype
 from mindspore.common.dtype import typing
+from mindspore import nn
 
 from vllm.config import get_current_vllm_config
-from vllm_mindspore.model_executor.custom_op import CustomOp
 
 
-class RMSNorm(CustomOp):
+class RMSNorm(nn.Cell):
     def __init__(
         self,
         hidden_size: int,
@@ -40,7 +40,7 @@ class RMSNorm(CustomOp):
         self.weight = Parameter(mint.ones(hidden_size, dtype=params_dtype))
         self.rms_norm = ops.RmsNorm(eps)
 
-    def forward_native(
+    def construct(
         self,
         x: Tensor,
         residual: Optional[Tensor] = None
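For reference, the fused ops.RmsNorm kept by the patched class computes x / sqrt(mean(x^2) + eps), scaled by the learned weight. The NumPy sketch below restates that semantics; it is illustrative only and not the MindSpore kernel itself.

    # NumPy restatement of RMSNorm semantics: x / sqrt(mean(x**2) + eps) * weight.
    import numpy as np

    def rms_norm(x: np.ndarray, weight: np.ndarray, eps: float = 1e-6) -> np.ndarray:
        # Reduce over the hidden dimension, then rescale by the learned weight.
        variance = np.mean(x.astype(np.float32) ** 2, axis=-1, keepdims=True)
        return (x / np.sqrt(variance + eps)) * weight

    x = np.random.randn(2, 8).astype(np.float32)
    print(rms_norm(x, np.ones(8, dtype=np.float32)).shape)  # (2, 8)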
diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
index 307c7b91a..deb68eec0 100644
--- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
@@ -187,9 +187,9 @@ class DeepseekV3ForCausalLM(MfModelBase):
         weight_processor.load_safetensors_shard(self.mf_config.load_checkpoint)
         return None
 
-    def prepare_inputs(self, input_ids, positions, attn_metadata):
+    def prepare_inputs(self, input_ids, positions):
         model_inputs, is_prefill = super().prepare_inputs(
-            input_ids, positions, attn_metadata)
+            input_ids, positions)
 
         attn_padding_idx, attn_unpadding_idx, ffn_padding_idx, ffn_unpadding_idx = _get_padding_index(
             model_inputs["q_seq_lens"])

diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 2fe0f8534..5e3dfc62e 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -45,7 +45,7 @@ from mindformers.tools.utils import is_pynative
 
 from vllm_mindspore.model_executor.models.model_base import MsModelBase
 from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask
-from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata
+
 
 logger = init_logger(__name__)
 
@@ -91,74 +91,8 @@ class MfModelBase(MsModelBase):
             dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.mf_model_config.compute_dtype)
             self.lm_head.set_inputs(dynamic_hidden_states)
-
-    def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor) -> MsAttentionMetadata:
-        input_len = input_ids.shape[0]
-        max_seq_len = ms.Tensor(input_len, dtype=ms.int32)
-        seq_lengths = ms.Tensor([input_len], dtype=ms.int32)
-        q_seq_lens_np = np.array([input_len], dtype=np.int32)
-        seq_lens_np = np.array([input_len], dtype=np.int32)
-
-        block_tables = ms.Tensor([[0]], dtype=ms.int32)
-        slot_mapping = [-1 for _ in range(input_len)]
-        slot_mapping = ms.Tensor(slot_mapping, dtype=ms.int32)
-        return MsAttentionMetadata(
-            max_seq_len=max_seq_len,
-            seq_lens=seq_lengths,
-            seq_lens_np=seq_lens_np,
-            block_tables=block_tables,
-            slot_mapping=slot_mapping,
-            q_seq_lens_np=q_seq_lens_np,
-            context_lens=0,
-            # To enforce prefill and decode are both complied in warmup process.
-            # So set max_context_lens to 0 for prefill and 1 for decode.
-            max_context_lens=0 if not self.set_flags else 1,
-            query_start_loc = None
-        )
-
-    def prepare_inputs(self, input_ids, positions, attn_metadata):
-        key_cache, value_cache = self.get_kvcache()
-        if not envs.VLLM_USE_V1:
-            # V0
-            seq_lens = attn_metadata.seq_lens
-            max_query_len = attn_metadata.max_query_len
-            # When Mutli-Step is enabled with Chunked-Prefill, prefills and
-            # decodes are scheduled together. In the first step, all the
-            # prefills turn into decodes and max_query_len will be 1.
-            if self.is_multi_step_chunked_prefill and max_query_len == 1:
-                query_lens = [1] * len(seq_lens)
-            else:
-                query_lens = attn_metadata.query_lens
-
-            seq_lens_np = np.array(seq_lens, dtype=np.int32)
-            query_lens_np = np.array(query_lens, dtype=np.int32)
-            kv_cache_lens = seq_lens_np - query_lens_np
-            if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max() == 0:
-                is_prefill = True
-            else:
-                is_prefill = False
-        else:
-            # V1
-            is_prefill = True if attn_metadata.max_context_lens == 0 else False
-            query_lens_np = attn_metadata.q_seq_lens_np
-            seq_lens_np = attn_metadata.seq_lens_np
-
-        q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32)
-        position_ids = ms.Tensor(positions, dtype=ms.int32)
-        attention_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np)
-
-        model_inputs = {}
-        model_inputs["input_ids"] = input_ids.astype(ms.int32)
-        model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np)
-        model_inputs["block_tables"] = attn_metadata.block_tables
-        model_inputs["slot_mapping"] = attn_metadata.slot_mapping
-        model_inputs["position_ids"] = position_ids
-        model_inputs["q_seq_lens"] = q_seq_lens
-        model_inputs["attention_mask"] = attention_mask
-        model_inputs["key_cache"] = key_cache
-        model_inputs["value_cache"] = value_cache
-
-        return model_inputs, is_prefill
+    def prepare_inputs(self, input_ids, positions):
+        return self.prepare_base_inputs(input_ids, positions)
 
     def update_model_inputs(self, model_inputs, **kwargs):
         return model_inputs
@@ -171,10 +105,7 @@ class MfModelBase(MsModelBase):
         inputs_embeds: Optional[Tensor] = None,
         **kwargs
     ) -> Union[Tensor, IntermediateTensors]:
-        attn_metadata = get_forward_context().attn_metadata
-        if attn_metadata is None:
-            attn_metadata = self._dummy_attention_metadata(input_ids, positions)
-        model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, attn_metadata)
+        model_inputs, is_prefill = self.prepare_inputs(input_ids, positions)
         model_inputs = self.update_model_inputs(model_inputs, **kwargs)
 
         # enable_mb_split is True in lager EP enable micro-batch and per-dp-bs > 1

diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py
index db9652147..4a9608451 100644
--- a/vllm_mindspore/model_executor/models/model_base.py
+++ b/vllm_mindspore/model_executor/models/model_base.py
@@ -17,7 +17,8 @@
 import os
 from abc import abstractmethod
-from typing import Dict, Iterable, Optional, Set, Tuple, Union
+from typing import Iterable, List, Optional, Set, Tuple, Union, Dict
+import numpy as np
 
 import mindspore as ms
 from mindspore import Tensor, mutable, nn
@@ -28,6 +29,15 @@ from vllm.forward_context import get_forward_context
 from vllm.model_executor.layers.sampler import SamplerOutput
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors
+import vllm.envs as envs
+
+import mindspore as ms
+from mindspore import Tensor, nn, mutable
+from mindspore.common import dtype as mstype
+
+from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask
+from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE
+from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata
 
 
 class AttentionWrapper:
@@ -91,6 +101,8 @@ class MsModelBase:
         self.is_multi_step = vllm_config.scheduler_config.is_multi_step
         self.is_multi_step_chunked_prefill = self.is_multi_step and self.enable_chunked_prefill
 
+        self.set_flags = False
+
     def get_model_path(self):
         model_name_or_path = self.model_config.model
         if os.path.isdir(model_name_or_path):
@@ -194,9 +206,143 @@ class MsModelBase:
                 **kwargs) -> Union[Tensor, IntermediateTensors]:
         raise NotImplementedError
 
+    def get_kvcache(self):
+        key_cache = []
+        value_cache = []
+        forward_context = get_forward_context()
+        for i in range(self.config.num_hidden_layers):
+            k_cache = self.kv_caches[i].kv_cache[
+                forward_context.virtual_engine][0]
+            v_cache = self.kv_caches[i].kv_cache[
+                forward_context.virtual_engine][1]
+            key_cache.append(k_cache)
+            value_cache.append(v_cache)
+        return mutable(key_cache), mutable(value_cache)
+
+    @abstractmethod
+    def compute_logits(
+        self,
+        hidden_states: Tensor,
+        sampling_metadata: SamplingMetadata,
+    ) -> Optional[Tensor]:
+        raise NotImplementedError(
+            "Function compute_logits should be Implemented!")
+
+    @abstractmethod
+    def sample(
+        self,
+        logits: Tensor,
+        sampling_metadata: SamplingMetadata,
+    ) -> Optional[SamplerOutput]:
+        raise NotImplementedError("Function sample should be Implemented!")
+
+    @abstractmethod
+    def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]:
+        raise NotImplementedError("Function load_weights should be Implemented!")
+
+
+    def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor):
+        input_len = input_ids.shape[0]
+        max_seq_len = ms.Tensor(input_len, dtype=ms.int32)
+        seq_lengths = ms.Tensor([input_len], dtype=ms.int32)
+        q_seq_lens_np = np.array([input_len], dtype=np.int32)
+        seq_lens_np = np.array([input_len], dtype=np.int32)
+        context_lens_tensor = ms.Tensor([0], dtype=ms.int32)
+
+        block_tables = ms.Tensor([[0]], dtype=ms.int32)
+        slot_mapping = [-1 for _ in range(input_len)]
+        slot_mapping = ms.Tensor(slot_mapping, dtype=ms.int32)
+        return MsAttentionMetadata(
+            max_seq_len=max_seq_len,
+            seq_lens=seq_lengths,
+            seq_lens_np=seq_lens_np,
+            block_tables=block_tables,
+            slot_mapping=slot_mapping,
+            q_seq_lens_np=q_seq_lens_np,
+            context_lens=context_lens_tensor,
+            # To enforce that prefill and decode are both compiled in the warmup
+            # process, set max_context_lens to 0 for prefill and 1 for decode.
+            max_context_lens=0 if not self.set_flags else 1,
+            query_start_loc = None
+        )
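The get_kvcache and _dummy_attention_metadata methods added above walk a nested cache layout: one AttentionWrapper per layer, each holding per-virtual-engine (key, value) pairs. The toy NumPy sketch below rebuilds that indexing so the [virtual_engine][0/1] access is easier to follow; all shapes and sizes are made up for illustration.

    # Toy model of the KV-cache indexing used by get_kvcache:
    # layers[layer][virtual_engine] -> (key_cache, value_cache).
    import numpy as np

    num_layers, shape = 2, (4, 16, 2, 8)  # blocks, block_size, kv_heads, head_size
    layers = [[(np.zeros(shape, np.float16), np.zeros(shape, np.float16))]
              for _ in range(num_layers)]  # a single virtual engine per layer

    virtual_engine = 0
    key_cache = [layers[i][virtual_engine][0] for i in range(num_layers)]
    value_cache = [layers[i][virtual_engine][1] for i in range(num_layers)]
    print(len(key_cache), key_cache[0].shape)  # 2 (4, 16, 2, 8)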
+
+
+    def prepare_base_inputs(self, input_ids, positions):
+        attn_metadata = get_forward_context().attn_metadata
+        if attn_metadata is None:
+            attn_metadata = self._dummy_attention_metadata(input_ids, positions)
+        key_cache, value_cache = self.get_kvcache()
+        if not envs.VLLM_USE_V1:
+            # V0
+            seq_lens = attn_metadata.seq_lens
+            max_query_len = attn_metadata.max_query_len
+            # When Multi-Step is enabled with Chunked-Prefill, prefills and
+            # decodes are scheduled together. In the first step, all the
+            # prefills turn into decodes and max_query_len will be 1.
+            if self.is_multi_step_chunked_prefill and max_query_len == 1:
+                query_lens = [1] * len(seq_lens)
+            else:
+                query_lens = attn_metadata.query_lens
+
+            seq_lens_np = np.array(seq_lens, dtype=np.int32)
+            query_lens_np = np.array(query_lens, dtype=np.int32)
+            kv_cache_lens = seq_lens_np - query_lens_np
+            if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max() == 0:
+                is_prefill = True
+            else:
+                is_prefill = False
+        else:
+            # V1
+            is_prefill = attn_metadata.max_context_lens == 0
+            query_lens_np = attn_metadata.q_seq_lens_np
+            seq_lens_np = attn_metadata.seq_lens_np
+
+        q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32)
+        position_ids = ms.Tensor(positions, dtype=ms.int32)
+        attention_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np)
+
+        model_inputs = {}
+        model_inputs["input_ids"] = input_ids.astype(ms.int32)
+        model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np)
+        model_inputs["block_tables"] = attn_metadata.block_tables
+        model_inputs["slot_mapping"] = attn_metadata.slot_mapping
+        model_inputs["position_ids"] = position_ids
+        model_inputs["q_seq_lens"] = q_seq_lens
+        model_inputs["attention_mask"] = attention_mask
+        model_inputs["key_cache"] = key_cache
+        model_inputs["value_cache"] = value_cache
+
+        return model_inputs, is_prefill
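The V0 branch of prepare_base_inputs above classifies a scheduled batch as pure prefill only when no decode tokens are present and every sequence's KV cache is still empty (seq_len equals query_len). A standalone sketch of that test, extracted for clarity:

    # Standalone sketch of the V0 prefill/decode test in prepare_base_inputs.
    import numpy as np

    def is_pure_prefill(seq_lens, query_lens, num_decode_tokens):
        # kv_cache_lens counts tokens already cached for each sequence.
        kv_cache_lens = np.array(seq_lens, np.int32) - np.array(query_lens, np.int32)
        return num_decode_tokens == 0 and kv_cache_lens.max() == 0

    print(is_pure_prefill([5, 3], [5, 3], 0))  # True: nothing cached yet
    print(is_pure_prefill([6, 4], [1, 1], 2))  # False: decode step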
dtype=mstype.int32) + dynamic_attention_mask = Tensor(shape=[None, None], dtype=self.model_config.dtype) + dyn_batch_valid_length = Tensor(shape=[None,], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[None, ], dtype=mstype.int32) dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) dyn_intermediate_tensors = None dyn_inputs_embeds = None - self.model.set_inputs( dyn_input_ids, dyn_position_ids, @@ -229,6 +375,7 @@ class MsModelBase: dyn_value_caches, is_prefill, dyn_slot_mapping, + dynamic_attention_mask, dyn_batch_valid_length, dyn_q_seq_lens, dyn_block_tables, @@ -236,37 +383,48 @@ class MsModelBase: dyn_inputs_embeds ) - def get_kvcache(self): - key_cache = [] - value_cache = [] - forward_context = get_forward_context() - for i in range(self.config.num_hidden_layers): - k_cache = self.kv_caches[i].kv_cache[ - forward_context.virtual_engine][0] - v_cache = self.kv_caches[i].kv_cache[ - forward_context.virtual_engine][1] - key_cache.append(k_cache) - value_cache.append(v_cache) - return mutable(key_cache), mutable(value_cache) + def prepare_inputs(self, input_ids, positions, intermediate_tensors, inputs_embeds): + model_inputs, is_prefill = self.prepare_base_inputs(input_ids, positions) - @abstractmethod - def compute_logits( - self, - hidden_states: Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[Tensor]: - raise NotImplementedError( - "Function compute_logits should be Implemented!") + # for multimodal model + model_inputs["intermediate_tensors"] = intermediate_tensors + model_inputs["inputs_embeds"] = inputs_embeds - @abstractmethod - def sample( + return model_inputs, is_prefill + + def exec_model( self, - logits: Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[SamplerOutput]: - raise NotImplementedError("Function sample should be Implemented!") + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs + ): + model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, intermediate_tensors, inputs_embeds) + + if self.prev_prefill != is_prefill and self.is_graph_mode: + self.set_model_inputs(is_prefill) + self.prev_prefill = is_prefill + + # for dummy_attention_metadata + if is_prefill and not self.set_flags: + self.set_flags = True + + if self.run_model is None: + self.run_model = ms.jit(function=self.model, jit_level='O0') if self.is_graph_mode else self.model + model_output = self.run_model( + input_ids=model_inputs["input_ids"], + positions=model_inputs["position_ids"], + key_caches=model_inputs["key_cache"], + value_caches=model_inputs["value_cache"], + is_prefill=is_prefill, + slot_mapping=model_inputs["slot_mapping"], + attn_mask=model_inputs["attention_mask"], + batch_valid_length=model_inputs["batch_valid_length"], + q_seq_lens=model_inputs["q_seq_lens"], + block_tables=model_inputs["block_tables"], + intermediate_tensors=model_inputs["intermediate_tensors"], + inputs_embeds=model_inputs["inputs_embeds"], + ) - @abstractmethod - def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: - raise NotImplementedError( - "Function load_weights should be Implemented!") + return model_output diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 36c36cd4c..27cf2b234 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -26,7 +26,7 @@ else: Qwen2Config = None import mindspore as ms -from mindspore import Parameter, 
Tensor, mint, mutable, nn, ops +from mindspore import Parameter, Tensor, mint, nn from mindspore.common import dtype as mstype import vllm.envs as envs @@ -53,13 +53,15 @@ from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) from vllm_mindspore.model_executor.model_loader.weight_utils import \ default_weight_loader +from vllm_mindspore.model_executor.models.attention_mask import \ + LowerTriangularMask +from vllm_mindspore.model_executor.models.model_base import (AttentionWrapper, + NativeModel) from vllm_mindspore.model_executor.models.utils import ( - PPMissingLayer, _jit, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix, set_enforce_eager) -from vllm_mindspore.model_executor.models.model_base import MsModelBase, AttentionWrapper -from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask + PPMissingLayer, make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) +from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE -from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata class Qwen2MLP(nn.Cell): @@ -286,9 +288,6 @@ class Qwen2Model(nn.Cell): self.config = config self.quant_config = quant_config self.vocab_size = config.vocab_size - if vllm_config.lora_config is not None: - vllm_config.model_config.enforce_eager = True - set_enforce_eager(vllm_config.model_config.enforce_eager) if get_pp_group().is_first_rank or (config.tie_word_embeddings and get_pp_group().is_last_rank): @@ -321,7 +320,6 @@ class Qwen2Model(nn.Cell): def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.embed_tokens(input_ids) - @_jit def construct( self, input_ids: Optional[Tensor], @@ -415,7 +413,7 @@ class Qwen2Model(nn.Cell): return loaded_params -class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): +class Qwen2ForCausalLM(NativeModel, SupportsLoRA): packed_modules_mapping = { "qkv_proj": [ "q_proj", @@ -460,162 +458,28 @@ class Qwen2ForCausalLM(MsModelBase, SupportsLoRA): quant_config=quant_config, prefix=maybe_prefix( prefix, "lm_head")) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.logits_processor = LogitsProcessor(self.config.vocab_size) + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) - self.set_modules({"model": self.model, "lm_head": self.lm_head}) - - self.prefill = True - self.mstype = STR_DTYPE_TO_MS_DTYPE.get(self.model_config.dtype, - self.model_config.dtype) - self.casual_mask = LowerTriangularMask( - dtype=self.mstype, max_model_len=self.model_config.max_model_len) - self.set_model_inputs(self.prefill) - self.kv_caches = [AttentionWrapper() for i in range(config.num_hidden_layers)] - compilation_config = vllm_config.compilation_config - - if prefix in compilation_config.static_forward_context: - raise ValueError(f"Duplicate layer name: {prefix}") - for i in range(config.num_hidden_layers): - compilation_config.static_forward_context[str( - i)] = self.kv_caches[i] - - def set_model_inputs(self, is_prefill): - dyn_input_ids = Tensor(shape=[None], dtype=mstype.int64) - dyn_position_ids = Tensor(shape=[None], dtype=mstype.int64) - - block_size = self.cache_config.block_size - num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) - head_size = self.model_config.get_head_size() - kv_cache_shape = (None, 
block_size, num_kv_heads, head_size) - - kv_cache_dtype = self.model_config.dtype if self.cache_config.cache_dtype == "auto" \ - else self.cache_config.cache_dtype - if kv_cache_dtype in STR_DTYPE_TO_MS_DTYPE: - kv_cache_dtype = STR_DTYPE_TO_MS_DTYPE[kv_cache_dtype] - - num_layers = self.model_config.get_num_layers(self.parallel_config) - - dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) - dyn_value_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) - dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) - dyn_value_caches = mutable( - [dyn_value_cache for _ in range(num_layers)]) - - dyn_slot_mapping = Tensor(shape=[ - None, - ], dtype=mstype.int32) - dynamic_attention_mask = Tensor(shape=[None, None], dtype=self.mstype) - dyn_batch_valid_length = Tensor(shape=[ - None, - ], dtype=mstype.int32) - dyn_q_seq_lens = Tensor(shape=[ - None, - ], dtype=mstype.int32) - dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) - dyn_intermediate_tensors = None - dyn_inputs_embeds = None - self.model.set_inputs(dyn_input_ids, dyn_position_ids, dyn_key_caches, - dyn_value_caches, is_prefill, dyn_slot_mapping, - dynamic_attention_mask, dyn_batch_valid_length, - dyn_q_seq_lens, dyn_block_tables, - dyn_intermediate_tensors, dyn_inputs_embeds) - - def forward(self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: IntermediateTensors = None, - inputs_embeds: Tensor = None, - **kwargs) -> Union[Tensor, IntermediateTensors]: - key_cache, value_cache = self.get_kvcache() - attn_metadata = get_forward_context().attn_metadata - input_ids = input_ids.to(ms.int64) - if attn_metadata is None: - attn_metadata = self._dummy_attention_metadata( - input_ids, positions) - if not envs.VLLM_USE_V1: - seq_lens = attn_metadata.seq_lens - max_query_len = attn_metadata.max_query_len - # When Mutli-Step is enabled with Chunked-Prefill, prefills and - # decodes are scheduled together. In the first step, all the - # prefills turn into decodes and max_query_len will be 1. 
- if self.is_multi_step_chunked_prefill and max_query_len == 1: - query_lens = [1] * len(seq_lens) - else: - query_lens = attn_metadata.query_lens - - seq_lens_np = np.array(seq_lens, dtype=np.int32) - query_lens_np = np.array(query_lens, dtype=np.int32) - kv_cache_lens = seq_lens_np - query_lens_np - is_prefill = attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max( - ) == 0 - slot_mapping = attn_metadata.slot_mapping - batch_valid_length = Tensor.from_numpy( - np.array(attn_metadata.seq_lens, dtype=np.int32)) - q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) - block_tables = attn_metadata.block_tables - position_ids = ms.Tensor(positions, dtype=ms.int32) - attn_mask = self.casual_mask.gen_attention_mask( - is_prefill, position_ids, query_lens) - else: - is_prefill = attn_metadata.max_context_lens == 0 - slot_mapping = attn_metadata.slot_mapping - batch_valid_length = Tensor.from_numpy(attn_metadata.seq_lens_np) - block_tables = attn_metadata.block_tables - query_lens_np = attn_metadata.q_seq_lens_np - attn_mask = self.casual_mask.gen_attention_mask( - is_prefill, positions, query_lens_np) - q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) - positions = positions.to(ms.int64) - if is_prefill: - if not self.prefill: - self.prefill = True - self.set_model_inputs(self.prefill) - else: - if self.prefill: - self.prefill = False - self.set_model_inputs(self.prefill) - model_output = self.model(input_ids, - positions, - key_cache, - value_cache, - is_prefill, - slot_mapping, - attn_mask, - batch_valid_length, - q_seq_lens, - block_tables, - intermediate_tensors, - inputs_embeds) - return model_output - - def _dummy_attention_metadata(self, input_ids: Tensor, - positions: Tensor) -> MsAttentionMetadata: - input_len = input_ids.shape[0] - max_seq_len = ms.Tensor(input_len, dtype=ms.int32) - seq_lengths = ms.Tensor([input_len], dtype=ms.int32) - q_seq_lens_np = np.array([input_len], dtype=np.int32) - seq_lens_np = np.array([input_len], dtype=np.int32) - - block_tables = ms.Tensor([[0]], dtype=ms.int32) - slot_mapping = [-1 for _ in range(input_len)] - slot_mapping = ms.Tensor(slot_mapping, dtype=ms.int32) - return MsAttentionMetadata( - max_seq_len=max_seq_len, - seq_lens=seq_lengths, - seq_lens_np=seq_lens_np, - block_tables=block_tables, - slot_mapping=slot_mapping, - q_seq_lens_np=q_seq_lens_np, - context_lens=0, - # To enforce prefill and decode are both complied in warmup process. - # So set max_context_lens to 0 for prefill and 1 for decode. - max_context_lens=0 if self.prefill else 1, - query_start_loc=None) + + self.common_preprocess(vllm_config, prefix) + + def forward( + self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs + ) -> Union[Tensor, IntermediateTensors]: + hidden_states = self.exec_model(input_ids, positions, + intermediate_tensors, inputs_embeds) + return hidden_states def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: params_dict = self.get_params_dict() diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 81b3f1923..279af7ad1 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -264,27 +264,3 @@ def merge_multimodal_embeddings( (input_ids == placeholder_token_id), multimodal_embeddings, ) -def set_enforce_eager(value): - """ - set global variable enforce_eager to value. 
- """ - global enforce_eager - enforce_eager = value - - -def _jit(func): - """ - A decorator to apply JIT compilation to a function or method. - """ - - @wraps(func) - def wrapper(*args, **kwargs): - if enforce_eager: - # If enforce_eager is True, we do not apply JIT compilation. - return func(*args, **kwargs) - if hasattr(func, "__wrapped_by_jit__"): - # If the function is already wrapped by JIT, we call it directly. - return func(*args, **kwargs) - return jit(func, jit_level="O0", infer_boost="on")(*args, **kwargs) - - return wrapper -- Gitee From 78bebfa742ce4512c9a15a2637afe606eae7ee10 Mon Sep 17 00:00:00 2001 From: JingweiHuang Date: Thu, 22 May 2025 15:35:40 +0800 Subject: [PATCH 17/76] [feature] Add an adaptation layer for MF MCore model --- .jenkins/test/config/dependent_packages.yaml | 2 +- install_depend_pkgs.sh | 2 +- tests/mindformers | 2 +- .../python/cases_parallel/vllm_mf_qwen3_8b.py | 75 ++++++ .../cases_parallel/vllm_mf_qwen3_8b_v1.py | 75 ++++++ tests/st/python/test_cases_parallel.py | 30 ++- .../model_executor/models/mf_models/config.py | 160 ++++++++++++ .../models/mf_models/mf_model_base.py | 120 ++++----- .../model_executor/models/mf_models/qwen3.py | 227 ++++++++++++++---- .../mf_models/qwen3_weight_processor.py | 115 --------- .../model_executor/models/registry.py | 2 +- vllm_mindspore/model_executor/models/utils.py | 16 +- vllm_mindspore/utils.py | 44 ++-- 13 files changed, 620 insertions(+), 250 deletions(-) create mode 100644 tests/st/python/cases_parallel/vllm_mf_qwen3_8b.py create mode 100644 tests/st/python/cases_parallel/vllm_mf_qwen3_8b_v1.py create mode 100644 vllm_mindspore/model_executor/models/mf_models/config.py delete mode 100644 vllm_mindspore/model_executor/models/mf_models/qwen3_weight_processor.py diff --git a/.jenkins/test/config/dependent_packages.yaml b/.jenkins/test/config/dependent_packages.yaml index 531bee81d..16ca50fdb 100644 --- a/.jenkins/test/config/dependent_packages.yaml +++ b/.jenkins/test/config/dependent_packages.yaml @@ -1,5 +1,5 @@ mindspore: - 'https://repo.mindspore.cn/mindspore/mindspore/version/202506/20250608/br_infer_iter_20250608031509_f31d63401e48787a7677f6e5c61745dd44304240_newest/' + 'https://repo.mindspore.cn/mindspore/mindspore/version/202506/20250613/br_infer_iter_20250613031508_11bcfd2ff4dc201a1c07e5d525cbeff7ec7f9558_newest/' mindspore_gs: 'https://repo.mindspore.cn/mindspore/golden-stick/version/202506/20250604/master_20250604160014_35fcbec4406d3b18faf02ef99fcbe2741e80348e_newest/' diff --git a/install_depend_pkgs.sh b/install_depend_pkgs.sh index b3d8306e2..97da181da 100644 --- a/install_depend_pkgs.sh +++ b/install_depend_pkgs.sh @@ -67,7 +67,7 @@ echo "========= Installing mindformers" mf_dir=mindformers-dev if [ ! -d "$mf_dir" ]; then git clone https://gitee.com/mindspore/mindformers.git -b dev "$mf_dir" - git checkout dfb8aa3a59401495b2d8c8c107d46fe0d36c949a + git checkout 13adb2201abe8979b679a98566495a8642d7ec0d else echo "The $mf_dir folder already exists and will not be re-downloaded." 
fi diff --git a/tests/mindformers b/tests/mindformers index f046081e4..13adb2201 160000 --- a/tests/mindformers +++ b/tests/mindformers @@ -1 +1 @@ -Subproject commit f046081e40be777eb799afee10495b51cdb2f3c1 +Subproject commit 13adb2201abe8979b679a98566495a8642d7ec0d diff --git a/tests/st/python/cases_parallel/vllm_mf_qwen3_8b.py b/tests/st/python/cases_parallel/vllm_mf_qwen3_8b.py new file mode 100644 index 000000000..48de16921 --- /dev/null +++ b/tests/st/python/cases_parallel/vllm_mf_qwen3_8b.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""test mf qwen.""" +import os + +import pytest + +from tests.st.python import set_env + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "vLLM_MODEL_BACKEND": "MindFormers", + "MS_ENABLE_LCCL": "off", + "HCCL_OP_EXPANSION_MODE": "AIV", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", + "VLLM_USE_V1": "0" +} +# set env +env_manager.setup_ai_environment(env_vars) +# isort: off +import vllm_mindspore +from vllm import LLM, SamplingParams +# isort: on + + +def test_mf_qwen3(): + """ + test case qwen3 8B + """ + + # Sample prompts. + prompts = [ + "You are a helpful assistant.<|User|>将文本分类为中性、负面或正面。 \n文本:我认为这次假期还可以。 \n情感:<|Assistant|>\n", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=10, top_k=1) + + # Create an LLM. + llm = LLM(model="/home/workspace/mindspore_dataset/weight/Qwen3-8B", + gpu_memory_utilization=0.9, + tensor_parallel_size=2) + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + except_list = ['好的,我需要分析用户提供的文本“我认为'] + # Print the outputs. + for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text == except_list[i] + + # unset env + env_manager.unset_all() diff --git a/tests/st/python/cases_parallel/vllm_mf_qwen3_8b_v1.py b/tests/st/python/cases_parallel/vllm_mf_qwen3_8b_v1.py new file mode 100644 index 000000000..aeb62ef7a --- /dev/null +++ b/tests/st/python/cases_parallel/vllm_mf_qwen3_8b_v1.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""test mf qwen.""" +import os + +import pytest + +from tests.st.python import set_env + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "vLLM_MODEL_BACKEND": "MindFormers", + "MS_ENABLE_LCCL": "off", + "HCCL_OP_EXPANSION_MODE": "AIV", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", + "VLLM_USE_V1": "1" +} +# set env +env_manager.setup_ai_environment(env_vars) +# isort: off +import vllm_mindspore +from vllm import LLM, SamplingParams +# isort: on + + +def test_mf_qwen3(): + """ + test case qwen3 8B + """ + + # Sample prompts. + prompts = [ + "You are a helpful assistant.<|User|>将文本分类为中性、负面或正面。 \n文本:我认为这次假期还可以。 \n情感:<|Assistant|>\n", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=10, top_k=1) + + # Create an LLM. + llm = LLM(model="/home/workspace/mindspore_dataset/weight/Qwen3-8B", + gpu_memory_utilization=0.9, + tensor_parallel_size=2) + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + except_list = ['好的,我需要分析用户提供的文本“我认为'] + # Print the outputs. + for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text == except_list[i] + + # unset env + env_manager.unset_all() diff --git a/tests/st/python/test_cases_parallel.py b/tests/st/python/test_cases_parallel.py index 35d31ea8c..f0ef9f1bc 100644 --- a/tests/st/python/test_cases_parallel.py +++ b/tests/st/python/test_cases_parallel.py @@ -99,7 +99,8 @@ def test_cases_parallel_part1(): "export HCCL_IF_BASE_PORT=61004 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_prefix_caching_v1.py::test_mf_qwen_7b_prefix_caching " "> vllm_mf_qwen_7b_prefix_caching_v1_test_mf_qwen_7b_prefix_caching.log", - "vllm_mf_qwen_7b_prefix_caching_v1_test_mf_qwen_7b_prefix_caching.log"), + "vllm_mf_qwen_7b_prefix_caching_v1_test_mf_qwen_7b_prefix_caching.log" + ), ("export ASCEND_RT_VISIBLE_DEVICES=6,7 && export LCAL_COMM_ID=127.0.0.1:10071 && " "export HCCL_IF_BASE_PORT=61006 && " "pytest -s -v cases_parallel/vllm_mf_qwen_7b_v1.py::test_mf_qwen > vllm_mf_qwen_7b_v1_test_mf_qwen.log", @@ -212,6 +213,33 @@ def test_cases_parallel_part4(): check_results(commands, results) +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend910b_training +@pytest.mark.env_single +def test_cases_parallel_part5(): + """ + Feature: test cases parallel. + Description: test cases parallel. + Expectation: Pass. 
+ """ + commands = [ + ("export ASCEND_RT_VISIBLE_DEVICES=0,1 && export LCAL_COMM_ID=127.0.0.1:10068 && " + "export HCCL_IF_BASE_PORT=61000 && " + "pytest -s -v cases_parallel/vllm_mf_qwen3_8b.py::test_mf_qwen3 " + "> vllm_mf_qwen3_8b_test_mf_qwen3.log", + "vllm_mf_qwen3_8b_test_mf_qwen3.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=2,3 && export LCAL_COMM_ID=127.0.0.1:10069 && " + "export HCCL_IF_BASE_PORT=61002 && " + "pytest -s -v cases_parallel/vllm_mf_qwen3_8b_v1.py::test_mf_qwen3 " + "> vllm_mf_qwen3_8b_v1_test_mf_qwen3.log", + "vllm_mf_qwen3_8b_v1_test_mf_qwen3.log") + ] + + with Pool(len(commands)) as pool: + results = list(pool.imap(run_command, commands)) + check_results(commands, results) + + @pytest.mark.level1 @pytest.mark.platform_arm_ascend910b_training @pytest.mark.env_single diff --git a/vllm_mindspore/model_executor/models/mf_models/config.py b/vllm_mindspore/model_executor/models/mf_models/config.py new file mode 100644 index 000000000..ff6003741 --- /dev/null +++ b/vllm_mindspore/model_executor/models/mf_models/config.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2025 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import types + +from mindformers.models.configuration_utils import PretrainedConfig +from mindformers.tools.register.config import MindFormerConfig +from vllm.config import VllmConfig + +MF_CTX_MAPPING = { + 'run_mode': (None, "predict"), + 'use_legacy': (None, False), + 'load_ckpt_format': (None, 'safetensors'), + 'auto_trans_ckpt': (None, True), +} + +MF_PARALLEL_MAPPING = { + 'parallel_mode': (None, 'STAND_ALONE'), + 'parallel_config.model_parallel': + ('parallel_config.tensor_parallel_size', None), + 'parallel_config.pipeline_stage': + ('parallel_config.pipeline_parallel_size', None), + 'parallel_config.vocab_emb_dp': (None, False) +} + +# Common model config +MODEL_COMMON_MAPPING = { + 'seq_length': ('model_config.max_model_len', None), + 'use_flash_attention': (None, True), + "compute_dtype": ('model_config.hf_config.torch_dtype', 'bfloat16'), + 'architectures': ('model_config.hf_config.architectures', None), + 'bos_token_id': ('model_config.hf_config.bos_token_id', None), + 'eos_token_id': ('model_config.hf_config.eos_token_id', None), + 'model_type': ('model_config.hf_config.model_type', None), + # transformer_config + 'attention_dropout': ('model_config.hf_config.attention_dropout', None), + 'hidden_act': ('model_config.hf_config.hidden_act', None), + 'hidden_size': ('model_config.hf_config.hidden_size', None), + 'intermediate_size': ('model_config.hf_config.intermediate_size', None), + 'max_position_embeddings': + ('model_config.hf_config.max_position_embeddings', None), + 'num_attention_heads': + ('model_config.hf_config.num_attention_heads', None), + 'rms_norm_eps': ('model_config.hf_config.rms_norm_eps', None), + 'num_hidden_layers': ('model_config.hf_config.num_hidden_layers', None), + 'num_layers': 
('model_config.hf_config.num_layers', None),
+    'num_key_value_heads':
+    ('model_config.hf_config.num_key_value_heads', None),
+    'n_kv_heads': ('model_config.hf_config.n_kv_heads', None),
+    'head_dim': ('model_config.hf_config.head_dim', None),
+    'rope_theta': ('model_config.hf_config.rope_theta', None),
+    'tie_word_embeddings':
+    ('model_config.hf_config.tie_word_embeddings', None),
+    'vocab_size': ('model_config.hf_config.vocab_size', None),
+}
+
+# model default config
+MODEL_RELATED_MAPPING = {
+    'qwen2': {
+        "gated_linear_unit": True,
+        'params_dtype': 'float32',  # need an input
+        'add_qkv_bias': True,
+    },
+    'qwen3': {
+        "gated_linear_unit": True,
+        'params_dtype': 'float32',  # need an input
+        'add_qkv_bias': False,
+    }
+    # Add another model type...
+}
+
+
+def get_nested_attr(obj, path: str, default=None):
+    """Get a nested attribute from obj."""
+    current = obj
+    for attr in path.split('.'):
+        if not hasattr(current, attr):
+            return default
+        current = getattr(current, attr)
+    return current
+
+
+def set_nested_attr(obj, path: str, value):
+    """Set a nested attribute of a MindFormerConfig."""
+    attrs = path.split('.')
+
+    current = obj
+    for attr in attrs[:-1]:
+        if not hasattr(current, attr) or getattr(current, attr) is None:
+            setattr(current, attr, MindFormerConfig())
+        current = getattr(current, attr)
+
+    setattr(current, attrs[-1], value)
+
+
+def transform_config(mapping_table: dict, vllm_config: VllmConfig,
+                     target_config):
+    for target_path, mapping in mapping_table.items():
+        src_path, transform = mapping
+
+        src_value = get_nested_attr(vllm_config,
+                                    src_path) if src_path is not None else None
+
+        if src_value is not None:
+            transformed_value = src_value
+        elif transform and isinstance(
+                transform, (types.FunctionType, types.BuiltinFunctionType)):
+            transformed_value = transform(src_value)
+        else:
+            transformed_value = transform
+
+        if transformed_value is not None:
+            set_nested_attr(target_config, target_path, transformed_value)
+
+
+def gen_model_relatived_config(model_type):
+    return MODEL_RELATED_MAPPING.get(model_type)
+
+
+def gen_model_config_dict(vllm_config: VllmConfig):
+    target_config = MindFormerConfig()
+
+    transform_config(MODEL_COMMON_MAPPING, vllm_config, target_config)
+
+    model_type = vllm_config.model_config.hf_config.model_type
+    model_related_config = gen_model_relatived_config(model_type)
+    target_config.update(model_related_config)
+
+    return target_config
+
+
+def gen_mf_config(vllm_config: VllmConfig):
+    target_config = MindFormerConfig()
+    transform_config(MF_CTX_MAPPING, vllm_config, target_config)
+    transform_config(MF_PARALLEL_MAPPING, vllm_config, target_config)
+    target_config.set_value(
+        'model.model_config',
+        MindFormerConfig(**gen_model_config_dict(vllm_config)))
+    return target_config
+
+
+def gen_model_config(mf_config: MindFormerConfig,
+                     model_config_type: PretrainedConfig):
+    model_config = model_config_type(**mf_config.model.model_config,
+                                     parallel_config=mf_config.parallel_config)
+    model_config.post_process = False
+    return model_config
diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 5e3dfc62e..56cabb1f7 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# encoding: utf-8
 # Copyright 2025 Huawei Technologies Co., Ltd
 # Copyright 2024 The vLLM team.
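# A minimal illustrative sketch (not from the patch) of how the mapping
# tables in config.py above are consumed: transform_config walks
# (source_path, default) pairs and copies values into a MindFormerConfig.
# The SimpleNamespace objects below stand in for a full VllmConfig.
from types import SimpleNamespace

from mindformers.tools.register.config import MindFormerConfig

fake_vllm_config = SimpleNamespace(
    model_config=SimpleNamespace(max_model_len=4096))
target = MindFormerConfig()
transform_config({'seq_length': ('model_config.max_model_len', None)},
                 fake_vllm_config, target)
assert target.seq_length == 4096  # copied from model_config.max_model_len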
 #
@@ -17,62 +16,64 @@
 # ============================================================================
 
 import os
-from types import MethodType
-from typing import Iterable, List, Optional, Set, Tuple, Union
 from abc import abstractmethod
-import numpy as np
-import math
-
-from vllm.config import VllmConfig
-from vllm.model_executor.layers.sampler import SamplerOutput
-from vllm.model_executor.sampling_metadata import SamplingMetadata
-from vllm.sequence import IntermediateTensors
-from vllm.distributed import get_tensor_model_parallel_world_size
-from vllm.distributed.parallel_state import get_dp_group
-from vllm.logger import init_logger
-from vllm.forward_context import get_forward_context
-import vllm.envs as envs
+from typing import Iterable, Optional, Set, Tuple, Union
 
 import mindspore as ms
-from mindspore import Tensor, mint
-from mindspore.common.api import _pynative_executor
-from mindspore.communication import get_rank
-
-from mindformers.tools.register.config import MindFormerConfig
 from mindformers.core.context import build_mf_context
 from mindformers.core.parallel_config import build_parallel_config
+from mindformers.tools.register.config import MindFormerConfig
 from mindformers.tools.utils import is_pynative
+from mindspore import Tensor, mint
+from mindspore.common.api import _pynative_executor
+from mindspore.communication import get_rank
+from vllm.config import VllmConfig
+from vllm.distributed import get_tensor_model_parallel_world_size
+from vllm.distributed.parallel_state import get_dp_group
+from vllm.logger import init_logger
+from vllm.model_executor.layers.sampler import SamplerOutput
+from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.sequence import IntermediateTensors
+from vllm_mindspore.model_executor.models.attention_mask import (
+    LowerTriangularMask)
 from vllm_mindspore.model_executor.models.model_base import MsModelBase
-from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask
-
 
 logger = init_logger(__name__)
 
+
 class MfModelBase(MsModelBase):
+
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
-        super(MfModelBase, self).__init__(
-            vllm_config=vllm_config, prefix=prefix
-        )
+        super().__init__(vllm_config=vllm_config, prefix=prefix)
+
+        self.set_flags = False
 
-        self.mf_config = MindFormerConfig(os.getenv("MINDFORMERS_MODEL_CONFIG"))
+        model_config_path = os.getenv("MINDFORMERS_MODEL_CONFIG")
+        if model_config_path is None:
+            raise RuntimeError(
+                'For "MindFormers" model backend, the environment variable MINDFORMERS_MODEL_CONFIG must be set!'
+            )
+
+        self.mf_config = MindFormerConfig(model_config_path)
         self.rank_id = get_rank()
         self.dp_size = get_dp_group()
+
         build_mf_context(self.mf_config)
         build_parallel_config(self.mf_config)
         self.mf_config.model.model_config.parallel_config = (
-            self.mf_config.parallel_config
-        )
+            self.mf_config.parallel_config)
         self.mf_config.model.model_config.parallel_config.model_parallel = (
-            get_tensor_model_parallel_world_size()
-        )
+            get_tensor_model_parallel_world_size())
         self.mf_config.model.model_config.parallel_config.pipeline_stage = 1
         self._generate_model_config()
-        self.casual_mask = LowerTriangularMask(dtype=self.mf_model_config.compute_dtype,
-                                               max_model_len=self.model_config.max_model_len)
+        self.casual_mask = LowerTriangularMask(
+            dtype=self.mf_model_config.compute_dtype,
+            max_model_len=self.model_config.max_model_len)
         self.network, self.lm_head = self._create_network()
 
-        affinity_config = self.mf_config.get('context', {}).get('affinity_cpu_list', {})
+        affinity_config = self.mf_config.get('context',
+                                             {}).get('affinity_cpu_list', {})
         if isinstance(affinity_config, dict):
             ms.runtime.set_cpu_affinity(True, affinity_config)
 
@@ -80,15 +81,18 @@ class MfModelBase(MsModelBase):
 
     @abstractmethod
     def _generate_model_config(self):
-        raise NotImplementedError("Function _generate_model_config should be Implemented!")
+        raise NotImplementedError(
+            "Function _generate_model_config should be implemented!")
 
     @abstractmethod
     def _create_network(self):
-        raise NotImplementedError("Function _create_network should be Implemented!")
+        raise NotImplementedError(
+            "Function _create_network should be implemented!")
 
     def _set_dynamic_inputs(self):
         self.network.set_dynamic_inputs()
-        dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.mf_model_config.compute_dtype)
+        dynamic_hidden_states = Tensor(
+            shape=[None, None], dtype=self.mf_model_config.compute_dtype)
         self.lm_head.set_inputs(dynamic_hidden_states)
 
     def prepare_inputs(self, input_ids, positions):
@@ -97,26 +101,26 @@ class MfModelBase(MsModelBase):
     def update_model_inputs(self, model_inputs, **kwargs):
         return model_inputs
 
-    def forward(
-        self,
-        input_ids: Tensor,
-        positions: Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[Tensor] = None,
-        **kwargs
-    ) -> Union[Tensor, IntermediateTensors]:
+    def forward(self,
+                input_ids: Tensor,
+                positions: Tensor,
+                intermediate_tensors: Optional[IntermediateTensors] = None,
+                inputs_embeds: Optional[Tensor] = None,
+                **kwargs) -> Union[Tensor, IntermediateTensors]:
         model_inputs, is_prefill = self.prepare_inputs(input_ids, positions)
         model_inputs = self.update_model_inputs(model_inputs, **kwargs)
-
+
         # enable_mb_split is True when large-EP micro-batching is enabled and per-dp batch size > 1
-        enable_mb_split = self.is_enable_micro_batch_split(is_prefill, model_inputs["q_seq_lens"])
+        enable_mb_split = self.is_enable_micro_batch_split(
+            is_prefill, model_inputs["q_seq_lens"])
 
         if is_prefill:
             if self.enable_micro_batch:
                 self.network.phase = "prefill" if not enable_mb_split else "prefill_micro_batch"
                 if not self.set_flags or is_pynative() or enable_mb_split:
-                    self.network.add_flags_custom(is_first_iteration=is_first_iteration)
-                    self.network.add_flags_enable_micro_batch(enable_micro_batch=enable_mb_split)
+                    self.network.add_flags_custom(is_first_iteration=True)
+                    self.network.add_flags_enable_micro_batch(
+                        enable_micro_batch=enable_mb_split)
             else:
                 self.network.phase = "prefill"
                 if not self.set_flags or is_pynative():
@@ -139,11 +143,14 @@ class MfModelBase(MsModelBase):
     ) -> Optional[Tensor]:
         if sampling_metadata is not None:
             selected_token_indices = sampling_metadata.selected_token_indices
-            if selected_token_indices is not None and selected_token_indices.numel() <= 0:
-                logits = ms.mint.zeros((0, self.mf_model_config.vocab_size),
-                                       dtype=self.mf_model_config.compute_dtype)
+            if selected_token_indices is not None and selected_token_indices.numel(
+            ) <= 0:
+                logits = ms.mint.zeros(
+                    (0, self.mf_model_config.vocab_size),
+                    dtype=self.mf_model_config.compute_dtype)
             else:
-                hidden_states = hidden_states.index_select(0, selected_token_indices)
+                hidden_states = hidden_states.index_select(
+                    0, selected_token_indices)
                 logits = self.lm_head(hidden_states)
                 logits = logits.view(-1, logits.shape[-1])
         else:
@@ -162,12 +169,15 @@ class MfModelBase(MsModelBase):
 
     def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]:
         raise NotImplementedError("load_weights not implemented.")
-
+
     def is_enable_micro_batch_split(self, is_prefill, q_seq_lens):
         """Judge whether to enable the micro-batch split."""
         if self.enable_micro_batch:
-            is_prefill_cur_dp = mint.ones((1), dtype=ms.int8) if is_prefill else mint.zeros((1), dtype=ms.int8)
+            is_prefill_cur_dp = mint.ones(
+                (1), dtype=ms.int8) if is_prefill else mint.zeros(
+                    (1), dtype=ms.int8)
             is_prefill_all_dp = get_dp_group().all_gather(is_prefill_cur_dp)
-            return is_prefill_all_dp.sum() == self.dp_size and q_seq_lens.shape[0] > 1
+            return is_prefill_all_dp.sum(
+            ) == self.dp_size and q_seq_lens.shape[0] > 1
         else:
             return False
diff --git a/vllm_mindspore/model_executor/models/mf_models/qwen3.py b/vllm_mindspore/model_executor/models/mf_models/qwen3.py
index a5a8b01d6..a11a93faa 100644
--- a/vllm_mindspore/model_executor/models/mf_models/qwen3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/qwen3.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# encoding: utf-8
 # Copyright 2025 Huawei Technologies Co., Ltd
 # Copyright 2024 The vLLM team.
 #
@@ -16,70 +15,210 @@
 # limitations under the License.
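# A minimal illustrative sketch (not from the patch) of the decision rule in
# is_enable_micro_batch_split above, with the data-parallel all_gather
# replaced by a plain numpy array of per-rank prefill flags; all values are
# hypothetical.
import numpy as np

dp_size = 4
is_prefill_all_dp = np.array([1, 1, 1, 1], dtype=np.int8)  # 1 = rank prefills
q_seq_lens = np.array([128, 96], dtype=np.int32)           # per-DP batch of 2
enable_mb_split = (is_prefill_all_dp.sum() == dp_size
                   and q_seq_lens.shape[0] > 1)
assert enable_mb_split  # split only when every DP rank is in prefill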
# ============================================================================ -from typing import Iterable, Set, Tuple - -from vllm.config import VllmConfig -from vllm.config import get_current_vllm_config -from vllm.logger import init_logger - -from mindspore import Tensor, JitConfig +from typing import Iterable, Optional, Tuple, Union + +import mindspore as ms +import numpy as np +from mindformers.core.context import build_mf_context +from mindformers.core.parallel_config import build_parallel_config +from mindformers.models.qwen3.configuration_qwen3 import Qwen3Config +from mindformers.models.qwen3.modeling_qwen3 import ( # noqa + Qwen3ForCausalLM as Qwen3ForCausalLM_MF) +from mindformers.tools.utils import is_pynative +from mindspore import Tensor, ops +from mindspore.common.api import _pynative_executor from mindspore.nn.utils import no_init_parameters - -from mindformers.models.llama import LlamaConfig as LlamaConfig_MF -from research.qwen3.qwen3 import ( - ParallelQwen3ForCausalLM as ParallelQwenForCausalLM_MF, -) +from vllm import envs +from vllm.config import VllmConfig, get_current_vllm_config +from vllm.forward_context import get_forward_context +from vllm.logger import init_logger +from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors from vllm_mindspore.model_executor.layers.sampler import get_sampler -from vllm_mindspore.model_executor.models.model_base import Fake_Attention -from vllm_mindspore.model_executor.models.mf_models.mf_model_base import MfModelBase -from vllm_mindspore.model_executor.models.mf_models.qwen3_weight_processor import Qwen3WeightProcessor - +from vllm_mindspore.model_executor.models.attention_mask import ( + LowerTriangularMask) +from vllm_mindspore.model_executor.models.mf_models.config import ( + gen_mf_config, gen_model_config) +from vllm_mindspore.model_executor.models.model_base import (AttentionWrapper, + MsModelBase) logger = init_logger(__name__) -class Qwen3ForCausalLM(MfModelBase): +class Qwen3ForCausalLM(MsModelBase): + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: - super(Qwen3ForCausalLM, self).__init__(vllm_config=vllm_config, prefix=prefix) - self.mf_kvcaches_init = False + super().__init__(vllm_config=vllm_config, prefix=prefix) + self.set_flags = False + + mf_config = gen_mf_config(vllm_config) + mf_config.load_checkpoint = self.get_model_path() + self.mf_config = mf_config + + build_mf_context(self.mf_config) + build_parallel_config(self.mf_config) + + self._generate_model_config() + self.casual_mask = LowerTriangularMask( + dtype=self.mf_model_config.compute_dtype, + max_model_len=self.mf_model_config.seq_length) + self.network, self.lm_head = self._create_network() + + affinity_config = self.mf_config.get('context', + {}).get('affinity_cpu_list', {}) + if isinstance(affinity_config, dict): + ms.runtime.set_cpu_affinity(True, affinity_config) + + self._set_dynamic_inputs() self.sampler = get_sampler() self.set_modules({"model": self.network}) - - self.kv_caches = [Fake_Attention() for i in range(self.mf_model_config.num_layers)] + self.kv_caches = [ + AttentionWrapper() + for _ in range(self.mf_model_config.num_hidden_layers) + ] compilation_config = get_current_vllm_config().compilation_config if prefix in compilation_config.static_forward_context: raise ValueError(f"Duplicate layer name: {prefix}") - for i in range(self.mf_model_config.num_layers): - 
compilation_config.static_forward_context[str(i)] = self.kv_caches[i]
+        for i in range(self.mf_model_config.num_hidden_layers):
+            compilation_config.static_forward_context[str(
+                i)] = self.kv_caches[i]
 
-        self.set_flags = False
+        self.cast = ops.Cast()
 
-    def _generate_model_config(self):
-        self.mf_config.load_checkpoint = self.get_model_path()
-        self.mf_model_config = LlamaConfig_MF(**self.mf_config.model.model_config)
-        if self.mf_config.moe_config:
-            self.mf_model_config.moe_config = self.mf_config.moe_config
-        self.mf_model_config.return_hidden_states = True
+    def _set_dynamic_inputs(self):
+        self.network.set_dynamic_inputs()
+        dynamic_hidden_states = Tensor(
+            shape=[None, None], dtype=self.mf_model_config.compute_dtype)
+        self.lm_head.set_inputs(dynamic_hidden_states)
 
-        # qwen qkv concat will support in next version
-        self.mf_model_config.qkv_concat = False
-        setattr(self.mf_model_config, 'npu_mem_size', -1)
-        self.mf_config.model.model_config.qkv_concat = False
+    def prepare_inputs(self, input_ids, positions):
+
+        attn_metadata = get_forward_context().attn_metadata
+        if attn_metadata is None:
+            attn_metadata = self._dummy_attention_metadata(
+                input_ids, positions)
+        key_cache, value_cache = self.get_kvcache()
+        if not envs.VLLM_USE_V1:
+            # V0
+            seq_lens = attn_metadata.seq_lens
+            max_query_len = attn_metadata.max_query_len
+            # When Multi-Step is enabled with Chunked-Prefill, prefills and
+            # decodes are scheduled together. In the first step, all the
+            # prefills turn into decodes and max_query_len will be 1.
+            if self.is_multi_step_chunked_prefill and max_query_len == 1:
+                query_lens = [1] * len(seq_lens)
+            else:
+                query_lens = attn_metadata.query_lens
+
+            seq_lens_np = np.array(seq_lens, dtype=np.int32)
+            query_lens_np = np.array(query_lens, dtype=np.int32)
+            kv_cache_lens = seq_lens_np - query_lens_np
+            if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max(
+            ) == 0:
+                is_prefill = True
+            else:
+                is_prefill = False
+            context_lens_tensor = ms.from_numpy(kv_cache_lens)
+        else:
+            # V1
+            is_prefill = attn_metadata.max_context_lens == 0
+            query_lens_np = attn_metadata.q_seq_lens_np
+            seq_lens_np = attn_metadata.seq_lens_np
+            context_lens_tensor = attn_metadata.context_lens
+
+        q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32)
+        position_ids = ms.Tensor(positions, dtype=ms.int32)
+        attention_mask = self.casual_mask.gen_attention_mask(
+            is_prefill, positions, query_lens_np)
+
+        model_inputs = {}
+        model_inputs["input_ids"] = input_ids.astype(ms.int32)
+        model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np)
+        model_inputs["block_tables"] = attn_metadata.block_tables
+        model_inputs["slot_mapping"] = attn_metadata.slot_mapping
+        model_inputs["positions"] = position_ids
+        model_inputs["q_seq_lens"] = q_seq_lens
+        model_inputs["attention_mask"] = attention_mask
+        model_inputs["key_cache"] = key_cache
+        model_inputs["value_cache"] = value_cache
+        model_inputs["context_lens_tensor"] = context_lens_tensor
+
+        return model_inputs, is_prefill
+
+    def forward(self,
+                input_ids: Tensor,
+                positions: Tensor,
+                intermediate_tensors: Optional[IntermediateTensors] = None,
+                inputs_embeds: Optional[Tensor] = None,
+                **kwargs) -> Union[Tensor, IntermediateTensors]:
+        model_inputs, is_prefill = self.prepare_inputs(input_ids, positions)
+        model_inputs = self.update_model_inputs(model_inputs, **kwargs)
+
+        if is_prefill:
+            self.network.phase = "prefill"
+            if not self.set_flags or is_pynative():
+                self.network.add_flags_custom_mcore(is_prefill=True)
+            hidden_states = 
self.network(**model_inputs) + self.network.phase = "increment" + if not self.set_flags or is_pynative(): + self.network.add_flags_custom_mcore(is_prefill=False) + self.set_flags = True + else: + hidden_states = self.network(**model_inputs) + + return hidden_states + + def _generate_model_config(self): + self.mf_model_config = gen_model_config(self.mf_config, Qwen3Config) + logger.debug("=====mf_model_config====\n", self.mf_model_config) def _create_network(self): # Initial network with no_init_parameters(): # Delay initialization - network = ParallelQwenForCausalLM_MF(self.mf_model_config) - return network, network.lm_head - - def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: - weight_processor = Qwen3WeightProcessor(self.mf_config, self.network, False) - weight_processor.load_safetensors_shard(self.mf_config.load_checkpoint) - + network = Qwen3ForCausalLM_MF(self.mf_model_config) + return network, network.model.output_layer + + def update_model_inputs(self, model_inputs, **kwargs): + return model_inputs + + def compute_logits( + self, + hidden_states: Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[Tensor]: + if sampling_metadata is not None: + selected_token_indices = sampling_metadata.selected_token_indices + if selected_token_indices is not None and selected_token_indices.numel( + ) <= 0: + logits = ms.mint.zeros( + (0, self.mf_model_config.vocab_size), + dtype=self.mf_model_config.compute_dtype) + else: + hidden_states = hidden_states.reshape( + (-1, hidden_states.shape[-1])) + hidden_states = hidden_states.index_select( + 0, selected_token_indices) + logits = self.lm_head(hidden_states) + logits = logits.view(-1, logits.shape[-1]) + else: + logits = self.lm_head(hidden_states) + logits = logits.view(-1, logits.shape[-1]) + return logits + + def sample( + self, + logits: Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + _pynative_executor.sync() + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, Tensor]]): + self.network.load_weights(self.mf_config.load_checkpoint) self.network.set_dynamic_inputs() - dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.mf_model_config.compute_dtype) - self.lm_head.set_inputs(dynamic_hidden_states) return None diff --git a/vllm_mindspore/model_executor/models/mf_models/qwen3_weight_processor.py b/vllm_mindspore/model_executor/models/mf_models/qwen3_weight_processor.py deleted file mode 100644 index 338616caf..000000000 --- a/vllm_mindspore/model_executor/models/mf_models/qwen3_weight_processor.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2025 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -""" -transform huggingface model to mindspore safetensor. 
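# A minimal illustrative sketch (not from the patch) of what compute_logits
# above does with sampling_metadata.selected_token_indices: only positions
# that actually need sampling are gathered before the lm_head projection.
# The shapes below are hypothetical.
import mindspore as ms
from mindspore import mint

hidden_states = mint.ones((6, 4), dtype=ms.float16)  # [num_tokens, hidden]
selected = ms.Tensor([2, 5], dtype=ms.int32)         # last token of each seq
gathered = hidden_states.index_select(0, selected)   # shape (2, 4)
assert gathered.shape == (2, 4)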
-""" -import numpy as np - -import mindspore as ms - -from vllm_mindspore.model_executor.models.mf_models.qwen2_weight_processor import Qwen2WeightProcessor - - -class Qwen3WeightProcessor(Qwen2WeightProcessor): - r""" - Provide Qwen3 Model weight load and shards. - Args: - config (Qwen3Config): The config of Qwen3 model. - network (InferenceQwen3ForCausalLM): The network of Qwen3. - - """ - - def __init__(self, config, network, is_quant): - super().__init__(config, network, is_quant) - - def convert_weight_name(self, weight_name: str): - """replace weight name""" - weight_name = weight_name.replace('embed_tokens.weight', 'tok_embeddings.embedding_weight') - weight_name = weight_name.replace('self_attn.q_proj.', 'attention.wq.') - weight_name = weight_name.replace('self_attn.k_proj.', 'attention.wk.') - weight_name = weight_name.replace('self_attn.v_proj.', 'attention.wv.') - weight_name = weight_name.replace('self_attn.o_proj.', 'attention.wo.') - weight_name = weight_name.replace('self_attn.q_norm.', 'attention.q_norm.') - weight_name = weight_name.replace('self_attn.k_norm.', 'attention.k_norm.') - - weight_name = weight_name.replace('mlp.gate_proj.', 'feed_forward.w1.') - weight_name = weight_name.replace('mlp.down_proj.', 'feed_forward.w2.') - weight_name = weight_name.replace('mlp.up_proj.', 'feed_forward.w3.') - weight_name = weight_name.replace('.input_layernorm.', '.attention_norm.') - weight_name = weight_name.replace('.post_attention_layernorm.', '.ffn_norm.') - weight_name = weight_name.replace('model.norm.weight', 'model.norm_out.weight') - return weight_name - - def infer_process_attention_weight(self, src_hf_dir, layer_id, hf_weight_map): - """infer process attention weight""" - qkv_concat = self.config.model.model_config.qkv_concat - # wq - wq_hf_name = f"model.layers.{layer_id}.self_attn.q_proj.weight" - wq_ms_name = self.convert_weight_name(wq_hf_name) - wq_ms_param, _ = self.get_safetensor_from_file(wq_hf_name, src_hf_dir, hf_weight_map, is_split_param=True, - split_axis=0) - - # wk - wk_hf_name = f"model.layers.{layer_id}.self_attn.k_proj.weight" - wk_ms_name = self.convert_weight_name(wk_hf_name) - wk_ms_param, _ = self.get_safetensor_from_file(wk_hf_name, src_hf_dir, hf_weight_map, is_split_param=True, - split_axis=0) - - # wv - wv_hf_name = f"model.layers.{layer_id}.self_attn.v_proj.weight" - wv_ms_name = self.convert_weight_name(wv_hf_name) - wv_ms_param, _ = self.get_safetensor_from_file(wv_hf_name, src_hf_dir, hf_weight_map, is_split_param=True, - split_axis=0) - - # wq_norm - q_norm_hf_name = f"model.layers.{layer_id}.self_attn.q_norm.weight" - q_norm_ms_name = self.convert_weight_name(q_norm_hf_name) - q_norm_ms_param, _ = self.get_safetensor_from_file(q_norm_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[q_norm_ms_name] = ms.Parameter(ms.Tensor(q_norm_ms_param, ms.bfloat16), name=q_norm_ms_name, - requires_grad=False) - - #wk_norm - k_norm_hf_name = f"model.layers.{layer_id}.self_attn.k_norm.weight" - k_norm_ms_name = self.convert_weight_name(k_norm_hf_name) - k_norm_ms_param, _ = self.get_safetensor_from_file(k_norm_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[k_norm_ms_name] = ms.Parameter(ms.Tensor(k_norm_ms_param, ms.bfloat16), name=k_norm_ms_name, - requires_grad=False) - - if qkv_concat: - w_qkv_name = f"model.layers.{layer_id}.attention.w_qkv.weight" - w_qkv_param = np.concatenate((wq_ms_param, wk_ms_param, wv_ms_param), axis=0) - w_qkv_param = ms.from_numpy(w_qkv_param).astype(ms.bfloat16) - self.parameter_dict[w_qkv_name] = 
ms.Parameter(w_qkv_param, name=w_qkv_name, requires_grad=False) - - else: - self.parameter_dict[wq_ms_name] = ms.Parameter(ms.from_numpy(wq_ms_param).astype(ms.bfloat16), - name=wq_ms_name, - requires_grad=False) - self.parameter_dict[wk_ms_name] = ms.Parameter(ms.from_numpy(wk_ms_param).astype(ms.bfloat16), - name=wk_ms_name, - requires_grad=False) - self.parameter_dict[wv_ms_name] = ms.Parameter(ms.from_numpy(wv_ms_param).astype(ms.bfloat16), - name=wv_ms_name, - requires_grad=False) - - # wo - wo_hf_name = f"model.layers.{layer_id}.self_attn.o_proj.weight" - wo_ms_name = self.convert_weight_name(wo_hf_name) - wo_ms_param, _ = self.get_safetensor_from_file(wo_hf_name, src_hf_dir, hf_weight_map, is_split_param=True, - split_axis=1) - self.parameter_dict[wo_ms_name] = ms.Parameter(ms.from_numpy(wo_ms_param).astype(ms.bfloat16), - name=wo_ms_name, - requires_grad=False) diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index a9c2b9a3e..5846f21ae 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -32,9 +32,9 @@ _NATIVE_MODELS = { _MINDFORMERS_MODELS = { "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), + "Qwen3ForCausalLM": ("qwen3", "Qwen3ForCausalLM"), # MCore "DeepseekV3ForCausalLM": ("deepseek_v3", "DeepseekV3ForCausalLM"), "DeepSeekMTPModel": ("deepseek_mtp", "DeepseekV3MTPForCausalLM"), - "Qwen3ForCausalLM": ("qwen3", "Qwen3ForCausalLM"), } _MINDONE_MODELS = { diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 279af7ad1..bf40c1fdb 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -16,22 +16,15 @@ # ============================================================================ from dataclasses import dataclass, field -from typing import List, Tuple, Union, Mapping, Optional, Iterable -from functools import wraps -from typing import List, Tuple +from typing import Iterable, List, Mapping, Optional, Tuple, Union import mindspore as ms -from mindspore import jit, mint +from mindspore import mint, ops from vllm.sequence import IntermediateTensors from vllm_mindspore.multimodal.inputs import NestedTensors from vllm_mindspore.utils import get_valid_dtype -import mindspore as ms -from mindspore import mint -from mindspore import ops - - WeightsMapping = Mapping[str, Optional[str]] """If a key maps to a value of `None`, the corresponding weight is ignored.""" @@ -73,6 +66,8 @@ class WeightsMapper: ) -> Iterable[Tuple[str, ms.Tensor]]: return ((out_name, data) for name, data in weights if (out_name := self._map_name(name)) is not None) + + enforce_eager = False @@ -166,6 +161,7 @@ def make_empty_intermediate_tensors_factory(keys: List[str], hidden_size: int): ########################### for multi model ########################### + def _flatten_embeddings(embeddings: NestedTensors) -> ms.Tensor: """ Recursively flattens and concatenates NestedTensors on all but the last @@ -252,7 +248,7 @@ def merge_multimodal_embeddings( """ if isinstance(placeholder_token_id, list): placeholder_token_id = ms.Tensor(placeholder_token_id, - device=input_ids.device) + device=input_ids.device) return _merge_multimodal_embeddings( inputs_embeds, ms.numpy.isin(input_ids, placeholder_token_id), diff --git a/vllm_mindspore/utils.py b/vllm_mindspore/utils.py index 153589ed6..920bb2306 100644 --- a/vllm_mindspore/utils.py +++ b/vllm_mindspore/utils.py @@ -19,8 +19,8 
@@ import contextlib import gc import os import sys -from typing import (TYPE_CHECKING, Callable, Generator, List, Optional, Tuple, - Union) +from enum import Enum +from typing import TYPE_CHECKING, Generator, List, Optional, Tuple, Union import numpy as np import torch @@ -30,11 +30,10 @@ if TYPE_CHECKING: else: Library = None -from vllm.logger import init_logger - import mindspore as ms from mindspore import dtype as mstype from mindspore.common.initializer import Zero +from vllm.logger import init_logger from vllm.utils import (TORCH_DTYPE_TO_NUMPY_DTYPE, MemoryProfilingResult, MemorySnapshot, T, make_ndarray_with_pad) @@ -142,29 +141,41 @@ STR_DTYPE_TO_MS_DTYPE = { } +class vllmModelBackendEnum(str, Enum): + """Define the variable Enum of vLLM_MODEL_BACKEND""" + MF = 'MindFormers' + MIND_ONE = 'MindONE' + + def ascend_is_initialized(): # Just return true for check. return True def is_mindformers_model_backend(): - return (os.getenv("vLLM_MODEL_BACKEND") # noqa: SIM112 - and - os.environ["vLLM_MODEL_BACKEND"] == "MindFormers" # noqa: SIM112 - ) + vllm_model_backend = os.getenv("vLLM_MODEL_BACKEND") # noqa: SIM112 + if vllm_model_backend: + try: + vllmModelBackendEnum(vllm_model_backend) + return vllm_model_backend == vllmModelBackendEnum.MF + except ValueError as exc: + allowed_values = [member.value for member in vllmModelBackendEnum] + raise ValueError( + f"Illegal value of vLLM_MODEL_BACKEND '{vllm_model_backend}'," + f" allowed_values: {', '.join(allowed_values)}") from exc + else: + return False def is_mindone_model_backend(): return (os.getenv("vLLM_MODEL_BACKEND") # noqa: SIM112 - and os.environ["vLLM_MODEL_BACKEND"] == "MindONE" # noqa: SIM112 - ) + and os.environ["vLLM_MODEL_BACKEND"] # noqa: SIM112 + == vllmModelBackendEnum.MIND_ONE) def check_ready(): - import vllm.envs as envs from mindspore import set_context - # Common environment variables of predict. set_context(jit_config={"jit_level": "O0", "infer_boost": "on"}) default_env = { @@ -179,15 +190,6 @@ def check_ready(): if is_mindformers_model_backend(): logger.info("Run with Mindformers backend!") - necessary_envs = ("MINDFORMERS_MODEL_CONFIG", ) - lost_envs = [ - env_item for env_item in necessary_envs if not os.getenv(env_item) - ] - - if lost_envs: - raise RuntimeError( - f'For "MindFormers" model backend, environments {str(lost_envs)} should be set!' - ) elif is_mindone_model_backend(): logger.info("Run with MindONE backend!") else: -- Gitee From a0bf3116e53f59b7286099a5340d383371ecf739 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 16 Jun 2025 15:12:02 +0800 Subject: [PATCH 18/76] add llama3 native model --- tests/st/python/cases_parallel/vllm_llama3.py | 113 +++++++ tests/st/python/test_cases_parallel.py | 12 +- .../model_executor/layers/rotary_embedding.py | 127 ++++++-- .../layers/vocab_parallel_embedding.py | 15 +- vllm_mindspore/model_executor/models/llama.py | 298 ++++++++---------- 5 files changed, 349 insertions(+), 216 deletions(-) create mode 100644 tests/st/python/cases_parallel/vllm_llama3.py diff --git a/tests/st/python/cases_parallel/vllm_llama3.py b/tests/st/python/cases_parallel/vllm_llama3.py new file mode 100644 index 000000000..656c744d9 --- /dev/null +++ b/tests/st/python/cases_parallel/vllm_llama3.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
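# A minimal illustrative sketch (not from the patch) of the stricter
# vLLM_MODEL_BACKEND validation added in utils.py above: an unrecognised
# value now raises instead of silently falling back to the native backend.
import os

from vllm_mindspore.utils import is_mindformers_model_backend

os.environ["vLLM_MODEL_BACKEND"] = "MindFormers"
assert is_mindformers_model_backend()

os.environ["vLLM_MODEL_BACKEND"] = "NotABackend"
try:
    is_mindformers_model_backend()
except ValueError as err:
    print(err)  # message lists the allowed values: MindFormers, MindONE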
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +# isort:skip_file +"""test vllm llama3.""" +import os + +import pytest + +from tests.st.python import set_env + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "MS_ENABLE_LCCL": "off", + "HCCL_OP_EXPANSION_MODE": "AIV", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", + "VLLM_USE_V1": "1", + "HCCL_IF_BASE_PORT": "60000" +} +# set env +env_manager.setup_ai_environment(env_vars) +import vllm_mindspore +from vllm import LLM, SamplingParams + + +def test_vllm_llama3_8b(): + """ + test case llama3.1 8B + """ + + # Sample prompts. + prompts = [ + "<|start_header_id|>user<|end_header_id|>\n\n将文本分类为中性、负面或正面。 " + "\n文本:我认为这次假期还可以。 \n情感:<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=10, top_k=1) + + # Create an LLM. + llm = LLM( + model="/home/workspace/mindspore_dataset/weight/Llama-3.1-8B-Instruct", + gpu_memory_utilization=0.9, + tensor_parallel_size=1, + max_model_len=4096) + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + except_list = ['中性'] + # Print the outputs. + for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text == except_list[i] + + # unset env + env_manager.unset_all() + + +def test_vllm_llama3_1b(): + """ + test case llama3.2 1B + """ + + # Sample prompts. + prompts = [ + "<|start_header_id|>user<|end_header_id|>\n\n将文本分类为中性、负面或正面。 " + "\n文本:我认为这次假期还可以。 \n情感:<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", + ] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=10, top_k=1) + + # Create an LLM. + llm = LLM( + model="/home/workspace/mindspore_dataset/weight/Llama-3.2-1B-Instruct", + gpu_memory_utilization=0.9, + tensor_parallel_size=1, + max_model_len=4096) + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + except_list = ['中性'] + # Print the outputs. 
+ for i, output in enumerate(outputs): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text == except_list[i] + + # unset env + env_manager.unset_all() diff --git a/tests/st/python/test_cases_parallel.py b/tests/st/python/test_cases_parallel.py index f0ef9f1bc..b5f93c6a7 100644 --- a/tests/st/python/test_cases_parallel.py +++ b/tests/st/python/test_cases_parallel.py @@ -232,7 +232,17 @@ def test_cases_parallel_part5(): "export HCCL_IF_BASE_PORT=61002 && " "pytest -s -v cases_parallel/vllm_mf_qwen3_8b_v1.py::test_mf_qwen3 " "> vllm_mf_qwen3_8b_v1_test_mf_qwen3.log", - "vllm_mf_qwen3_8b_v1_test_mf_qwen3.log") + "vllm_mf_qwen3_8b_v1_test_mf_qwen3.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=4 && export LCAL_COMM_ID=127.0.0.1:10070 && " + "export HCCL_IF_BASE_PORT=61004 && " + "pytest -s -v cases_parallel/vllm_llama3.py::test_vllm_llama3_8b " + "> vllm_llama3_8b_test_vllm_llama3.log", + "vllm_llama3_8b_test_vllm_llama3.log"), + ("export ASCEND_RT_VISIBLE_DEVICES=5 && export LCAL_COMM_ID=127.0.0.1:10071 && " + "export HCCL_IF_BASE_PORT=61006 && " + "pytest -s -v cases_parallel/vllm_llama3.py::test_vllm_llama3_1b " + "> vllm_llama3_1b_test_vllm_llama3.log", + "vllm_llama3_1b_test_vllm_llama3.log"), ] with Pool(len(commands)) as pool: diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py index eb56d6650..ff6ea4da2 100644 --- a/vllm_mindspore/model_executor/layers/rotary_embedding.py +++ b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -15,17 +15,17 @@ # limitations under the License. # ============================================================================ +import math from typing import Any, Dict, List, Optional, Tuple, Union -import numpy as np import mindspore -from mindspore import Tensor, mint, ops, nn +import numpy as np +from mindspore import Tensor, mint, nn, ops from mindspore.common import dtype as mstype - from transformers import PretrainedConfig - from vllm.config import get_current_vllm_config + def _apply_rotary_emb( x: Tensor, cos: Tensor, @@ -163,9 +163,10 @@ class InferRotaryEmbedding(nn.Cell): Compute the inverse frequency with numpy. Numpy process is faster during initialization. 
""" - freqs_base = np.arange(0, self.rotary_dim, 2).astype( - np.float32) # (head_dim // 2, ) - freqs = 1.0 / (base**(freqs_base / self.rotary_dim)) # (head_dim // 2, ) + freqs_base = np.arange(0, self.rotary_dim, + 2).astype(np.float32) # (head_dim // 2, ) + freqs = 1.0 / (base**(freqs_base / self.rotary_dim) + ) # (head_dim // 2, ) return freqs def _compute_cos_sin_cache(self) -> Tuple[Tensor, Tensor]: @@ -173,8 +174,8 @@ class InferRotaryEmbedding(nn.Cell): t = np.arange(0, self.max_position_embeddings, 1).astype(np.float32) freqs = np.outer(t, freqs) # (max_position_embedding, head_dim // 2) emb = np.concatenate((freqs, freqs), axis=-1) - freqs_cos = np.cos(emb) # (seq_len, head_dim) - freqs_sin = np.sin(emb) # (seq_len, head_dim) + freqs_cos = np.cos(emb) # (seq_len, head_dim) + freqs_sin = np.sin(emb) # (seq_len, head_dim) freqs_cos = Tensor(freqs_cos, dtype=self.dtype) freqs_sin = Tensor(freqs_sin, dtype=self.dtype) return freqs_cos, freqs_sin @@ -200,6 +201,52 @@ class InferRotaryEmbedding(nn.Cell): batch_valid_length) +class InferLlama3RotaryEmbedding(InferRotaryEmbedding): + + def __init__( + self, + head_size: int, + rotary_dim: int, + max_position_embeddings: int, + base: int, + is_neox_style: bool, + dtype, + scaling_factor: float, + low_freq_factor: float, + high_freq_factor: float, + orig_max_position: int, + ) -> None: + self.scaling_factor = scaling_factor + self.low_freq_factor = low_freq_factor + self.high_freq_factor = high_freq_factor + self.orig_max_position = orig_max_position + super().__init__(head_size, rotary_dim, max_position_embeddings, base, + is_neox_style, dtype) + + def _compute_inv_freq(self, base: Union[int, float]) -> np.ndarray: + inv_freqs = super()._compute_inv_freq(base) + low_freq_wavelen = self.orig_max_position / self.low_freq_factor + high_freq_wavelen = self.orig_max_position / self.high_freq_factor + + wave_len = 2 * math.pi / inv_freqs + if self.low_freq_factor != self.high_freq_factor: + smooth = (self.orig_max_position / wave_len - self.low_freq_factor + ) / (self.high_freq_factor - self.low_freq_factor) + else: + smooth = 0 + new_freqs = np.where( + wave_len < high_freq_wavelen, + inv_freqs, + np.where( + wave_len > low_freq_wavelen, + inv_freqs / self.scaling_factor, + (1 - smooth) * inv_freqs / self.scaling_factor + + smooth * inv_freqs, + ), + ) + return new_freqs + + class MRotaryEmbedding(RotaryEmbedding): """Rotary Embedding with Multimodal Sections.""" @@ -254,9 +301,9 @@ class MRotaryEmbedding(RotaryEmbedding): cos_l = ops.split(cos, self.mrope_section, axis=-1) sin_l = ops.split(sin, self.mrope_section, axis=-1) cos, sin = (), () - for i in range(len(self.mrope_section)): - cos += (cos_l[i][i],) - sin += (sin_l[i][i],) + for i in range(len(self.mrope_section)): # type: ignore[arg-type] + cos += (cos_l[i][i], ) + sin += (sin_l[i][i], ) cos = ops.cat(cos, axis=-1) sin = ops.cat(sin, axis=-1) @@ -379,7 +426,8 @@ class MRotaryEmbedding(RotaryEmbedding): st_idx = llm_pos_ids_list[-1].max() + 1 if len( llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append( - ops.arange(text_len).view(1, -1).broadcast_to((3, -1)).int() + st_idx) + ops.arange(text_len).view(1, -1).broadcast_to((3, -1)).int() + + st_idx) t_index = (ops.arange(llm_grid_t).view(-1, 1).broadcast_to( (-1, llm_grid_h * llm_grid_w)) * video_second_per_grid_t * @@ -388,7 +436,7 @@ class MRotaryEmbedding(RotaryEmbedding): (llm_grid_t, -1, llm_grid_w)).flatten().int() w_index = ops.arange(llm_grid_w).view(1, 1, -1).broadcast_to( (llm_grid_t, llm_grid_h, -1)).flatten().int() - + 
llm_pos_ids_list.append( ops.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w @@ -398,7 +446,8 @@ class MRotaryEmbedding(RotaryEmbedding): llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append( - ops.arange(text_len).view(1, -1).broadcast_to((3, -1)).int() + st_idx) + ops.arange(text_len).view(1, -1).broadcast_to((3, -1)).int() + + st_idx) llm_positions = ops.cat(llm_pos_ids_list, axis=1).view(3, -1) mrope_position_delta = (llm_positions.max() + 1 - @@ -457,7 +506,7 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): self.rotary_dim = rotary_dim self.max_position_embeddings = max_position_embeddings self.base = base - self.is_neox_style = is_neox_style + self.is_neox_style = is_neox_style # type: ignore[assignment] self.dtype = dtype super().__init__(head_size, rotary_dim, self.cache_max_position_num, base, is_neox_style, dtype) @@ -466,7 +515,7 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): if self.mrope_section: assert sum(self.mrope_section) == rotary_dim // 2 - def construct( + def construct( # type: ignore[override] self, positions: mindspore.Tensor, query: mindspore.Tensor, @@ -486,14 +535,16 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): if is_prefill: num_tokens = positions.shape[-1] cos, sin = self.freqs_cos[positions], self.freqs_sin[positions] - cos, sin = cos[..., :self.rotary_dim//2], sin[..., :self.rotary_dim//2] + cos, sin = cos[..., :self.rotary_dim // + 2], sin[..., :self.rotary_dim // 2] if positions.ndim == 2: cos_l = ops.split(cos, self.mrope_section, axis=-1) sin_l = ops.split(sin, self.mrope_section, axis=-1) cos, sin = (), () - for i in range(len(self.mrope_section)): - cos += (cos_l[i][i],) - sin += (sin_l[i][i],) + for i in range(len( + self.mrope_section)): # type: ignore[arg-type] + cos += (cos_l[i][i], ) + sin += (sin_l[i][i], ) cos = ops.cat(cos, axis=-1) sin = ops.cat(sin, axis=-1) @@ -501,7 +552,8 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): query = query.view(num_tokens, -1, self.head_size) query_rot = query[..., :self.rotary_dim] query_pass = query[..., self.rotary_dim:] - query_rot = _apply_rotary_emb(query_rot, cos, sin, self.is_neox_style) + query_rot = _apply_rotary_emb(query_rot, cos, sin, + self.is_neox_style) query = ops.cat((query_rot, query_pass), axis=-1).view(query_shape) key_shape = key.shape @@ -513,16 +565,18 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): return query, key # decode - if positions.ndim == 2 and positions.shape[0] == len(self.mrope_section): + if positions.ndim == 2 and positions.shape[0] == len( + self.mrope_section): # type: ignore[arg-type] num_tokens = positions.shape[-1] cos, sin = self.freqs_cos[positions], self.freqs_sin[positions] - cos, sin = cos[..., :self.rotary_dim//2], sin[..., :self.rotary_dim//2] + cos, sin = cos[..., :self.rotary_dim // + 2], sin[..., :self.rotary_dim // 2] cos_l = ops.split(cos, self.mrope_section, axis=-1) sin_l = ops.split(sin, self.mrope_section, axis=-1) cos, sin = (), () - for i in range(len(self.mrope_section)): - cos += (cos_l[i][i],) - sin += (sin_l[i][i],) + for i in range(len(self.mrope_section)): # type: ignore[arg-type] + cos += (cos_l[i][i], ) + sin += (sin_l[i][i], ) cos = ops.cat(cos, axis=-1) sin = ops.cat(sin, axis=-1) freqs_cos = ops.cat([cos, cos], axis=-1).squeeze(1) @@ -532,10 +586,11 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): freqs_cos = self.freqs_cos.index_select(0, positions) freqs_sin = self.freqs_sin.index_select(0, positions) - return 
self.rotary_embedding_op(query, key, freqs_cos, freqs_sin, batch_valid_length) + return self.rotary_embedding_op(query, key, freqs_cos, freqs_sin, + batch_valid_length) -_ROPE_DICT: Dict[Tuple, InferRotaryEmbedding] = {} +_ROPE_DICT: Dict[Tuple, Union[InferRotaryEmbedding, RotaryEmbedding]] = {} def get_rope( @@ -547,7 +602,7 @@ def get_rope( rope_scaling: Optional[Dict[str, Any]] = None, dtype: Optional[Any] = None, partial_rotary_factor: float = 1.0, -) -> InferRotaryEmbedding: +): if dtype is None: dtype = get_current_vllm_config().model_config.dtype @@ -581,7 +636,15 @@ def get_rope( scaling_type = rope_scaling["rope_type"] if scaling_type == "llama3": - raise NotImplementedError + scaling_factor = rope_scaling["factor"] + low_freq_factor = rope_scaling["low_freq_factor"] + high_freq_factor = rope_scaling["high_freq_factor"] + original_max_position = rope_scaling[ + "original_max_position_embeddings"] + rotary_emb = InferLlama3RotaryEmbedding( + head_size, rotary_dim, max_position, base, is_neox_style, + dtype, scaling_factor, low_freq_factor, high_freq_factor, + original_max_position) elif scaling_type == "default": if "mrope_section" in rope_scaling: rotary_emb = InferMRotaryEmbedding( @@ -598,5 +661,5 @@ def get_rope( else: raise NotImplementedError - _ROPE_DICT[key] = rotary_emb + _ROPE_DICT[key] = rotary_emb # type: ignore[assignment] return rotary_emb diff --git a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py index 6e760aa57..768a8238f 100644 --- a/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm_mindspore/model_executor/layers/vocab_parallel_embedding.py @@ -19,16 +19,15 @@ from dataclasses import dataclass from typing import List, Optional, Sequence, Tuple from mindspore import Parameter, Tensor, mint, nn, ops -from mindspore.common import dtype as mstype from mindspore.common.dtype import typing +from vllm.config import get_current_vllm_config from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) -from vllm.model_executor.layers.quantization.base_config import \ - QuantizationConfig -from vllm.config import get_current_vllm_config +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) -from vllm_mindspore.distributed.communication_op import \ - ReduceFromModelParallelRegion +from vllm_mindspore.distributed.communication_op import ( + ReduceFromModelParallelRegion) from vllm_mindspore.model_executor.layers.quantization.base_config import ( QuantizeMethodBase, method_has_implemented_embedding) from vllm_mindspore.model_executor.utils import set_weight_attrs @@ -408,7 +407,6 @@ class ParallelLMHead(VocabParallelEmbedding): }, ) else: - # self.register_parameter("bias", None) self.bias = None def tie_weights(self, embed_tokens: VocabParallelEmbedding): @@ -417,8 +415,7 @@ class ParallelLMHead(VocabParallelEmbedding): if self.quant_config and self.quant_config.get_name() == "gguf": return embed_tokens else: - # self.weight = embed_tokens.weight - self.weight.set_data(embed_tokens.weight) + self.weight = embed_tokens.weight return self def forward(self, input_): diff --git a/vllm_mindspore/model_executor/models/llama.py b/vllm_mindspore/model_executor/models/llama.py index 354bfb37a..954579f11 100644 --- a/vllm_mindspore/model_executor/models/llama.py +++ b/vllm_mindspore/model_executor/models/llama.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# encoding: utf-8 # Copyright 2025 Huawei 
Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -16,49 +15,36 @@ # limitations under the License. # ============================================================================ -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union +from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, + Tuple, Type, Union) if TYPE_CHECKING: from transformers import LlamaConfig else: LlamaConfig = None +from mindspore import Tensor, mint, nn from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors -from vllm_mindspore.model_executor.layers.linear import ( - MergedColumnParallelLinear, - QKVParallelLinear, - RowParallelLinear, -) -from vllm_mindspore.model_executor.layers.logits_processor import LogitsProcessor from vllm_mindspore.attention import Attention from vllm_mindspore.model_executor.layers.activation import SiluAndMul -from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( - DEFAULT_VOCAB_PADDING_SIZE, - ParallelLMHead, - VocabParallelEmbedding, -) -from vllm_mindspore.model_executor.models.utils import ( - PPMissingLayer, - extract_layer_index, - make_layers, - maybe_prefix, - make_empty_intermediate_tensors_factory, -) -from vllm_mindspore.model_executor.layers.sampler import get_sampler, SamplerOutput from vllm_mindspore.model_executor.layers.layernorm import RMSNorm +from vllm_mindspore.model_executor.layers.linear import ( + MergedColumnParallelLinear, QKVParallelLinear, RowParallelLinear) +from vllm_mindspore.model_executor.layers.logits_processor import ( + LogitsProcessor) from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.sampling_metadata import SamplingMetadata - -from vllm_mindspore.model_executor.models.model_base import MsModelBase - -from vllm.sequence import IntermediateTensors -from vllm.attention import AttentionMetadata -from vllm.model_executor.models.interfaces import SupportsPP -from vllm.model_executor.model_loader.weight_utils import maybe_remap_kv_scale_name - -from mindspore import Tensor, mint, jit, nn -from mindspore import dtype as mstype +from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, + get_sampler) +from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( + DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) +from vllm_mindspore.model_executor.models.model_base import NativeModel +from vllm_mindspore.model_executor.models.utils import ( + PPMissingLayer, extract_layer_index, + make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) def default_weight_loader(param, loaded_weight) -> None: @@ -66,6 +52,7 @@ def default_weight_loader(param, loaded_weight) -> None: class LlamaMLP(nn.Cell): + def __init__( self, hidden_size: int, @@ -91,13 +78,10 @@ class LlamaMLP(nn.Cell): prefix=f"{prefix}.down_proj", ) if hidden_act != "silu": - raise ValueError( - f"Unsupported activation: {hidden_act}. " - "Only silu is supported for now." - ) + raise ValueError(f"Unsupported activation: {hidden_act}. 
" + "Only silu is supported for now.") self.act_fn = SiluAndMul() - @jit def construct(self, x): x, _ = self.gate_up_proj(x) x = self.act_fn(x) @@ -106,6 +90,7 @@ class LlamaMLP(nn.Cell): class LlamaAttention(nn.Cell): + def __init__( self, config: LlamaConfig, @@ -139,9 +124,8 @@ class LlamaAttention(nn.Cell): assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) # MistralConfig has an optional head_dim introduced by Mistral-Nemo - self.head_dim = getattr( - config, "head_dim", self.hidden_size // self.total_num_heads - ) + self.head_dim = getattr(config, "head_dim", + self.hidden_size // self.total_num_heads) # Phi models introduced a partial_rotary_factor parameter in the config partial_rotary_factor = getattr(config, "partial_rotary_factor", 1) self.rotary_dim = int(partial_rotary_factor * self.head_dim) @@ -177,7 +161,7 @@ class LlamaAttention(nn.Cell): self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, - base=rope_theta, + base=rope_theta, # type: ignore[arg-type] rope_scaling=rope_scaling, is_neox_style=is_neox_style, ) @@ -190,7 +174,8 @@ class LlamaAttention(nn.Cell): sw_idx = layer_idx % len(interleaved_sliding_window) sliding_window = interleaved_sliding_window[sw_idx] else: - raise ValueError(f"{type(interleaved_sliding_window)} is not supported.") + raise ValueError( + f"{type(interleaved_sliding_window)} is not supported.") else: sliding_window = None @@ -204,32 +189,33 @@ class LlamaAttention(nn.Cell): per_layer_sliding_window=sliding_window, prefix=f"{prefix}.attn", ) - self.attn_mask = mint.triu(mint.ones(size=(128, 128), dtype=mstype.float16), 1) * -10000.0 - @jit def construct( self, positions: Tensor, hidden_states: Tensor, - kv_cache: Tuple[Tensor, Tensor], - # attn_metadata: AttentionMetadata, - num_prefill_tokens: int, - num_decode_tokens: int, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, slot_mapping: Tensor, - batch_valid_length: Tuple[int], - context_lens: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, block_tables: Tensor, ) -> Tensor: qkv, _ = self.qkv_proj(hidden_states) - q, k, v = mint.split(qkv, (self.q_size, self.kv_size, self.kv_size), -1) - q, k = self.rotary_emb(positions, q, k, context_lens, num_prefill_tokens) - attn_output = self.attn(q, k, v, kv_cache, num_prefill_tokens, num_decode_tokens, - slot_mapping, batch_valid_length, context_lens, block_tables, self.attn_mask) + q, k, v = mint.split(qkv, (self.q_size, self.kv_size, self.kv_size), + -1) + q, k = self.rotary_emb(positions, q, k, batch_valid_length, is_prefill) + attn_output = self.attn(q, k, v, key_cache, value_cache, is_prefill, + slot_mapping, attn_mask, batch_valid_length, + q_seq_lens, block_tables) output, _ = self.o_proj(attn_output) return output class LlamaDecoderLayer(nn.Cell): + def __init__( self, config: LlamaConfig, @@ -242,17 +228,15 @@ class LlamaDecoderLayer(nn.Cell): rope_theta = getattr(config, "rope_theta", 10000) rope_scaling = getattr(config, "rope_scaling", None) if rope_scaling is not None and getattr( - config, "original_max_position_embeddings", None - ): + config, "original_max_position_embeddings", None): rope_scaling["original_max_position_embeddings"] = ( - config.original_max_position_embeddings - ) - max_position_embeddings = getattr(config, "max_position_embeddings", 8192) + config.original_max_position_embeddings) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) # Support abacusai/Smaug-72B-v0.1 
with attention_bias # Support internlm/internlm-7b with bias attention_bias = getattr(config, "attention_bias", False) or getattr( - config, "bias", False - ) + config, "bias", False) bias_o_proj = attention_bias # support internlm/internlm3-8b with qkv_bias if hasattr(config, 'qkv_bias'): @@ -262,9 +246,8 @@ class LlamaDecoderLayer(nn.Cell): config=config, hidden_size=self.hidden_size, num_heads=config.num_attention_heads, - num_kv_heads=getattr( - config, "num_key_value_heads", config.num_attention_heads - ), + num_kv_heads=getattr(config, "num_key_value_heads", + config.num_attention_heads), rope_theta=rope_theta, rope_scaling=rope_scaling, max_position_embeddings=max_position_embeddings, @@ -282,23 +265,22 @@ class LlamaDecoderLayer(nn.Cell): bias=getattr(config, "mlp_bias", False), prefix=f"{prefix}.mlp", ) - self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.post_attention_layernorm = RMSNorm( - config.hidden_size, eps=config.rms_norm_eps - ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) - @jit def construct( self, positions: Tensor, hidden_states: Tensor, - kv_cache: Tuple[Tensor, Tensor], - # attn_metadata: AttentionMetadata, - num_prefill_tokens: int, - num_decode_tokens: int, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, slot_mapping: Tensor, - batch_valid_length: Tuple[int], - context_lens: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, block_tables: Tensor, residual: Optional[Tensor], ) -> Tuple[Tensor, Tensor]: @@ -307,22 +289,17 @@ class LlamaDecoderLayer(nn.Cell): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: - hidden_states, residual = self.input_layernorm(hidden_states, residual) - - hidden_states = self.self_attn( - positions, - hidden_states, - kv_cache, - num_prefill_tokens, - num_decode_tokens, - slot_mapping, - batch_valid_length, - context_lens, - block_tables - ) + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + + hidden_states = self.self_attn(positions, hidden_states, key_cache, + value_cache, is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables) # Fully Connected - hidden_states, residual = self.post_attention_layernorm(hidden_states, residual) + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual @@ -339,18 +316,18 @@ class LlamaModel(nn.Cell): layer_type: Type[LlamaDecoderLayer] = LlamaDecoderLayer, ): super().__init__() - config = vllm_config + config = vllm_config.model_config.hf_config self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.org_vocab_size = config.vocab_size - # TODO: Support quant_config cache_config - quant_config = None - cache_config = None + quant_config = vllm_config.quant_config + self.quant_config = quant_config + cache_config = vllm_config.cache_config + lora_config = vllm_config.lora_config # noqa: F841 - if get_pp_group().is_first_rank or ( - config.tie_word_embeddings and get_pp_group().is_last_rank - ): + if get_pp_group().is_first_rank or (config.tie_word_embeddings + and get_pp_group().is_last_rank): self.embed_tokens = VocabParallelEmbedding( self.vocab_size, config.hidden_size, @@ -377,24 +354,22 @@ class LlamaModel(nn.Cell): self.norm = PPMissingLayer() 
self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size - ) + ["hidden_states", "residual"], config.hidden_size) def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.embed_tokens(input_ids) - @jit def construct( self, input_ids: Optional[Tensor], positions: Tensor, - kv_caches: List[Tuple[Tensor, Tensor]], - # attn_metadata: AttentionMetadata, - num_prefill_tokens: int, - num_decode_tokens: int, + key_caches: List[Tensor], + value_caches: List[Tensor], + is_prefill: bool, slot_mapping: Tensor, - batch_valid_length: Tuple[int], - context_lens: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, block_tables: Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, @@ -410,25 +385,20 @@ class LlamaModel(nn.Cell): hidden_states = intermediate_tensors["hidden_states"] residual = intermediate_tensors["residual"] - for i in range(self.start_layer, self.end_layer): # PP 并行对层进行切分 + for i in range(self.start_layer, self.end_layer): layer = self.layers[i] - hidden_states, residual = layer( - positions, - hidden_states, - kv_caches[i - self.start_layer], - num_prefill_tokens, - num_decode_tokens, - slot_mapping, - batch_valid_length, - context_lens, - block_tables, - residual - ) + hidden_states, residual = layer(positions, hidden_states, + key_caches[i - self.start_layer], + value_caches[i - self.start_layer], + is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables, residual) if not get_pp_group().is_last_rank: - return IntermediateTensors( - {"hidden_states": hidden_states, "residual": residual} - ) + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states @@ -465,21 +435,21 @@ class LlamaModel(nn.Cell): else: if name in params_dict: param = params_dict[name] - weight_loader = getattr( - param, "weight_loader", default_weight_loader - ) + weight_loader = getattr(param, "weight_loader", + default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) return loaded_params -class LlamaForCausalLM(MsModelBase, SupportsPP): +class LlamaForCausalLM(NativeModel, SupportsPP): + def __init__(self, vllm_config, prefix: str = ""): super().__init__(vllm_config=vllm_config, prefix=prefix) quant_config = vllm_config.quant_config - self.model = LlamaModel(vllm_config=self.config) + self.model = LlamaModel(vllm_config=vllm_config) if get_pp_group().is_last_rank: self.unpadded_vocab_size = self.config.vocab_size @@ -495,68 +465,47 @@ class LlamaForCausalLM(MsModelBase, SupportsPP): DEFAULT_VOCAB_PADDING_SIZE # We need bigger padding if using lora for kernel # compatibility - if not self.lora_config - else self.lora_config.lora_vocab_padding_size - ), + if not self.lora_config else + self.lora_config.lora_vocab_padding_size), quant_config=quant_config, prefix=maybe_prefix(prefix, "lm_head"), ) - # if self.config.tie_word_embeddings: - # self.lm_head = self.lm_head.tie_weights( - # self.model.embed_tokens) + if self.config.tie_word_embeddings: + self.lm_head = self.lm_head.tie_weights( + self.model.embed_tokens) logit_scale = getattr(self.config, "logit_scale", 1.0) - self.logits_processor = LogitsProcessor( - self.unpadded_vocab_size, self.config.vocab_size, logit_scale - ) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + self.config.vocab_size, + logit_scale) 
self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors - ) - - self.set_modules({"model": self.model, "lm_head": self.lm_head}) + self.model.make_empty_intermediate_tensors) - self.set_model_inputs() + self.common_preprocess(vllm_config, prefix) - def tie_lmhead_weights(self): - self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens) - - def forward( - self, - input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors=None, - inputs_embeds=None, - **kwargs - ): - if attn_metadata.num_prefill_tokens > 0: - input_ids = input_ids.expand_dims(0) - if attn_metadata.num_decode_tokens > 0: - input_ids = input_ids.expand_dims(1) - model_output = self.model(input_ids, - positions, - kv_caches, - **dict(attn_metadata), - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) - if attn_metadata.num_prefill_tokens > 0: - model_output = model_output.squeeze(0) - if attn_metadata.num_decode_tokens > 0: - model_output = model_output.squeeze(1) - return model_output + def forward(self, + input_ids, + positions, + intermediate_tensors=None, + inputs_embeds=None, + **kwargs): + hidden_states = self.exec_model(input_ids, positions, + intermediate_tensors, inputs_embeds) + return hidden_states def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: params_dict = self.get_params_dict() - self.model.load_weights(weights, params_dict) + load_params = self.model.load_weights(weights, params_dict) + if self.config.tie_word_embeddings: + load_params.add("lm_head.weight") + return load_params - def sample( - self, logits: Tensor, sampling_metadata: SamplingMetadata - ) -> Optional[SamplerOutput]: + def sample(self, logits: Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: next_tokens = self.sampler(logits, sampling_metadata) return next_tokens @@ -565,5 +514,6 @@ class LlamaForCausalLM(MsModelBase, SupportsPP): hidden_states: Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, sampling_metadata) - return logits \ No newline at end of file + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits -- Gitee From b170c9fa7d95d4984c09455295636af28e5e80c5 Mon Sep 17 00:00:00 2001 From: twc Date: Fri, 20 Jun 2025 15:58:49 +0800 Subject: [PATCH 19/76] add qwen2.5 vl and opt performance --- tests/st/python/cases_parallel/similarity.py | 58 + .../cases_parallel/vllm_qwen2_5_vl_7b_v1.py | 101 ++ tests/st/python/images/1080p.jpeg | Bin 0 -> 617724 bytes vllm_mindspore/__init__.py | 18 +- .../distributed/communication_op.py | 90 +- .../model_executor/layers/rotary_embedding.py | 59 +- .../model_executor/models/attention_mask.py | 66 +- .../model_executor/models/model_base.py | 191 +-- vllm_mindspore/model_executor/models/qwen2.py | 26 +- .../model_executor/models/qwen2_5_vl.py | 1079 +++++++++++++++++ .../model_executor/models/registry.py | 2 + vllm_mindspore/model_executor/models/utils.py | 7 +- vllm_mindspore/multimodal/inputs.py | 73 +- vllm_mindspore/v1/worker/gpu_model_runner.py | 183 ++- vllm_mindspore/worker/worker.py | 75 +- 15 files changed, 1736 insertions(+), 292 deletions(-) create mode 100644 tests/st/python/cases_parallel/similarity.py create mode 100644 tests/st/python/cases_parallel/vllm_qwen2_5_vl_7b_v1.py create mode 100644 tests/st/python/images/1080p.jpeg create mode 100644 
vllm_mindspore/model_executor/models/qwen2_5_vl.py diff --git a/tests/st/python/cases_parallel/similarity.py b/tests/st/python/cases_parallel/similarity.py new file mode 100644 index 000000000..bfdae0d90 --- /dev/null +++ b/tests/st/python/cases_parallel/similarity.py @@ -0,0 +1,58 @@ +# Copyright 2024 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import math + +import jieba +import numpy as np + + +def _get_all_words(standard_cut_infer_ret_list, test_cut_infer_ret_list): + all_words = [] + for s_cut in standard_cut_infer_ret_list: + if s_cut not in all_words: + all_words.append(s_cut) + for t_cut in test_cut_infer_ret_list: + if t_cut not in all_words: + all_words.append(t_cut) + return all_words + + +def _get_word_vector(standard_cut_infer_ret_list, test_cut_infer_ret_list, + all_words): + la_standard = [] + lb_test = [] + for word in all_words: + la_standard.append(standard_cut_infer_ret_list.count(word)) + lb_test.append(test_cut_infer_ret_list.count(word)) + return la_standard, lb_test + + +def _get_calculate_cos(la_standard, lb_test): + laa = np.array(la_standard) + lbb = np.array(lb_test) + cos = (np.dot(laa, lbb.T)) / ((math.sqrt(np.dot(laa, laa.T))) * + (math.sqrt(np.dot(lbb, lbb.T)))) + return np.round(cos, 2) + + +def compare_distance(x1, x2, bench_sim=0.95): + """compare distance""" + y1 = list(jieba.cut(x1)) + y2 = list(jieba.cut(x2)) + all_words = _get_all_words(y1, y2) + laa, lbb = _get_word_vector(y1, y2, all_words) + sim = _get_calculate_cos(laa, lbb) + print("calculate sim is:{}".format(str(sim))) + assert sim >= bench_sim diff --git a/tests/st/python/cases_parallel/vllm_qwen2_5_vl_7b_v1.py b/tests/st/python/cases_parallel/vllm_qwen2_5_vl_7b_v1.py new file mode 100644 index 000000000..d776c8d93 --- /dev/null +++ b/tests/st/python/cases_parallel/vllm_qwen2_5_vl_7b_v1.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Copyright 2025 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""test mf qwen2.5 vl 7B.""" +import os + +from PIL import Image + +from tests.st.python import set_env +from tests.st.python.cases_parallel.similarity import compare_distance + +env_manager = set_env.EnvVarManager() +# def env +env_vars = { + "ASCEND_CUSTOM_PATH": os.path.expandvars("$ASCEND_HOME_PATH/../"), + "HCCL_OP_EXPANSION_MODE": "AIV", + "MS_ALLOC_CONF": "enable_vmm:True", + "LCCL_DETERMINISTIC": "1", + "HCCL_DETERMINISTIC": "true", + "ATB_MATMUL_SHUFFLE_K_ENABLE": "0", + "ATB_LLM_LCOC_ENABLE": "0", +} +# set env +env_manager.setup_ai_environment(env_vars) +# isort: off +import vllm_mindspore +from vllm import LLM, SamplingParams + +# isort: on + +PROMPT_TEMPLATE = ( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>" + "\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>" + "What is in the image?<|im_end|>\n" + "<|im_start|>assistant\n") + + +def pil_image() -> Image.Image: + image_path = "images/1080p.jpeg" + return Image.open(image_path) + + +def test_qwen2_5_vl_7b_v1(): + """ + test case qwen2.5 vl 7B + """ + inputs = [{ + "prompt": PROMPT_TEMPLATE, + "multi_modal_data": { + "image": pil_image() + }, + }] + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.0, max_tokens=128, top_k=1) + + # Create an LLM. + llm = LLM( + model="/home/workspace/mindspore_dataset/weight/Qwen2.5-VL-7B-Instruct", + gpu_memory_utilization=0.9, + tensor_parallel_size=2, + max_model_len=4096, + max_num_seqs=32, + max_num_batched_tokens=32) + except_list = [ + 'The image depicts a serene and picturesque landscape. It features a lush green meadow with ' + 'wildflowers in the foreground. In the middle ground, there are small wooden huts, possibly used for' + ' storage or as simple shelters. Beyond the meadow, there is a calm body of water, likely a lake,' + ' surrounded by dense forests. In the background, majestic mountains rise, their peaks partially ' + 'covered with snow, suggesting a high-altitude location. The sky is partly cloudy, with soft ' + 'lighting that enhances the tranquil and idyllic atmosphere of the scene. This type of landscape ' + 'is often associated with alpine regions.' + ] + + for i in range(3): + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(inputs, sampling_params) + # Print the outputs. 
+    for i, output in enumerate(outputs):
+        generated_text = output.outputs[0].text
+        print(
+            f"Prompt: {output.prompt!r}, Generated text: {generated_text!r}"
+        )
+        compare_distance(generated_text, except_list[0], bench_sim=0.95)
+
+    # unset env
+    env_manager.unset_all()
diff --git a/tests/st/python/images/1080p.jpeg b/tests/st/python/images/1080p.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..0d298985cf4468902c27eaca2f23f74dae8c80ab
GIT binary patch
literal 617724
[... 617724 bytes of base85-encoded JPEG payload omitted ...]
zWb}O5QSgK0Co@*!_p32X*n4BhDU}=7gUC>PMz5X{)D(B1@o?3cH0Em?{gm=tkveC~ z(?PgTSzjEO8&$0pq;_T+^lWBaH#!D>dx7UbFc-KGQKtJ@>471ck0U>@`@@xf!*<*D z-?k;}_wfk*+kR|?L(gmA@l%~zhmC&kD!^#{EB(5gzm}e@#agSG5j2qK>#Yg-`^?t; zasTWiP@;>aSOAhWT$D<^j))FvBdougHA@$^6CXNDUHuft-dC)(26)7uuWt3E62po> zouba$6vw`Bea8&EpmmqbB4^pE>0v~~<~2%h3z;NKjL#Evbr8t)9U|&*uMfF6YhqSE zvP=)4yY0f3Fu}4J#4unxcF_DHGwN=-?!D3WJd;OF)%J9*Q$iK3HF(kcVJlDWOM_{m zC}$O{zV@4WZb9lB;;IgFR-p-O+QHu07O5IA2Cw|{SZ7JRJHff+a~oE2@=n>{cCMj% zpN0&Si0$tfuk{w%$xmu(AUlmNho*S|zg%8rkcKZ?KxDnQcAXw~t65EZ}?jL5q zwJQr(3|$n}%0Fg*w(GfX>VDd;bb)6U%^y7O(v+n`!nb)&lP97s#=Y~ty`r_Uwi>HUnx#hVkd1LQm-EU3y z1_HH&K_9o#W$+S5i6wh!ibG#5v!gi*ljP;#mmqu-7VtepHcpIZQ2R+0G~+<9Vi!`G z{T0mZ{n;^~Y}ZkY5~d2dpR+!kYW8OuWbu0bE6dh*#4tM z^aIiZIVZhAjjfyfg^OvrI-WUpi5`o$ zs5NkqF>5~H6>F7M5S&*4`X7h_zbzI+&2AkSIP^Z%m}yH6``HbZN@r#)-GIz0S6voe zhDf=@QPI-X6L}H?iSZfuokN6&BY}z6@A0j+}m7qkT z<_(@6I;nKztnFnc>yBokD)-3|qS-Lx7$D&@?`)jLvkibBaoUd)oZKobz#rk|q zXgha{?sLK=(H>f_DZYMJ{;G+w?_Y?*LpqJZKE|U)qXfY^PlHZwP%eJlcyZNG{uU^U zpwM+)gqS63LZ!AbX0Fvylm6R)u|pZGGGY`PYLQZ=P*hY}Zz9ca!EyM)7Gsf(Y(pF@ z*Y=&sxDqx>aJH3g6X*}xK;u2%`WDU(jIm_D<+tfgF};x?n8TN)PM+3^@NZh!0^2!r zr{-zMcNt?8O*~DCRitu-=MSd+3UI>%6=~U@4%;ez+i%9Kua-x1KAf0MkVcNKVL%;e zZ!2g_LtBhAkciD?5Gy8ExM~(ddNq^9Zr%tTTq(>TpYyv6earht4L)w;8tKKn_~KO) zc$4s#*jgFQbD%VJQ~L9bQ^<-3?XTa_Q6a8|sP< zz3-6_cKESzFN#mxm*$OIt@(q!=&{Cy!ow9#ZK!n_9*kCcYFa5`wVK-jet&5NC}*^x zZDAp?AI^(+5~rvOR$CrsC|hX0CfMbueNe)k+8Da zkC;3G$hN1L^QwbkTWs$wp9R7C>itFogyfyo1yinDRZLuDQ@>jiqt*Y>G+LlaE_JGU zFT)T2`+JVY=6ol{PL1EQooUTWdsbbvGSmjq=!9a1iy z)i_)VZEOfna}R28x-8NT7@en%8^VbqJFU;mWxuTYDOyh=oZgDk@{b~k>D8tGDQBLDrh9@fy^~S zW2yv91#lEHK_}iuWc*XvabE43@g@7>q%FNofrTf%J_2itA=hIEN>Al8k8Iy$HP}Tx z^jlI)`LHKnv3^CIkDAgw9|Xf!9t`$ju9zKzr=Dh-$#N$v*;S(XhF(NBe3D@KwQLf? z2AakvmZ`^Kcoo%1I5vLeeLvzm{Gl(+Iaaij z2>v=L9_Ga^P;R?@MCsUk?rd_uP!ykJ%Y_5b<+YyclDaKX80Yd} zqR`ny)N$IeFbYj~jJvL^ z#B`Xk6>-KB67UZ*Uf<5IaYZYd5@Ud?kC^MIMQ~=fuD=5xU504XvKe$OUI10ruz?A$ z1=eO`?j6pS1}4Om|Dfd-i4pm)q+oE3z-Hn>_P_b_e&cWSF1A`92y@$I(tBn3fE&wy z{BX&nZ_wXwm5gMoTnL1#;a30!x|_khLc3#*2f_fuiv-z{e>;{HTKWfMoc&(_KqorG zE_0lm*@K>^N`=N+WOR@z5&3lT5p>}{wQ2pu)8sl&01weNcDYpIUuv7`Uo| z+{sjP&>i}39ym$@shR(UDUN6IcrQoPQlN@d+5}JuaPAee|0&9Ev|$REraZ={;1fXk zkaH5^f)Lh>`v~Cz-CUkh0~osNq6eBo3s1=jK&wHi=m?y~GjUyQNz$%p7ng)wmTE~r zPt1CRv_E~un((8F=Q5e}+mW?J({ab{8q`QdK}jSZvM=ODC2d^SUjx%S%%)YB5i{XD zA0Cgl*XR?A__U-X=zW8wm7*N1S1mY7Skh*>uRrDyYYFyES#rS?42kS|5(`6Y`JIxL zNdQX8NBs~lcJo&tef9oi+x<`%HKzq(eT66(dv;QIx2vG$9Y1hFNP7JJhh&Y1N6BCZ zxAh30^elu${5kMFJe>V3G50fjDqi$8tt_CYZnzNpgpMPRZZD(rQ@Ii5E>SGRRPlBkOT)V^h?)h# zT8{Z>(J5{(WaF}Oc9(GCViU9etaKFg`&tXCHbZM`Qr|pb<~jTpt>+hqJnxz1FJONE zhOC)I9imr?T(FN)sNc(xuz(qY{q!prmtLuPzmYA4v7?Y4--#4+y7eIBrDY7hR=fU& z;|Tx~EL3vLu`f`2;R|A!>v<0F%Yn(qOj=i#nvl$5w_1+XAdL^L>SpJ%l&o;WBM0STf^A_*ZCHaC%3&zyPYuwFC%msOdxNbf~6t45; zyzST60H+E|_rx<2JYx~%+|XYE5`KO3>m2K#G7!Uo?%l}1)u+qp0b0zyXR-$GU027U z&NY}9eQ1K{w7US;es77){Dug7!Cq8@+XwL0{cghJ%ldLo^ydez2s>M&5&C zwgh&mD1TyKZ2AzflD0w{nSAg#((oa7t8Nmc*tu}wK-zsyB3GIr4eF)wQ&ig9YrTc_%r?YXu^a&)p~;S`^!+6=#;fyFzI&{4S} zN~HXD$JL8r} zUZSqco=sT{Y_S{XbV}U_Oz8MvlQ_am>^?wAn3IS}AZ_nsHac%##ae$Rsh)z%?Yt>7 z>%t_>v7^lc@nP|a7r|j!Lrmp`WR92eLSe6pfcRXBZ_$%4L zKK}GLhaBlD41-8(idEe#`in?3x7h$&mvs=?An`S;{v_EtW;TXCu%tEF_Z!*zrLQAV z{Gr>70>8;^J#-SAhU-)M9B>LfaMcXJed+I)H z*o>H-I(y4*6(5)189&tPZ;Y?Heq+m}$OZnwx7oHF)QE9<->XPZ5wQ;rBtC{oj$tqMrjNY|CHbzkhDb^YeN-}~W@8&J0?FPp_jsp{(yJvr2?fq~Aaab$? 
z^?yjYmtxgjwih-lT3a0y_5oM>v1LCk(M``u$@D)UypKY(}WA6Ciut1r=cMRL&8f3rPw2<%a7Wr%cgMZ zHajX?au%=dff8Auyo5|HvK+#rWE(9_#6rb>zRc-FVDJ*Pu!X?A@xR83Fe4Qf1|{l= zM62^`<<+ije2b44idVPoHD9zPec9z)oVc9b4zuhd^i+uQFb6u$$l#`XrcCk`wiMe= zsyHza16-zx6Qu~Nuwq{uE&0+s0K~DAsZW4v;VF4v4yT@<4*SAK)5MsH6F>#@)U3kM zbD5f?z8YiHk2dHtzwh2YkaICv31j)H`wH^fyBsgjO?&y`_c?4{?*1m@ z3XNJ`PE~`8&$=G-v?Ju}DE}dmaSzoFf?4o4v1O~Wox^G9C#R?bQKy!pE()5-xULr2 z6>)5e<1)pWW#c0!BNu2r8vohp5Gntlc&b<}R2?$*spe@Qsq_V_@dElcH#1{6f3&Bb ziG$`1=HEJ4h{2>Ev0@zXWmsQW`C*=*BS#_X0pGA3_IWq`;HwX01FJ{oekPvc#gXwt zY%(_&T^n$@i9&OE5H^kEdy~H*11*ns@??C(P}gZIy{jgBA0X6l`!$ea7WBORJ3I~A zB8SW9l2hQv??bihQziF#yK}mYwUcDD8ccB_&Bo+$Qlw#zBE%qlc%x0tB-y@lzrmh@ z$@Vc~GHbH8ni{tJz924Kq8h&FES+a%!Uxv$f}||<*dRRzaKcZIh|}u>HCK!p6Q+=S z?g1eY74!Z^@4w0#Eh=}JZq}nx87#y$26D!a338bWz&g8xoK5zFVZ@W}e5oOu=LnII zcxQJc7nU5B!KAK;;}@WIg`%#27%Hb{o2&h(2E?K2PfT_A?VN5mt=f5iL+aZT-qmGo zK^5aG^Q<-%3aRhnV?ke+Wj(WTH?7Kff8&-%hCLzi1=Mo>W>Xy%<|}<}1BLYDO|ZGW z9pZ;jc|e_sG$ewTDoY!=NrBr7K=MUC5pTcq@E_E#Y{wpNJO~%RxR3nK#(#9%CrHEmhhq*FzwmaDwSLK%o5K7DsoSbz zAyBZE{YQ{Vbx3^3Kg$Kx6*~r3`kf*Z5+D9Apz(JKHDB^{;m&4#;bwhN8*$sFD=K_& z8?|@^0^S_|hZ0Ml0%^IFx0yU|Q=Ct+;yT?kxBt@1hx`&)R9+j?r_v@WORjVC7cc~e^^w!=c8-;)ZGS! z-8uHkNV`Ys%JEWl3HtG$8dB?)z$8=(qV4KEj8mt3(`xge+R$>`_a4(^RQis)%WmL` zO~nzA)l~ynedfE?c239K`f%2@R)$-xnuE7$ndM`TNY6ioatqC3E|}LEKmSlR@P;`E zA!Ty>&)=J}IB-oeUC$q!0bJI^`WtAPj7J9cYknQZTMH=xk1_^|ZkxV%lpip)WAAuW zrnA|Ad!+J3bnQV+wRfU-HM?+Dz)# z`TmC2P)4-MZZzV_9JsR0qrEZDu#7l2xdo)%Wlo#b=N@0CAAd34V0mT8u(@kdToS~o z#lXOtTu0o#m*>HCdoPdApib&5+V%Lp2awKSV-s7_S3Mk~+zU(^4zOX9lLgXP=LvDt zRD@M{|IokwDs06b&L4Wo0ma-lDOu@rpSp8p+MlVHmg<&R#zX8ZchFGlICt-^5I#}X zof4#7SvP7I*BcvW;wowH8PzDP`*MR$U%UCzMR5H{#L}VZhKQWGJQ~smSZqv`F+U3J z=`5A}AWa_M9QO6Xptm&4b~Bj9*6=YqqpK#fdxdMP%ms@n#tSy^3EgG$u|yy$)O@*c-) zC&Y+e^el&q();51wB}@v9yXh#akOo_9tmgf!imYx}4)!?$cDib<7+~2a1m(vMlYPrr_llF!RNyRHA;bEYEQ_@41id)jyHL zzk;OTj3vLUE$8GTss>}5!4e}buCUFZ=ilcrcGkWLLC>dNm<8b$NbO|KtE0JZ#mB!q zHDxXga2pbBZsLntsMO6zk9D*oGt?YwX1i}aPkX+U?+5vgII!};1QcWis18dyefvX5 z8a-?p$N7!CITqI__4VGoA7wj0UvoIoAyrGYHLU@{i%F z4906ct!R;?I{zf{rB&sRHFu=G3s7!<#;`IhGXEOo>Jy0A|@n+_Tg)&lpK-tvDFAnhy@bV#~An_$fo`aMEZSzn)q_MZMwU` zlqLo5a^&H3&#Er2bF+PtiSt^e(aNip_1=cyhW-9ikA=!6wz zyDLYyD0ft6;RJq~>bBr^<5hKzjp)cgxtFf1N+H?V1yq&+lob}AXm+ozcY-gFkXj&! 
z)kF}}0qO+cqb#D(`uPOVEf3ZN@%t7mw!#xCIf-Wc}J7a zewkig-tq#W?vI}zG)ay`O->klsMzL3ynvwUBBDZe(b9$^kdC9pL!l3pAocFn%JqR_ zxrSZlzOh$jM!KJ+g$LryH_pYL0MKsrKYqa1#%d+gHG+g{SIz83Jy$K$en-2XRrm2t zubQnkmxud01dN+&HR{3JUr>A|cB%s&f4E67hgP=S+Fpf>;+Aes$<*4BCj1x6L2ir_ z+HO%@lf4Yq^9L8w+keKqhxje6n|`S10}XUVTD&NY-_BsOeC5-^GdoH7xfN%l93Z)u z>^7&((9O^K=$tg2+2=_YEyMD<`cOX3d^ci6Y0YXYd-f6QJ73=e9G z#-9MZkbh02J+CX(w)+J%5m0TbER8Un3?9f*6iZj7b$vJ z4(sb09t%Z|;RV|hRR}w;q^>tk7Bkm7=G!sh4SQW^0Ie2aZW- z@!S@{qxh$`@l(97uJmd3`u7B2f6ZL>k%q2CvGhrSzgQ&?nXlH4#2G2ihK8=5xhQp3 zn*-6FV%<97-U=F8&fjSIJe0XgDkPL>x|{!LWvlcw`nvSS;&F;XN<(G3?d#1L0nesy7Fg@j?~;{@i*7f9kH6nlp*d7m#m zHG2`mqlGku_Nl4)k1460h|fR|U4SXoY$cdK`GIQsl+2blpyjT>Y0vwE(~!5{(_f6$ zd#LD_+JPFlQGHx7&+-iYHlgHPrd~I zUCy6O?$ZLK3lz6cp^p0s{zl~APvlrkUC=ygIb5*|3RR!f?sUqp4Is2$&4TaJ?RvTt z37>t~Hhj1YZOyd&5_G;`3OISU>d(-NtCN=Nu#QDEgOX!QRE^%yvWxr8mFC7DY?& ztzZmKes+wb98Y8HlOOI7cda7U8p9q9pkO!G{J4pA6SC?zbg};3s9$eLon^Bk0cFr@w(K zT_p|HuULyHo;M}+y1VQSCLK3d?`8Cb6iqI;RZb8!M|l(2_7Ww8=q7x?}X_ zug#SwycVHdm8*Iz^PTvDq`~;`?gDkB^k^z6v2(L<@rscs)qtI!9epYsQ)?pxsGw7u zcAh-FZu0;YeA{a=t{e5`yyUb9HOGe99IUH}l{J1%;ArYnS?FIF4hwP1lg#hec;NAA z-^=&qy_vtpZ`aBHm6iTSOVfDh@rmhS?pYwgjH;;=S+0YBvJvAW5vuRovbM4x-b-1k`$0I=OYhbV0)P-Tx z)4M_ILi`_@5FEMIR$Nw3x+C-yehtZ}n(V`;taLuSZ<053g=*iRTAbEtuz*fv&9UPJRFBg?Yq`9gw+->R*vFA+!a@UL$C|^`5@c= z^Q}x7j>?-un-+pL_4~x&G4rKa?4r7SF+R_GL-&<-%4a#-oBNCTPxcqn`Yof7st^A$ z;y?S2I+zX>TQCdr&Hh#DaOb0A7kr#*)=TND*|79J2yxvHEUJ(B2peh~u2hLhDMco2 z@fs%R9HLuA$4Fv*%*Li0+!U9D89tqex1K~`bR3%1U?^= zkb%`HY$@THef?#q^(`z!Q?B?@pIp7bl7V zA!8?UXgun%9N1-S--%_n9e&AHU{|`{glDRWKVfk$L(Kc~ex44fx5;%O;CMGanbk)+bxx`4M;bE5!(WHI{}~Yqz{ni)9vT9yOG* z%MZkJ<(%GZobml#%l&G$-s7OT(}z`@>X=JEAE1&6Z9l+iIKZNB=DFzQ=o_=DrK`*e zwxxTwHTr$!YID&8gS&EGmcsQz2!*9-JS9_^TsW%+=J&jJVdK~ht46aJkphW=ds$0= zeVUUnYjtg|2uiXOXQohNbE|jEPdK50$m>ST32#!evq_P1nfz8r>Gv1&XQjyXv08V4 zP|P2!Wg`~tYo8JhzEA8BysRY~BUbju_oCb@ZGM$ByRW7kdn#cr&c~+iHA0_zJi!bN zD^-et_+;OEtqu;nmb~5gnSZkCC#3C7Y3I2EWSQl68(`a*E7DCTwZ~OlF`j0bi)G_n z25Ba_8~D@1iFn^W+UXTZ-fOXrPN%^|4B24cw`?7o8SuDr&@0b=!h(~*V`wkp!>>ex z`>_tBa{G7TvxUF78P|4Tc=a)h1Wy$dujJL*RU^X;h0>&Yq2$WV`F(Z$JXdX3;b5YB z?EpIUs;s`a)OfUqzag8wo${4yt_BTb7~n-_u&LYB9B8RQad;iunS`01RG=y1y0JF~ zLXw*O7bLBiD|(%>)H{quw>drhql6#{Qp?sZb64N9g_3(d)+bgn55LN3)hvF@M$~O; z=T!jq1?byJdg7;(-McdBW=j&5U4#r{wZ`|uxBiTKwu3ala>Br)cqnqwCzL>PDv*ND; zKHTB;7&)JQ=QtZWs@SY8?F z-fpAyhf|J%tCQNOu=s9na+X+sc4mYulfPH;gzTDRF`?xh$(;yF?l$_Uk$Ed0tG1%% z1mB%~^*FYRXsO6^QxlgX);3WbsR$`-tEH`*GPASav5w~`&n7IFNeMcoB!+c0k#QIb7=}g z%&nB3b-f;d=~F;!IcN;`>7_zKpV?0+8k$K537Rc9n=<-r&=bF{eGX2oBES$fO!sfw z2DBhNeLR>WHf8aeLp z`kj-jcaoyKtU}Dm%WMRVSjT1|Kl9MtQgF%|TsY9eMt1d6#x*Kn6lW{FLE8mHeR(K1 z$R#v-U33z^(l)F$E+L|~KAikRMfh8^O#)W3UL72(m7eBwg{Y6MU{PT3Vq=UJFi#FA zd?T3n@I$i~EBr>p&i^+5gCHQ9wzk0CxszPr(C3pDr&lB(ZHk1n_E34NrPq9ji8v!u ztxbaLGKL-V5-;1;$A*?hTtrG}ts6>?2kL_Bxa|#wIr20ot2T5}W*jUf*e+7++mdH9 zD#pDIva>U01swiD4D7TuA8nWTR$>X88;|@Y>cZQ_CdS?DX{w6@omxJ6wV$tsgxR^| zc$1>dVF6@Rruzi$M`khhoRk>59lxSZl~8($Ko zQ;{Fr3ce7-(h2ErieA-r;6?mKZuo96Bpmq43!>Q?DxFa9RMphy?aG!4fB4m{$1;cn^VkiNw;pT!e7WhRO?zv zpnGmmzZBgQbJll+TMJ6{Q0rTJtGRp8>JpQ}RW?6C8w=qPP@+8l@UsaMREgao0bRF? zqBt+19{mkz&A{0sb8R}^+}sxL+xVUU>Xd{+RE~q=WkM1{ZOywmxKv+arQJS7SY?tTjsJ|!&t5bL)3%ks`J(;1n47s!&gAMx%~IDYBA zBJxKPl+^>#fad?nertT7zDGeXJkHeK{qJx=oG7!E)f1KX{JH;Qn14fzM4wsQ9>FAyDpS!`0iJ! 
z+Rdb&G98Fp8!!4jc8qP%Q#vxC(OARXev3KP@_s%LdQNNhbALsj>Q<0{R`PdB?uG11 zg3=m}N8+pzzu9)?g}HFVz`l*v7ZEwbN7w%SLZh!|GO?HWBkbX_>+MN=@q4Mi$0m8= zaP6l+4Hu&e50{OFkSJcOZUb7I(_UF7Os1>^qt&>jx@3x}a!xB;_yvypR9|a$ohj2^ z=qEE;6XFlkunnff;yPc+HxfhrO*u`uxD0R99)djj>P=e2HRFwPz^{q->)pA@jHUv7tk*hX;+Hd#~0X z_mg#ot=vkI?f0GMP5_3{5~eT78e{~1LkNR~aRhhkV;q=7iZEM3M?d_H>US6-`c>C5 z>TFU;X{YWe;4pg_fgALX4t?tWA=FOLASfr>oa&%Cr$1x2TMHdld`8&-=dCy8ZPtx!T(e#sC7&70@G)szm@b>tc2JGO1#66@o^-N#+vR9v5a1WbQPV{E6uM)X&zBUpUx zUS!DiG#H=cTP!JNH}!~ElDlwN!^YW`uL?7|(Di>%#>n5LTjxO7k!b=uO-28;z|rZJ zS<;Os0ZQ;E0AX$iL{-Q__G`g*M4vJHd9y8iu_~GM04F`3>MA9=@QC#UV5c-GIVPOs zWixDFwY2S%O_pU2X3b@B)p%+fd|xtBuVc1J8w2Dr;LpC z?Z_iw{()M|F6u%>($So%ljfz-)-NNYjbldw*IcOA2~0I9{z|{4m-HgdU<^FRAR9AV z3ZqO0kbCM70S5em)Y6eklg#sq^lRlT3+x?rXyOou9n3eLqS|)aXVfw zP>#i(i?H#5d`6*ysXi{`7&k3^S&qei)s*MY;zy=niC`hcugxpJwk8|NLGdoTlY6HA zM*C^u(4r(le@{MAm5E z@L(i&V(*5Qn8f|VxrYynoiZ3mEy)mLvIf!I@Ed4@BX}$)>8UG+iBva-2jeh0XjgMf zMO~^i%Nje)5FiJ4jZC~QTYyk3Z{Ojw#olkKHE}0ZA=XO{dEwQMlHZmxTh%d?)%8j2 zD&pK*`x3NKXWJh5v_=2u3O@zpy5|B-x_*5S$LUt&ZfNv!W9n*vq}%Kok{SvkE`TQzW4UFNDr)UHwbAL z;E*|?%6Ah|8y+}peFB)J#K5=-UXiAm>de3Vd+%keqi&e(>9IIV27tr<^ii#=qLZAo z;Qk;%_}{+2s9XMV5dCN=xx*jjf^&3nhW2p6^`qz{+haDoFeRh*ujzvm0JkJT#71P6 zWxIo{r^MxD@#dg@{+PL#hcr|*6!BxZ4s&Rm^;zluh=SWjSer%Y6{)s~v(UK~ieE@6 zl4mB|)7<1uouzea=%ejMhrSK4IsbdQe2R*OO7dk1*4$N1W%?{n{0d`D01a(EJNxofPM- z6$o-IdS%+6@<~!B&pOvm^WNaxRSKwE#c4}CW1u=v83bf7bs;p3r?(Yr{`?@XKAs7& zZ_}o+Vq8$|zG|(e9TQDTZzDD4*^dPvh@$H)c6vltJ)rX;UkyIoXSbHov^a>tQWJiF z3|5HN;DBV9MVP9#pEZ~3gBAXv;CB=LbV}HLeMoTW2>531C#{v26(GZkIUnbu9X{YcNMK(ntI!AT#SD1W|-0)*Q^A0 z>7P>^HgOV&H3}DN+Dq^czxAt{Y*xO!Hs?dAsh4#FSn1y8bG;l0v~B(wGSRdsHA9XV zSyfj8J49UII8*&%vS>1NF)Jsnn=i6qu|DRQDfxmqY@)l(-N5pY6PbOqI8<(v4=98|)<^dF~A&dug|j zGkCYjMpjOADpB6PriPi}?}#+tN}?8F*E7zFu>huM+t!E*ta0gGZ(cQl%o1OqaUp^P zws+kEU1<&9%kyeB$D^~cxZyY5Dy4%jm6O{@#kvn4*W7k6jQ{EJ&d?pG^$^9? 
z6r-Mdp!6A+^}*=jqy@-t$_5*i(F$%}NJ(>@vP`zViZd09&)hG8t7T$89sN!n$i!+# z@%¥ix)UeGaVk^s~o6ZOBS>`wdu6cJ*#gnC`~q4WUA?^A3j0+Y;rxG9FMZ(R^jN zt8K;xxVbcd70&zB<6~P!fyL=i$TumUh9u*0F$OkRvo?UD&3z>irytm9?ye{iX`7mtTvpd(d>gNq&`5(rQ_yAKWtgRqpsJi zx4sIFKGYUL$#%4RHC4Cg`MspXIm~K{icdY6_}UBnu%Qv6*c%y%V&9pdj#Xd zs56YGgdS?RQm?tiEw*DkDpM>TvFXcXxYgTPC*ShH$UGWr*icOvEQ{7h{tu?!I;^Sx z|NqCA4I&mOt%4wokZ!POVGly`k|Ur;0DaHrW{vTpONTi#hM$3t3TH{_!)WD2mkqP6Jg zIr^QS_udQ$PHGL2*yy zFfg97;m`AGKQGAA(GTULRb%?H%h+*pGSVv`)Xj7h=I|bb05J|7^0vAhg)_E@*t_yn zaQ_jW5vgR9z>Iu*man}CJqTdPdm$hiykf_rQ#}Y-@wZ}2&7=$R-}_|g{5A+y6?sOw z7(bS(RpN9CQh8pnRi=OSZR+jq;*=~i0^{Uq9He5NPOn#O5G6D@wU<!fUi*j{Yk!?8GjGAa__QZnW} zs>+w0BQMC7Z*5sxKsHG+8G#pczMrVBI0m^__2_i-*=uZ-CpdO1eEv3Xt4ha8&X0DiZ6euNjCoZdO*#m2_HQ!|=aplO0~5-Ew6v zD*X@V+uFr*XY#>BocV7v$GJE@gD+0ZbjVyZcwZ--Dwb=vdmDiM0>p9Kv+I*Xo zV0Et4w$iOSlG~}Xd{B;Fzk~sJB)cA1lZWE4KDs%xeGF<3T^vc+RS4_QHMP|;N~*$x zN6cM0HRj|m&*leP2jwD!^T}IgpS&)4c>B*!49gpUXdSdT{JaqfWs&uNX`%!73R*ed z;~|-&_PuC6!O_l}Xt6(9ewSH70i1%mLb;?@Torgtc8ucX348Lo7Nr9LLR%WzkQB7ez+of4Mj zN(R2i*%l@(xD@-y8E)0$6=x?CQYT}S>XcO%dnTg*jBrNS;(sTHqr<-Y+G%m^sAf<5| zN&Ii(%~KQg*6Gzn7V32t<@nXNW=0O-zVO#SFDQPf!?8x2qpg>zDHl`+l_9}?Rm7?s z70qfqprCGN?0B&{1Ghjb;F~()c;>tqU#yk9Kl`esPNu?L?GyUqQ``fWWi_mLI6_Ei zuUFb>1av<%H_pX*s@&x&|08450@NAlU&~p~OHehI@T0(Hc?D$mY{ZMv>D^qa{HH(s z^+mknqK)GLlR0aR)C3vFWe!sywfXl3wQqi4x9#o?Di(_Qfu{-`aR7U2{xvddoe)i( z@T2WGx)Qpr+zU>_s@~x_Z5eekTE<1QsMn9d<*M(bSyZw@9N*EsY2nkwVl@AGeAz@% zF|a#tr0k~;kL?k)!0kotz}qQPkJqD)l%mB+)HXHs1+Js21`6MOctG3KYBe=!Yq-r< z%rU$QVo7?xmcl?iT=!paR7OP>Om13B4(8wwS}3D-oy?=%p?E~%WON#l>Ufs?@=R(H zbrco$ZVI^E_2>k*Y5<}u);80oE&q*#*kqN=QQSv(kTgOjCLSNv1 z^iSp_U;XE7vyJ)#>kS-K*0N$50b{dK7S)>6YNfK?iS?>mPyl}k6v0iE$VXi6>~jC( z0t^M@2+PkF;QdEqDbWiZd2GK-m?jGT-IbD~F3LM@5N-j6DHt}1#ekh!1=oFQQI3m1 zahsPhP=B8JQ^xj@vhSjD5UUUu)7$+RYH;^N zDUajd+Z#tla5BR3>V`hqxAR>zH5(WiOU>_YRH~k>z^7j4Z0N7(TLt&CJ~(&4U5!_& z)yq3|9GiS=<{3OSz0kcFsU`F652bQ#OYA{|HUz?RD&>$K5B>ItoILwky z?hlOB=IURX;^&q2duOn6|EGFlKO>pdXLBn?vul#ld2uXZYi0YY-$^B$B4@CX8)XqF zJ;z%yIBz1k(Dk0xxba%f$ux%RMfoXmuaJq)`pCWDpiO;WD||<4!N^pBo{@-2sLTIL z!xOg_*1;|57ikC-kenP@jJ_H|Rm~Y--K(_JaCc%>hTQpa;k92UyJK@*C%YP{FDR@m zx4N1)@G@MU$7Sbk52G)h*-Yo@jqH`#H0dKGS1*tr) zEh$16S+8}ehs(J~l^;M7hsb8M_d3dZ;FX_{Lk^>t^Z=@|M=*mZe%2sky&VVf>bE{G z-Z#c7L4J~IMZH;&*NwBc>*<)f6!%xmx)>?U-t3NQ$2ddk+E>>38>KWf2{0y$$2}q!ssQ zLTZW4!ud*tAIz?Neykhtds_H~ULytt zZvz?3sz+?IWlbGIxF)X6b@*3(AEY`r@$35OtF1uQK}&aNZbrx?O`Mqr>U_xHzDG)> z`VV~>?Q55!61rA(NafRh8i8%r(?|Su>gleGE4t0O=zgIJOVFAtX}TS8&-15fm*?O_ zr3uU8$Jq4~I5Urex{Y+voWb_6BcUTtQP39g9kO)&a7$U*^aWjDa4jW>kaR`JH~Ps#cG2^&SUj6i=CU?^4^P_vsUzk2wSg0H3e;v)- z4u}{?$q6#(73oA4tQx=Wab8`h74u%K=GQA3)IiTw7o>`*SEpFTW!{`?Q#qCk+-`0R zCoX+1KcO_%9VJA|6_U=vjbXHX4}?G9@O;LvD&&U^s>7i(X+F?I^%-DVtP}wwrFo)~ zQ$ZxSXI`ftDsL>&E-hj-GANNK$v$+SQ$EUx5u*@uNOt;mD01v@Q;qp}rI~T9hC(FN z=?hYQn>Pp5E61uzz^kPV{)DP0ACVY{YFqF(qIawgz{@kHdX zcF&@B>YsO;;4J%qxtV~CqQ$J)D{<1>r@czbmk!VwnjL&Lz1bt9`cRGh4tbPH=29LB z%HmMi8;2!3^?MssyP0|A+1pltN2 zBVCKPOd0Zd5959`*>nrd?TDpYxtN@1Z$bJ9tgfPO8j9$BhtH7m2JaVaZ1yDF=Vle! 
z(5$}8|3$wtjo%(4ZIhexZO*$^Id~3dJU^a=P*n$S+8KG^R_Y=9Gz`F-4-Xshe<;>I zVf79_nu%+Nd-s@W8^%uU4FfN#vq^}l*E77XJDhg4oDgi^KPIXaZWT-R7M@oU>qy67 z;hMtEsdq;;qkaO9qn5G+^iyy}r_XCfmMj|^G1lrB@oGUKnLqTlK#r+|mkWxHI6@aI zr2S4Ik9iI80p|ut2Ip|5tx$wYJm2R>_v5A89?PWcp9Wb2>y%_sPYSmA3J;2)+|9F% zi41F><3-lIW1**Q?Znd}xll#emlZQz!s{!gXy+@ABHH)2)+t7#$04bhsRF;kqK|*L zN96nN<0P-Aq}5xJ%#WP?;5Jj%hLs-Q=~*=g9XOS|GZg*=q!v!pkn{If)AbEp?7v?> z-LJ4{s@c&Xg`{x`;;l_!z91k)z8mmL3~}-zq^B$IH8Xj-&1j$WQDJ4?L-W~Dt;_T4 zE{+)Py}Abu**503|#K7{E13DZc%1>!5Z?0C`>AOX=+_?FQIFau~R;+Xp9W}9IV?tBX;uAeVWo#_{CmNlwAOB5EcQKPFReB4xHx((8 z(m6J4JK8afs*KO|>Y^$9ie{OFG&f|}IB%f-`Z1xy;z@NZ(ado6hi6KC0Drx-#Ue`D zclfr(ok8ula~!xo=GW_7+-9o%ZF~8>O!lyna(3&$-deb-R)kR>(j5c@LUccJDpO5D zRV;K6&njf%d=*xV$iC=O1+Y3XVBO4V*ObqECEQ(S;B)^g<$SEC+LEN;J*p&_ocA-S zfi32N?9Z!H85bMUUq|jJY4Z=mzfDmye8yX-JHJYOYkl+{#%XUEYc??sO{}QPTOu`f zD7pb_6-VGoc_}o86xo5JalJ1^YktzO+vWQ_@!8p)y$=D};K3=?Go3njs$NKEG!FhK zf$I;uW86ArMgXkp`^s!N6${DNMiw>fHLrSyrny<=HHF!o<8&hTcvMSasVhB?Vj0P~ zHxqQ}H7OSt%@o>fJuUvyP*`lHVL!_(m>|VA>Z+zM2PN;FgIZyO4l!v1qi~FQGR4Sc z#Ne)LsW?8JB9WbK>iDeD9YqEvJzy07OY_7))aH}sJ9xS9$~m^Wd4j+U*bsb3Uld2wxF^@U2I8o5~G~4 zBboYysmpYqmyvKWDjR2;qPT@phwjIfcu4vIq%iBT1dz?`G9YSQw7bI|CH|nED;(VjdeBUwUg|_0M*ln zsVm1DeDgET>o7F%&f$wXsp2DkI@Oh{jOhbj48h%J%696rJrgOfv^Y~y;Tv;yfalV>sB3|QmCzAk{e(PLSAFa z_c6Py2rwaHv&*67=$DBr;H0%#G_@W@9amFyIXuz2ND#BXp;~;%1Lc&PobSC z`1czIbfP$IAMs@>E7_Tsac%4Wr8zE?PxwpoIM2ss&1o-B%`_kgLLH8JzdmoA*V%W{ zJF?13anGWb2rLW&InyJr`1qNr*pOb2)=MRU#!%I84;6_$SpD+>Cqw8@YaO0<=Q`mZ{CBnj{$VwY+F$HDX={GC zP)OxG@V9BpeNy@DfUbvZ++H(lNT^o89v6{B#cR_FSHV}bqwQ@1Qq+!#g!)wDj+OKB z5(u{{qjcr$M*v!GT5uGnzVDdpYJ^oNS%(T z{xmgyK#d^$AMnCX?Eep27|yAje$MzWtNrg)F~~nGbjh8ltDfci_OfnKj?6uNK)SHf z3!Sm)3~jBp0Oju#ZgqXl*hrfoUI*Jx_2Z&QbWf3b}tqwi7v}iZLoEc62SA+3`}E zBnv+Yay9zHhEA9Lc;SEanbUc!)a$rP@G#dWfNHhtaci|>j_wh|uM~-vRWVXesye3 ziD9VC#5gWMe z{5dXL)$ADF#Dn&{#G*iJc!i#`8K9a6M?}wVu^f^5_lJ?}mNzy&!jEm8Gj_oL(v&k{ zihlLn6twJf2H3ruhYGy5_rbIh_eGduXP=&>e8j@d4zu6qo&K;hEg3H}yNL2U(HYrU zG86Ncpw@9Zoi!6D`?TEknrc^^Mp}hJs(@ShV{iPePgioMnPy|b@2BeIPIM|qma#QY znUykJ{S24(m&ya0$07(QB~HO%gAjWESEpJ>#HcK+nQ=gmh9a8NsAT=lVU4ulL8cto zVKnr8Z4cn$uEc_enPiQcM(w(}M#!5jfgz@|#mNyrL&Z|>E6CYDvnl#uo5nesH$D&i zuT+IoRN!7|u9-IHa78~e6^Zf>B^9}TcDq_qv*{#XH2T%!T?}17xX7f|fqNEY+r>#d z$3uPxj0W3SuM$x!CHILy_@F>V`b? zER@Hlvc`AfoA>cPRwLO3=GOmWsDaXxTt1Jj0XOYxt1LK0k*VM+(M?eo|&ZqWIN{5r# zr|ileod3!ufHpjyl(r~PCtx0NesA|v{fb&LDmC~(M>5a}&82AR!J*peFM7kG1Y!KPzPf+Yrr_n|s*B0vxp|Vvvc496PU?=sDO|bywEaew zC&=sW)d!d#a}H{Wv(VIpdFQ#_$$kvbufSAWbwjak#NEu~(bp3~nv0h(sljAM3pC4R z(P5A}^)@%%K<6Ec&wF7rLie8`A(V4L*L1zRq@BaSs|eMIQJL@kBC1>ZE-+3Tdm3Z; zEik7Rd7>artBX^E26=`%?*VyKS7mgW*G%%CwWkCjUs-|fhtc3q7+W&N(HQ5Gx0S45 zt;R^VH2Lx(*~AQbLYd93QH!y|H|N)GTAhrvrQ|6|quo#K~U(IEk) z;xgRidxgIZl2+F$C2Lc*mA&uHrdBV3;50^+oq}g8OEn99-C- zygOKD=ccl|%GG;|2K59XuDA_Qz3jQ|$x@h2O0L3*jgIl4-R7$YkQ+xfHjC;EJvGIK z0-2guENdmiygnd%)GZ9@#c<}J+59TSf>rH_n_P-*lk(z>r{m<^V2UiVzf zU^WoIg^s@=Z;pPk7@mdZo{V+!WX#fAwOKD}8{hOr-{|OR=V+zn20{dc6bHFNIbnBB8_$r<3~n z<&#^-O@Cg^DiGQd{fX1x(8gI2kl=J<`*xtsgCcjL#khOvh-`Eq%f7J4{X&dF{HqBy zk5l0cL#0hhKgJH?Kj0lydfLR=c%>S(ajnZLMT96of|vakdbn#*3B_8X5 zvVAQr@5djx7m*kfOVD4&eojz1;A$KJ?H8>(AmxX-w?uOl%?21S?^nDVW3{L20O^EN zRsKvsMTS^v-mAh|BG)Iw&TdT?Vm02tT*W1r^Aq|yZk+d3?Iyaqc&r|=)_cs7#L_7$ zCGQ7A9~V3%z8)1!EV$glti%G#M9No6nCoAbVSH%{6m=+AXa88F*ZJZUp4*zbcotd| zYg$sUke(W>{$PN5ZrKP%){|4QD&p;?EvwHtKU}~qk8-+WC89S9N4??V@&?=0XWv|Q z8vFG(;s9-SCVz&-GQJa7Cxz^iQVTx4?udtlcQGo(cV+T5N>MB1-?=pXIu=m3wWv%-KwZ^Bv{sXZ)US407wK3z5UWL~3#? 
z2`ThCyn5^%$pq0HM~YG6{UWuaTF^q><(dJ9?(oZBH?$*D)Y|lXic|AHZ~XV!`d-2y zhJN)24gc!~0B7&Cg4JIdXi*gI(s5SuIQ?rkkwu&G(MZ0d)H2(eSbz`x~}LXO(hqwV!?UQ2Fmb6c=Um9-{AaBQE$hGQ+uTMK1UX<806(@w(QAd5L*Q0#sd(Tk37 znHLwPpaHiK;*&1|ykfE-dq7&xeJyjKf%N25gU9E+4J&fpsP*Pwnl=w6DGtpm6MCIN z-O)B#`sV|o$&@*q4hz+rwXiqfLd=Ff8j&n%#+k7sXH+@jg!pJxsk+qlpyqt$26Ibd z$@<>m`l7`|_$21)(v_a zM%%Erec1+bkyeLMTbbtdpxtGS$#%m{262}A>q~dkaiFr@0%7s+ub9&|)1GN|xULN4 zxPm-HhHG7V-O3o_zbQ^@%w|6t(K~)4&xge?qwi03pUYH*u>I_o_?yk*B;f_axMdHf zi}sCPK!nD?=Pqwh5~GGpzc;kDC?b`g=X;H=+~5)sW_n7n;5!(kVvECL`dAfUte8Ncid+Ceb#&qU+7I?h?nmql90= z_4Ml7|1cS_v^8$Xm!AEEsZ{uQRfr?@Iv8ILzm07M3bOVn;xN{;=4BtbCjfo`a?5OP zbW)r32o)jH9=<^7fY}SxR)aZpAjHwlZr}DV0g*z;q9$=UODggtVPq>cJwV6V21FrC zKHKm&AfcMD6J)>6d8Hsj$)A%!QHZ!zQZXHXou6#q!jgq$RTVVVa5C$Vo-y8Hl$`I_ zByo5o?JlMrEE4^OVElBcvTB0=r@8mQ7iQ+F z*_s@XLZ$plynxidxG=Kg!Gtz7W?d4Afv9GrmqvbaD3zskg=kivv&1kk3P7S0VMTo0 zrkAo>pAO`I3t(8%kqm##S%Y(ssQ-aBp7Q&?-a&2mzzCKlD>^NM0k>arUdJ39FNsi= zE9(UsZu&UYFpKSB5`f~~4tXcw{>19@YLKIwpGx3ZOL&Wc?mi@H=|$$mhLC^4&Fy<`7`J{0v{9OMK6FtS>Q%Svk&N6sUcOb=n$j)2-8>?YMQC4KMt=^M(%ij0P z?$c1hy-ISw_d5D_1VumE^_|lvlXS4T5*y1(oeUKTHy2s5Jt`#Ym_7Ij$?tp+Nf90Y zLATI8oq%{=#$+Q^)RD&O3{IRc;ba6Yo9HXh z*BNDnHemLH0)g^ZcJVSk6UjBP-fZX9-{bBk8(*lItyVBsP7z3FsNXRAX21_hS~i57 zl@_~M^KKO7}^leD7*QVY7sGAdq=fB2a8ldxX* zGO3XJ#Y@hSS*HIy!C#8E!Kog|a&%oRu|XD;XB!t{1yGVjsuoSAF*OeEU9 zPr5o{P~jQYLW%L4Y9J)Qf5+8kG$xpE+e$b@^KTMEoorp{Ue2X2F-|}$l00PNMMJp_ zM-SnpxJ?tMZ^lrLWYnOY(*$RNvbjiRzG-?Az1zt~?WuI35ZY*iu`#&fF3>=(v9|60 zGyeQ_0G~Q}NbW{OQjZSDkoyTZsLn~PZU$KY-sCWqUD{!yI>e0s+Nr*9w2IyYPHXH) zr&c6tN~=_zCClY?3_!TMb9`4K(6tfaFnZ(H$y@t%!QVTAW2K+3Tu1h{bvtyy7ot;? zMQ2AH$?waSwaXGx$9>PM7SW$0?!TXFuv@f9{Mx^L?fkBq0X3fwhQIvnHF%om;RBNM z%)c*C99f-{^2P1Z4t1HM7ixk?F)>kHSpw(2_wisaz8Vx}BD}enz-)Nd0>%d`Za~6} zWg5TeP`~!&gu$WRWi2iCw4KH1e7@4xg{M44k`?*RQMvwRJ~~vu)Rt68n=C*8wj77L9zhuzFZwcdv{|x%3}2 z_IuVMV)6tOUX@#=Y&eW`nX2IvOa#s#Um;jNZAY8|Z1qY+^9KVF@4s-^n!`pu|5vgO zO!m<~ct%eaTJa{CFrG~((H?X$D(-d#!kwkR+pyME%#prQUN@zIT!r4Ie%K-#o_G;s|0AZWZybHltGKcH7GI#IDo>S>DZo`A zbw8OM(5;iWv-~&1#IqVufYkEKvz5!eJ)f7mP#?_IRHQ)H`mWCCy%2(YHmgk)xo#bI zLf|)4+i055F`D$ciH2*?Wr=tQZ+{2BY|B00Sd?qP$(!AeHLMh{Zz1Imo7#kFks0N| zOCj>F_GNr4XjQ8USKus8u(jrBWjn$P(}s=$%r$CewN$z0wL=4;5jzQ8y`OFb2G2)p zEVv<<>`hB%5|Jq(C7~WbHLDsyR>@&fjVX_uCuK~VTW92!+~~#kbDk6)y#*_qS~`Z% z8vh*Yc}>owX=$|Mc)8c@n@Cr@jN!ENKKn{X$mI@F>znA4_D*i6uoC1r1g3ZWQH=L4 zPtDm~6(c9^$Ondp6q7x1165_2m%3%LT7Z@qElbYch(k8o$w9PYFQpd2j$+WP#Z{(& z`I0=UF_yDF`dK|w`PS0D-lJ*Y4VR%Dez?VM=wht{c#c$r&zJb!8dG`_qkIsW$oT5I z={VZ_wr7hDRa16HRgQthXDq1>6fT?v8JN^H$?6ctmD*ENQZ@XHMaAtejb*B-T${oD zH;fS7Fw-kLC%Uhy z{SWTdr*KP2@oJ}gU3oUXhs}9eT~}2~&rd}uc%-7vV%}T0I3<45k2(B=CVY_tv%Rw& z#VLZ}*^PJkU+t6@9w_jIJ2CSf#8l3iIIkE)y7x|t#{I$@iju`Yt5elq@&Qp*^*Pu+3V$nb5Y7*(PuQDr3;zPXXX~-&*)|2DJ8D+$&^^}3icc6R`nq-y57yBf;bx1HqRPeZ>#*>__|B2#OiQdNR6l4y(+uGvzyN zH&!GI=SVfzeK{9Jtd#)H{C9txMZMTwU#x`^hV`QK-9+v0_PeWXRr~0Oiy|g)fqsC3 z&GiiS1acCPp@F~kI02g@f_U;aFe)LbTsDuDtv)~Yq(!X`Jc)^r{IG=R^+I{%Kg+SZ zH%8&ua2zD%X&j4XI_$EtpGb9Q0A9AHBrTfEqM_$3@2ah_$k&@+0<6gXJ2yrJHSls= zwG7GGe4`LmXBI=9k*I4q^nf>$o=LGy_ z0Bs+L)y@ry`s3pNDEJ+Pz417?Z!NRVfVJrG$BJ?G4cIi_eCpbLaCOyiuFc1Y2wTQ> za^L>;8$1KPPa3--3LfbhPu20vz+e>Z9ob#p{-tTw5?`%dus$B;g~I(XK&j5|Pg_GZ zTKCmX?srq-mO(Orl&TdisE?oEm}^A2?NAMZ9`~ma?BhY(dNTtcBu9sQzYl(EFaq9> zo0Tc|ao4PGuz6O+zT)+f@|eXms5s5DV_s4RIPLy_H30YYgx3k z@;mRb{?xU>7XjpT>kd7WjTA>$<+e4q`%-$Q6TaQ`fV2^T4Bp9w^RmLu z_x0cX@r^<K`XT4ClA!PwzFdVU}|G3Q$=uy8^cS;^^3rSAK@QpjZ&e)gEFlFblSv3;l z%>;G_@bB_&76se9ud*m}6feM3S}05hxvG@wd@1b83~+OlvV?L5>BWv)t}xZq>k5ln zFfsk5X@K+l2^99U|MKG}7_Z%l7!uL?(M3XaA=tF~F)&l39XR(CDk(ih6n!9mPJ?sYu!~D@SB8A6|w@ttJm}2MDNW 
zOzdBKk*Q1N8i5@v@6>$wf#c7Z_qU+xCHP2oZog>>$tB>nw;+shJFiXh9hU!NQ>;`! z-Ak2#unMjUFz-TvBf4zpqu9Y0R^^x5zX_q32Q1iqOwGrK)k=akntTFqjg57%ek3jw zbE6Xpey{T~(?|x{<0=ALs3t$XS|VO*k#^Vjq)dOls_O^eCN4Xr-oa_xE!Vi&55SJ- z*3Dby`6$Wlw~4bc<4qchH$BB&Zk=eO5tZp8GvtA5YV6X2bE}iOn9;SXeQ>)qOTivW zVbW?vA}h{j)v#H2%Q62+3*YKgK^zu0;_iXDCLvH-5kNclyT%^03a_~2G8FiICGoHm z+V6*xC0>Gr=4I}sNQ>u{NGRS_Sv45*Ca4m}GIJOs{akHRn#rB;QY zZnF$iSZ7o9PpdjTg;>BlSx+}iuOh4wT3nPmYoM5}sQt+@1AYxu%%MOYn9IxtHkv;Q|B5;uos0CTV7@{St*yeIdmN(%&=LY}tlOi>X z-p?Po-k1B4qUh9x*TE7;&amCYs{DzGBNu=pNq=6ylRk4;-Mt3v1(r+LsXeE$6h|o3W3XXzY!XOBSZ<%2=9gSfPu$jV0X&-@=3YE~;datI zwU=2l+?>*(F-5Sq^z6bq?&%WFGYZ57Rk10mav?)mObtB`O*^0~3UC&!_ZIdJx#xpM zoBr!h?^Vc-ZqYU&Y9tIN*ujMzv&sZ(yIdKB*&cp!WreGKr8p0EyD4B%%nfr*aw#-% zt>}zAVsNpGQKJ1vDRu#!hB0<|tFJ|8-#IO%QOWX5=_%fN`M}?O+I!j6^Bz+Cp6Zna z&SxEYfagP;@!)hbO-as8)LYf-Cpevih?u@_05`OKX zPpBvoPry6gbz&9VUjI>{_Us)>i|83YG0kbdsz*Z75L$8>+~%CY7Q78xDLkk+;|Ef% z^#=I3!?RLc28(LQY9WgfgRjG4gkXG&WJikry3qBX(0f5K-WR|FN1MK6pJ9qCcZ&P^ z3@T55L7IO+SI85J6%x3ko*$4_0smgHZ3#?OnKXL#!wBN#^ixijc(bHfoo)sa7oS(~ zih=QN5#cVd);tx;q<#1}?Y;F4_}6){fO-?wl{@C^*YJxTi#YkaML?*#?(e~k2i<(^ z!~_`1xtslGUD$FRmkp^$tx-(WkFbXYcW+Di^$<`lEyrlm6Cec#TkdF(dPUT1J9THM30V9;F5~Q&)Nt|8f?Vqu)x+V;)KD#(_*^NnMv%{rr zZr-UCDg2}xI$t`WewVU6KUbIwzn=Ms8(vGS4<3G9nTgM-sQ?L{8lt@vSp^9jNe-f4 z%T~>TJUT& z04oz);M_wq`;HFeSt;ZM-aY9rlN`$7*rUctcX;Vf+1s>yavt+2OM%p2;jdy1*F|tY zfe_1A3vWbr{9s?+cCq)}-9jY+xOr3CbM**}w2^X5xlwO?b2v2~h3T!TL*0jdgiQ>& z+Ez`pdExRbhjI#U#^v`!q9GN6yTPDyuGGol-6v)Q-?6-^$z?OmMFmGlh=j`z^?Sw@ z=Lr{=L&c3vz1m#wv$Q(C@dazGy-1UzwmnpI!>jY2;=6~(7WWvgiQlveN=X|ah<~0X z1!P5d8&LC#KQTHzHcx)fxw-l|Pb3q6RG0IQnUH;JJy@2@1(h+;8~m9GNCCPXi-J{B z=wRNF7kb5l(KNDfxO8M-E1)&BJRGXn&r%*HjrVFfAMh@h{RswvCi{8H_Yk+cd=hT0 z&kdERhCCv~q|A*`yUne`2^`}yh`U0yIZYpi0BQzVFp28HbA-lk-#bYxpfVM-#=Jk{ zFl)x<`F2SmKs4u`KEYx1ZM0H%b)@&?q%Y1yxB&Zk{mjzlV&40e)H;|~vF63Eb~{dE z)?7TMA~|d}-xJ5aEV)6)SGJ}EC08Wkf72c_V7Zn8$|ADLu+w!TRB8eSE)+H_{9krl zi^I)DR4||VSkkA3k?&-Nq|{p?_ht|aJ%M`H;K<}T3^4vS^9h2V{0-mNW&UP7OE zcMlW8Um8dv`d?H$TiN@6TY5cb{KNJ5-zWF}zh-`(;m?3$f-SXFLEWKY^a_R=Dvxiox>t(e?J8SW(v$?vS&0Y@-*EVn)ol zZcy%+vfb|~Yi=xBd$Zbk8SbOXcT*ZHIV_`KbR<-f~2 zyh#%t#Eg02(_L7!`*9kT2uy$cpK3{IsU++L;N~7jyPo!U<*$ylq9{L@a(VDvB8XF? 
zsK6}ud5;dC(O6ac{NSH1?p(mF`oc4ZDBb`O|z_$lT%Aa|UcZdj6uOl4b2 zs)L$hCfs4N4iXEHrEuf7F8jgp#U&E>si_H70HaZ^$-J$^ADyOZwz4*^5gY^QF$rv4 z7G%smwu_Z(<7D0_E?byVt6syHn37{nE7#qd>)Z6IhAUMM8X@b3ZkjH^`HxR-5^?TC zZm7_;_$4I?Yg1Dcmi0zX2}Q@WgIo9<|F+tF4;$u-6x3)b&fj4VsJVLaC%Om8+OR_S z<{SwU^51joung97Acu~6?-gwz;toAv7U!+^WQcS}>MftWEw_C$a$EA@YHBrmqOwWF;OWA>Th`5ndxf{` zVMA%JMZ%DRY5W5h*wXsw?V(AlEDp`Y8#+Hz^LHSRp0n%U<>FbJE!)77t{-qm0B)^p zzm^QtQTG$ryF->i!(p;HQyn`pcq@FNEPZy_+j6w0xL->85Fowe5>*#zg~5%ZAJ2JA z%{`DXBN21$m|3E2Lig<_hxbCWk7B{mf8db*j>?LjaucYT zO<{f*@;a|ee#O=)MaZ{3vi=1Ib9Yw$tp=W}tP|L4xF_*z0(xI)njdG*V)Bds$oD{^ zLpMwgy2BA?IhWX&wh*^)Ok5}qTMXks@dVVHdF1|~Sl^-k1;Bp{=So&g>@3vQvN=kkPVWjX)^-w2a3*gt=hBv>zPA_d#lQe0mgbGUP-5fXdu z^*B>BI*}{YsU?4n4?M7o(VSzGRf!*{ZxKKI11UH(pS{Ni8%_J-e70yUA7^Ikg;e#B zN^yDw-o@Mf5m7Fv8lFX@>V3lB`SHV+60P@>xW~XO+FPcvg}QMIj@fLZ^q7g?u!7z% z^C+5TaJ8#@lG6l#$l($bF$$H8xtYS7*&8b62T06ymHoNrXn7c7cHTa?7`p(L(rK3) z#W&vef83B0@Jw%D3-5!7+%?Y7GjJGojQn=`eKgi{b*2p3YqNd6h57mOAhZqmDrBfw z7?J-Zx+fEW9=i0IkkVjdMtFh@x(HpBnjEWFfMo^S)YOgSRz`Zz9;GWios6 zp1#t za{o7X5OH$HyV&m1g`i$8Eh)=_VJXuIfCG)xo0~W+mPCs(b2RK;e_r2ac8Hh>ELa9` zF(gi7AX|MI^rQ}PXMliH1Xh)CQ3WZ!r|?v-TwAq0i;*Ic!P!}fF*7U405r)~VN)O;uB~ z&!Vm>HE+M%tjAkULq}aYClVzOWIC~d+6j*LP?B=`fzN}N$8hb_?LoS|8M`tf$Yu?6 zS}wwSH~NB|v#}vM(*({TJ(s#Ttjsj}AV_}A1EWMSMj5ugub?bBm}-Q<$qmbqEmLdT=iRTg8q+Vu9nG;&fhSePr>w$~LD zn=W(P+6^u(*o#RFcx70VSvp#66$t`zG3LrYl1d2C17~lVJvNgwFc6bIFX{s`rCnW^ z%-r*NGvFx9_TCtQaZB`Pc-!b+lXcV6`+kt+J4O^sLb?{;%-Wtz|4mHnbYXO(L62{VnWdn{=;0l#eu4%ysME)P6#%8`_r zt>sFwbJo0q_0>dd>K^|&-@jtsj1yF{4G%i12s#csVL6z=CrtR36rJB|+HAmc9W@}{ z8wS8d8_RUIw$3s)OdAY6{4UD`*!bx>ozxm7DXoo;-8xk100tx^1WXm|nXet1yrdk2 zOhr0hT>zfw^gKWB*VLOIKH-Jw7$3!{N)0OO)SPAJ3>o<@8)u@(2BWc1Sx+3fOpCW- z*hs38VK+lVIcAPp?sTT-G`03qzLRXJ)QkgfyfMmh{xpo!uxWc#LxLY|q-2d;F>Q;F zsiXr5%~-KvTscX0$VKy^oOD&+tVs-&;>A`yT(@al52_kUW~ev54-RHcp!GM|FIcMJ zDd+y>V^Cqg&MTg2BTFnBN1PCJ=d_znE+hEkkT!LqwjWAByVpJYbM7>(GBy* zB|V8rFlQLAM7Wb&;UB`|m9Tm{ zazRC<6!_!?b1E2ft_QMOxTuo3v;%faKjlp_x( zU2fJBqHF2-j+>%_u`SKOi#)rvkVRBzVbM1vV_(}Wn+RUg&+3_+77!q@AM&>52g58< zs3_OT1WF8}!a9(U)pKLp52yU`ZajJe%f-dcbzrzDbsO1&{@Gj0#G;-;bj&*W{)B`{ z#4ntaZ_%`OA&wMxfD+o?ONpBDPk2yArp8sMThU1>601%|1W8eQ2}_ljrzw$kS6c6% zW~E|U3K{<_?O;EclyYdP)BmM80hPO7M3)h64xY3e5ph^#!U>_dso?k7%1GJBxy}yg z!k96fJ~JODC?U9pYbU`_wFQ`PcD)<<)iOreBTrhS>fZzcc7!$x(BTT^JmnPJ<+!1w zrz|Wg--%^6oJ`FkXPa(MJYF~VUKFrUSB6bY6e;6r+`VnEHSx)j|D9bnJ0Cxx^Cc4* z0^P2ytt;Atca~$w6_jqDJ*q+@&^D`7-s*3r0L^lQ0>SyYI<1-J#mu=(voeZ%>QBnQ z1+D6~$rcv%p6F(Kh5bL0&ciL~_5c5!<7ruH%gUW;nwpve_o&PrX)2(&GB=2%xaVn@ zxpSb2D{~7t!G)uoIdFptqPcVLJ9ojm{WpV4q|o6wi}#lQsxL#QciplNgC%%g4WBVs!j38Z zPAF*G$M}=3j*a4Lwt8IX$}K99+zF<&0N5Dmp6$7I`q}Omw5fRnkj}8|A^4=2np1$_ z%q+938hLKQ@~wXtRn0!ezx#I4)q{UZaCg~?=EO~6uakT;Be{bY*9D%N9Qb}+?~ZEi z41i+EA#(m_2j|Z}nIf_9Fw-p(zg(Ml8B7(Qf|*G-=gvkEGU3n0o|STb;5!5P_evOC z8LG%v{VPaGHgK?4R{SHxG7tO9R~m0zYrKg6d4&jrKeq3nW+C~fwmr0B5~ajzzmj8Q z9a$B@NxsD`cvLyVU!VFNVf{U%0p{rQqDn;~DqWxfl(2Bq8)v)P^WV9fhPnn~z3-I{HC6Ju%*!pm+xKv&D2 zi4f+58~pJ&CRBA<-D9Gb0p0GiC#G%VfnN8+9C;e3 z%)~&6a8^QI!?Hs2Pvj*><_I}3a;aWp6RJy0Vu{_g!Wku1W{(pvpBS?o27hc09yvNB zdLD6alN#PG($Zwpqjzu3ZGPyr^~f2m3{T%73r(j_un*@v{&n`!-`KCzY{RJSSCHkr zp**a~ndRaLV9P2~D9v~rpG5N$A@zL>kxnz%wokRlNn?5YM}qrns8?av$`MPe<~5V` zxle%CMC8vpMoGzpCE~r^2rncDo@AGo$UVJXc_UUsh>KJ#eosCwoQ zQ&rm2NDP}<=U`S`g{;ZUAH5^LcD22cON{Gs5|H+UwPm&8hq`%m8_am$5DG-9WcJj1 znJa1sZ;tc9sohLg z5gSd9Pb=W$i>aB{@+xmVaV{)QiEI;KHiL+<9&U<(E1)YU|9Q)+*L405vEtZzD%v3lU z@!?;z`rQVo&(C5{F6QZC7XO6<;KpD{&-#IFHE$pel8vY(D9xz$BEAKw(o1S;5N zNFbK4)mUwuTLkBpyV~Qqrdc&?LRs}b=^8Q1e^%asG7ot`32C{ESw`Jk`2fiEhKPc)!wX#!U47T(##1@d 
zltuACkuvJup+(g(3$<_&(@+}eo(k!S>LTu0*W(Y??__Wb8+)p;d*^ zMwJl7!h>8RA-PJ`fO4`IcS`uIxF2+$7{hMpH^%fj`> zD^4icq4~mb>rB-U=u4+2ohmu*)X9361oWMr39oB8lcGRHJ7aL%Lw4h!H9 zHcSxU)KPLY-7Vhs!Rt{()db*5a&BEeZ{?^9Z4(F8LQaZ)$_5bCb%B_hOUIRD(}Y!M z+MavB@cT}WZGhx;BI(+XrR@MyX*eD8k$g9xYF{@*z0iL&eP8)a^t zd#ih;zOfzpLtatdF>0N*-_f(f9xw?ct{~+fgW_=2nht~Mquo7~936UQ@b@6$F#N3< zx39C@<;6itF4rM$@D5{@k-YJP1p`6_l3;nZO@RQ~J2&qDhou~1wf#nX{&H*+Fi#?= z*xxM)7r{u6Ro+C{?v?ALeY2L|`nS=`U=*^;Ip{|+AL#u8R^Yd1zZ-hAtokDaU6+w5Uy!b)T+uKtl|CAeU!}#jaS7Tz2NU;AlkfZw9qPx){hP7F82(aN<}HWA`rZuXlHghG*Ci zb*|-H#)yWm;&E5gK!#pqe^5DV{IXq99Vt^zv?T(8w!2n|4fMiugiT%$tQ>I{#;9kB z3B{&!@0stoOvft3b-n3fmjly=#~5<{;+!dk{FJ`kkq4*>CE?WYDb35F_Lj*y&A((r zY57YU0+y+kF4hgprmLt+3Ljs8d+lLt;r||)YAIkahjtpHaLee;)Z2d3LcVTGk}dpw zeX!oLmHQ5D7bOp`M)LZU=IxSY`M?TcNynR30udVg_uIRXH)SM;CB0vjI(brCCha>u zX4!qQ^IwP9ld6D~i0hq8N@VZ-P`2DKW`nM_x<7P5T20Wy(?O=at2~`Iawnv$fZwz) zmb1ueMNMw=#)$65u@0`g2K9kjYmMr7ue$5f?=S1Ai+AqKwbpX$uJ$aoh2`-lwE60F zwUm*qmA(~8X%%cNwDR0ae)8)xr=j?N=ZLhTrOBJg7S;atzIfF3;upbfMTBN=cFpZs z_Xj9T14W4P%x9B+TFqt{W%m3xCkvkF543Ws;bJ~gIb%OySOH4691n|CaBpG32iU%= zXVHvY4eAt3koK{H@ELTS$Eg*&^|DTCc@u-SdOXEHxZE+6KB{Nkqf3X&nCE7~#l$Cw zXq=yhx~UAE%$ntNE7-$x#pMUly1qoB1DKas`H^D)?0Ki^Ey{*b{1{Q zo#A|>rY^nJHkE(E#!{R{G``m#%y2Pw<^4V%Sj+n=o84m~G6GjmMI@Uh1e-(Y* zCf#fFXJ1OkU@^QpnFa`_iO|Z5elmL|a+XaJD(+Bbj3%5Vq+49vbDiD;yjLz2S7)(J z39S`ZY;;}JG7Ewq>sl<{lx)nZ?pcFdPNl@))qqGjYwM?b5$w#oQUBH6ZRfWaPOwi5 z=4J49u9jg7uhO_X49asfv#aF)lv}k)E-5XPY#@Ewy*JW5A$U;kKOJ$>H=&pD#Kyk_ z+iZLL{)Rv}W^0kl$^D$i2b;J(c)Gj)K#$o2`#)(72DCBZSpA$Wif2#A24IRgkN#gy zlfRheLn zXvjA;rz&~rNM9R%l7*0M-I&JipEngdXE(`3k?4Jm@qrVBhueh1mjbjRV?xC&L})W_ zF04|r{%xml`kd}PNSDUL)9<^;Tcb33hv*npuKoaAq@G}wxAvC7iEKjJ1{JkgxynDdyvz&E;#8PDWf zo);1?D21^#?pP_TXOle@A4p^-IbP)9hdhMEM1b%GLx*SFSd=}%r?j2pkKAp`g#7G8 z)2%kEh6=$tFtClJM=^Fuy?Ki3QZ)0l#st0koQ;@3UpXHzlzP+S`;5nYu2E*rW8O_u ztvku}l)r4&hI(M#b%WL13@_K0kVW#-2PCo8CwHH9cNX%z+@3RTnG?l~*AZrjkjQ%i znEtDQrL4G?IZm+VrB0Wg(PIj=<*pV)GRMO6E_=Fh=J(`|3^@M{c+F?J)oS0r!cjg- zr7np&7IEEGmAeA$f+_rr;@Z!1Y62D;g#K!1N8R9rU^=84uE8FwwGR*96b*-79IM>H z7yfD|*lN9F@0SE3z4Ye^rS*!BWJ~783kHh}9n1H^@N2P%S1MMH+A4;Eo@lUAs>6JV z@C20gjoiAt6G#xwJ>PEV3m8uA=3`RJifxW+tmkHJ1ImwvUaX3CHTmt}yQB0r2P@Yk zfApp7lbq_5UmT{Usn?HogZ$bTUHb0l2O*fzfEQGwL2xX?fci@S7(bmTGauUZYnz-xh<%=SHG~ zTB{}*h4~G@CVv+eDZ3J^Gd77gcoXi(4>%Rsg@8n{O{H+gLjn%DYMU8a5u|Yu3o9 za9MTPaXT$tXx+<7k<)@jtB`lo(o&jP9{A5h017?R1dzSys~V-X5JTac>wi~+s7?u&+Aehn> z+;tJ}r7i3Kgn6&MKn9j| z#Ka}{5DS(BI)6Z_4gYKCsJtMbn>#NVgI#_)?&DN7@6_E0^!H6@vpf@4Dt|}T7>u+eP+d}K6^*E?6k^7=j zr7km$s8aa1lD_`Vb9*I7V=6{R307kEvcLj0{cx;tV;LA{6&*`l*yqNT^P>*TsCBIg zr5?K_vCrRb%hjcr>3F-1fTn7T1GU*pyi7>J<; zzTVjlj8XnNoRmrTEpqEY_lNaMyZu=$%m&^byP{+BQ39WYx&t~&WmX7j7pj}Cs1f&F#yc{cT$?FfEyIy!g0$Nl!del__5Zp`s?mQMu$ysK8 zzc|)`J7M_yo=X1pchqmmXA+=eN(2gj*N#D|hRy9}-K=9qLEpn9R=hDO(Nx~BMAu!M z<~7ptb^o;L5stuMjch5-qx9VpQu9p}%k!aWk*WHUJ%<(w=)$JXZFvy^YTd(nySZ=M z)*0~o)7?JnXe~uX-jIO(p{Wi&a|b0_EI-%JNH~+eX3>;9DZ&01PV8sD-5V^)1Meb< zU-^J|jbps~4c2JFex8z8wrI}?V>DeT&#?9&RvP;icdp-ufbSZ6O5`LD_ri;YD|z74 zqc*`fN)gL@geRM_Su3R9fibRQ# z%9fv2yVAW7$93evde_(eYKk``>r)p+xp1X75BgoImjqKadKi^t_dHZfEdQ%OKDlr1 z;RR6qqc2a!;#0Ou;GoS}-`i6N^N737nC03(-}Z&^qapb{Pj$HYW>{wYn<2zD zr`|=rVn${*J2AJ#+ezUtE0A+KIYgv3>vjd!khf!=h1LL{8ncwVr(l4DV$q! 
zmJoG9$g3I!7)dOfuANtWERoq!Sj7JqFTc}VM?TCdVI2CudJ=vJwV>lz6Ms%j9wq2m z!6C7%i{8cZoEPcxecJVOF4%?XptgUVx3x`+%Kj5M0ini8EQ6W0Z7qHYDB`*VXQp&Y zgIw1UC7CDe@3GITpq_|1ZRGzwq{zxmgCN>B&VhHDD|y$Y{r{gO6BF(h_iCcpdTD^B zFv4cAqO4z35skm!i~qZ`_6gGT`Syc4cPHzHfFVNIw?3(K()-7Ji;^YN_0a_PnCild z80Wp{H{NlcSWP8>$Hx+_bcI@zV?)fJlL`b5Z}@XL1Uugd)i@&-6&<7)4VgzpnMn8N=adZfw50BeD zR?XpcJI2_de=eH9HTVr{s(u;jOzbjiSi?h@a*gwGZ1Uw_ZY_i{NBH|~4A@q$rfO%J z-_)xJI|~Eel3!H7O^7op|3HOLL+4==>5RpQ8#s_!tst~&al{cj@A{9g2fAb92t2Ft zfY_8`uQ4Gj?r(k=ALaWUIM2)oQg*obO=~M^7Olf3ijWy?sk#lGb?XtUG#_)S4l>VL z%3)U)9V-eRIO`MQp5MLfLt^4*TEXu{N%uYu> z1Z=ZXnU>dxPVwfsbmn+(#Nj2^lH=>&1um;@5cdIre-uCvwAeX(uE47sD3%7})*;rx$G9ZJvP+b?BAZBi^3d znYK?&{a-lR5JyM-G}`|5j?5Ubl7(|1$_Alx z+uxtRrqp3$@`q0nE>CBYN?H}&3#?}1%*dM2g|{WPrF0)JEtc7!Uzv9@nU!pyBM?^e zO2ky_myAh+BTLZ1^@0PFF~=H=0{EVtf16d@lt`3F;@(#p*Ez1{T^!8m@=9;v@0sDl zNy7t_@Wy}ViXSz8EedZ|e>%tDBO#6;{bwBgZjKFGA)T`Ae;uYOmGpA3L0hg}VMSl2M!SCgDd^K-D(xS8c)vmo%^%}F)G&h|+FrqtuN zs;R--2j4@60m=H*y0`CzFzwT6tOCQ5)ET9yq%h ztz#Hzl^YW5$ZLxsT#X3|@wtf?uhvJl(Dg||epBC*3nZ8m%-*FdBnYDE&xru60Pn}X zaI9U?g}ukf%Cp#%jwqD|>r~fS>3P=g;MhXjQkvkzFe$Z8&P#}bMQg7jjRH;sSFXB( zFttO>@VIyrwMO%%U*Orb;~x&8ME{t7bJ8KEZ!4g%`K`@4^V4EBLHSdpWx`Ca#otxu zLYf;cZE4#qaK3cT)jH!OB)Wcitffdl_oL&ee&z&TvZ47yZ-<4q=V$(_gkWI6#ed~3 z{dRof?*#eHeSO!$E~H77_FJV28*_` zU66Acpm$`dvhJk5zT*=aK2Jk!QC?*WZ?8F8H|{QU+*)5t3%tFn+r=++U>ho4|LHcJ zq%INNv~iMrLq5bZaMRWU)_8|lM~HU_GvHMEu>hEz`Q2=_Y%1UHq@XEvK48=h(I}Le zcOi&~Tf7fo%$Dy?W3}_wswC$|1{;+RIQFh6b1UT_T7+2g{GG7BmkZ-pr-+C%l3(f5 zH$~l+>2f(5i0d;LMnW@LglTx?%H3x1pfyN94-iF9rRY&UhoqUvwx$DTkE>rtn-ue0 zxd(z@oJJq>UZICa%`0PRW^U#0<_M85Wg>U7E}Zq&WkonyS9u=#&!T@ruz&tL$0BcZ z-u2AsTYr6*%v`A?W{LUQ*~=~P#IPQmccJ9OU~@P6S`0#<0ED+7|X&ap}($a~y>Rzj{#-US1~IFA#Qa+U|Ga3(*BGZ)%F?|!k5x=R4_JBzgqCfa*!nWl8u)r`pQeCiPi5nkO$DM zu&`Ueaq}=BK|)vP*_y^W=fc^8&6+_A@C0C-nB}pFHBNb>drpgsL0W{K+R(Xz-yCOd z7rAx^^SXWMB>uZZLF4^|I?EECn@@p{e}46Zw|FDt&V(_bSKh)Nr7X+LIj#O>w*fI zd3}(Dno@5FXu9rJa!*G>pO*d0P2SrT9dm{8Nh5^^95erTbd`-8^moL<58B&lYdT;_@>?oZR<-hy z18eYkeOJVTY$W4!9olXt=9XGO6}Ppg*UN0?ya-l*Qxo@fgcS%|6yWrtFQz)ZE#E7> zC|DoU(?9|}T%VO=-u1o^W4a$-PwxR@rSy(IO}458&*kWIc62fK$C}`S3_JNP?SBqZ zJ>@W)zfKTEqfdq3GAkCncw)>q{NFF1Y2G#>=#7nE0D?TWL!QZ-6x5Y`oWSf5#Z%gfR!uyoIca#;G%hLW`jtcfhAA6Cc4VpX369zoiZO05RiIs z#rsz18=0)6lD|&HQ(6%kdq5E6wID=5(k|}PQCRn*c&ycs@ zv}!Trr)fOAE0VZBB*EWY`HBB}P`fw z`;6y+qX?e(*rmQ^GrmKZl6C#8S0y?1k{wb=!k2D+quumP0^nN{ycrC;AQ zZ@|*`MofUU=yDq(^8@u}`)Rd>aP=wuq;)3G=^*8Kb=6QTgVX_|CEu4s2^vI2%{g=A zuQ?o&Si?szWaq{O~6Epm!}H7 zvLSAyOaVKh?Gv1<6C_@Ha_NRP*RtKd!Y5%8dv~PA$*O3lRO=w{_~laQRRSr>daFrTW3YDV8uRqG+izFuEcVV(`25KjUWXtAC8~p%j8vAq0HO~ z6zyO5D6G3b8M4JvO0EGtFH9$|R{rD0njfzsfT^>La+F2bz&>R=EU{CHALj5N)Q0D^ zx0VufqeOeeCIJYx&&ji7MP4&$QF-AP;194?%B6@&zrycElO@40ov;+@cb|CXb@{)K zngo1OZ{a=;i>cl}8P1|vsOsICskwbN(P;z(d2Ay@iaOQYQ^$QlK8L+?*uNROvq)NH zi99yg<1I*w1NHz6OVigYs9~#3LBeXfT7S(m>w-GmxD{+0ekheN-DS;%l^Nd~Ag$m~ zmue@Fz{3(K;sJU)veCzLczip?wnZxZp~Q`8u6x0F%2Lk3VM*My zw7SR13s(eRyVlOk(S-0CW*dLr;~Ns-WHK>FK8Xhi~#X5;IVb`m-7^mBDzqqo4)}wPoeR8xPxjbTY%$ z4Pw+U*&2V<42TCf+vMloXz?DJgX()3sfDaoLRLEW61dn)#_*D=5RCwL^`(y?+zAi6 z`6!Z^nXUXiDy$<*e@1A0fS9vGCBUHTO}Vve|Iy?Qe@Q6rU?UmF6o&Z68#XpSV&>?yM0tomyzz zEGF=vP0&2$w*Qme0vXG^mY^ns97#-GAGpnRrG>_x>BF%_)I|mLYi$RO6)v<&@273% z_sNoPt6*xhOqKhz*pHT1O|LUSNkwksWUVdxNzOrY*Zxjv@_vKFOQ2*XU!%dP2N}82 z8-nWIExqcWH34h*y($PEi^dYeUqr4Vf~2bN+;x;+zvXw^?1Ohf0l=z?!{v?TM9Wo4 z{sB@sRzan>qYX9k45)rm+eG!rBa)0m-i&?wQ((NzwCK81Zyk6~iXLFogKZsbXFypX zXL%(CS8vGS7#%L*E8^>o|4Zfj>C*2<=Nri)p5XZo#!5yzzW0 zC*dX?k)4B6@chFj11d9)Wrf-;v9}KeQaG6{s%g(E+A<|#iSc-;i=w3ge-chQ-D^%< zL@ck5x0ZKq4Ivy_hsP{M@e0@eNvnJsLHfsK6SZgo{gg!jKtH*0G`y@s$^LKI=t)k? 
zr<(yTsil;Bnjh)XaNdV=K$QU$ref42(khe4+=%;j!TI4?4-S>E$=t=QsPvJi-C`D) zeeuDT>H{pxtHJzj*}w4EU?;`HVhg}sCmW9^4cTPF3$2GK)p@3}*F`w}@CXx7(%y-9 zx;UwfAyGCf7{RTB&)_GYjq&Wz&2son=XA7P7iksE_;j-_MKC)Y=6mG0XzAVJdER}o zQ4BM7XMX$+(SI+o^x=%RO9yru?Dz&UShHm8w6mO<<~WU9&1I!)s~{9C^|gmhc%_vt z)DO1U#7lYNw7qDdt-#pw&Yhnnp^o+z!}IDqdQ=|UDyC@N!jGvn+R@);n0 zP3B!uW*2x)99EOg&%?UERUOIpGXV?}7?midyIi5g<1!e8MA2|A9#UC~DJN{>BVyX6 zY?oRL`Pb9=<)FSQzO+a52SIHm?(<@gh<0yNU#0){0u*r2q#kU8by~dn!v5)dlfGvX zoZ9lxo#`-B13K5tuwizXQEBN?4fKx_?bL5RuX7}ajjt(N5c4m80+9Vayn-igQDf_b zkMB9!z}d#KZ6{c3U`R3ZN75&m&^iQ5Tc_I(1&Juk%znD*`J?RK^{*z$e81vc0_rox zK23qD|Ej_=mixmd@MQm}gIGjbU_fNt*$su~;yvTX)GsY#=IYrrSf+G5qpI_W0#`Sm z5SY+hwh6}i80}+=2h@qY;0IAtD!y` zGIFFTmOg{O8*v|#qRl*D_k6N$Z8rSuMpE^o3G2S%_Uip>vK5=eml8bfDj)P=6^;kH zMXNy;9*?q?xpHg@Lsqq?HE)hBb2}%Ox5va%>Z+gMdG5YC_-?3PfjrK+OLn$7XzHu! z4xl=&W^`ndv&JLyJXr2AUbHR=Zl*zH-i-w{<>en9(Df>jg+J9Q_jmN7w(~R5SlG@?C0WjaNQU_U9%0gJW>8 zn^{#&pTx3B$L-sdwrxIPdpWm}1_eb2a|jmQtJ1>V`wb6o+4ms*L)V5qMFBl+n>G_; z?-^S6Wz@?}CQe>f7=9~X+d2iB>8jjT1oJissbK8^@xGf^3?s?8+dN^r>sC$~o0!Xh zh#Q;9N>2|B_VGD}U*{9&)a5P9k4fgt{PDaC;*P9%TzmcA`gD5A$iEidRh+!(TQG&q zt4HG|{wX(t8XmjW0bP{3mKKnj(em_rDCwT2mPIS+7!A?sAWQmT<^itVA@y+={@zkY z5l-FaC|JV?4M98rj^_|Ibg=%@VPhgUk#5t42i4P(9UqAoVqJnpK?dolPR<^eM&X)@ z8{=B?166)p({*IX)CuM+4Z*x-RDB^kRomvJw3U;woNYNZ8GFNCI6*?E!fOv7_;Vh1 z*%v96`b3#)(abCB60o3SmRRqWS%c5eY#xHXY;p#JgdXK&7lIyj?nzemo(Z}rW{M{3 z-_lD9{2Simd=Ol0YhA(2B6*}=Hs3AHE6F_uOeWr!CXYle1o>tY9u^Bhs#D5WBuX++ z+({f6C#F-DO_#M7g1@WAjPu$@&LB&z>2t14a<)1L`%^wysHt7J5=u%|N1|QLl!+Fs z5Yyf0!oe5u-a7bR3zPC**D7C3dgoQQF7sWHYGfn7EwnoY)YLsMk~{jsaWgL3cS zX#v&V*A)srnUo18{!F7eIswP{;J=WBY}ewoSWTIv(>Y%5(T#?2T4Pz&{)>K*XC)Kj z4e44mz1>fn@{RXw8C4o+9e*jopx2cU{VCh#^pT^l^dNpwP_UOMy7chyjr4i_b97

BZmHng}NvfhC1iW`st`RIDK<_@g{%>wonq&dDWdW z{2Z%w2n{|G6CpmZ^LORKkOo~b?S97T=0piVxmq+nJ+mO++VA~l4rZ>t3>Cds@}ap= zpt>>)tSs!X_#Wo3rcx^AQLBG<8n?J7>ssdT(T-o!1(-xyvJ@26x(X}YJ}^((Quzxq zOBK@7U4vt);Dvdo3s<17iZr1XxvO3!&~Ol!`9Pv2v6=AYXc~Y~I8y~LRVX&H4r?h0Ir23lnviw1M&xGw0pgD2C zZKV|Ix~7osa0AwGhGpg>01N8~hp@z)hqEJhrjOG)cR8d0i1IzyeonW2cT#O^Vtb}| zRgFSWAbik}#S*Sndfm=^VKT$S~5~6v=MXPPJ9vH2d$|l&$%a zVxxnvp?h&&rQo68DzmmZFkO?h;=j88G0Cc=_0W8`EkK}|d7Fi^cNK~URqErQt}^D2 z;9gBG7kS;${aU2Tbf)#uPYieQlV*gTKz`wL$5A#Pmm>n;~p=I#Kh+URY+ zNO|5+lejKhv>a!E_uinYR&`swFrwyz++9_iN-_#)O9>%swsO&{Wb-&Zy_Vo%W>^F2 z{4RGWYhUq(&S?8@*v^sTG?bv>`;{lU8fw#@Htty+@@j%TcaP z{yZU|IIN%R%FulUdaPe*{`^K;#<6?XFZJSl{Kj<4s|v^KtH?Yv4_8^oj|c(TpZUew zq;BOSa6$M=D8brSs=tu|r*ZuFL%ndwKD27SZ+l6jc`iQHV-!4TqZ4Z4EPW?zhLW>| z_VE$r1RqbGs2I9!`})iqfr7^b|86~Mh{ts?p)!M?K3Jn9Tz7L?S}@Gx+<7jJ?j#3p)je0X20A3 zrJ#R827YO892)=^bep-y*sy|$7ktHv&8ycV;Xq^|wx29cSm7YTZ;{%QKZ1Hj2)8ykz= z+fsAu@zyeJ61<+xOK)Et+%(E$>mjN)q6-$wUpIFMc)gn-9rs;)6!UrJFa#aa(fs&x zyubeHYzL=NZmMhoKdrtN*?(=VS9L$;S3bam~A7 zd8Z2+hpn2{bDa`vMez=dkkiIZBtDo^MXk7lJe(q*9>vo*CcyJ76Ycx#<~(P zWAO2}Wxgs1y8o%WH3Ztlp)^Y=jnKSOvLYo(rKt6sjQvdlssqbCC1!6=dOPlH2>jTBDfaEn*_DUp1)eW$In_|d!?GG}8?vAc zQ|ni0V}zk6k9%Z34C1X1>RMXLa4+nSwb-MEN75o%#rX5@uupp?Pl$t~JTb|WmUmJF z0RF+@QEGW~(k}K+qbg-5@Nx+u%G_JEEhbul6!PYY@PaUVAjH{ zKuT8d+)sgqp!F%E12N5T(`NCB`vcy10gKA7=1bRJn0?Myn?2vxbwPr@Q+h*ddR3ff zdb1C%2EVT+Ao3R%7ul?+#@$Rpv|Ow<#@2Tq)-ZwPVW$|Hm!(ejgr#(@R^y$l>XRn} zC0{Tw$r91|7xntiK_-te`qAG7RXW z3$Ukh=j_yy%1hU^FIscSurviMv@0g!D@ zlr0!&P^h@CU5~%@ZVT)8eVO&cCMu**;Qg|{B3WwkHT>n!9;zbHDUA$C(DQMO-#5vC8@2Lm%dk6HeH) znc`?6p>9fuZ>da|?;3>*a@C_52M}+yXTetwjd(O}Il05ysz2JL&AAiDVEmkQgp6vP zu7=`BSG>!ibhiY+y{*5%b?!#mB(qlduJl6YZp#o}Xda@MpfQ>Q@+ zZ*VIXVLCf!+=7JsUmv?eSSE6)YFs|yz3))WuhNT@R&e!icfGOlabuB~lsQmGo>Q@c zQRTK;g0eedpDyorB4KM#Z`&{gV8FN?V{=Iku>hnsF}aBy-;C9tM(a;7( zK!romkd5Mb9kq_9`Dn~Sw5jx}Lv=(|C&{11znPtn!KgjFp!4i#i)i*PT8Q&l7{WeSCrmD@u{RQS6=~YD;{p!9d?XeFW9gP+F z;TabxvPK(~OWTCI^A6-QLI zv!WXCi@NFcV(#RcT*z}*0?Mo;%6C6<)`21haEv-=f_BzXw<%bS1rS&ro-c0+Q9D96 z%oJCTjJO;&K`S!vSvLjlmhi&pO1IWmKXn9@zUetFr&=x3>AJ& zd28>~k=Y+ON1cV|66QPf(CZrMX?9;`h5*z9(Y9CPP{pqyu&raBEOo6=@jvyVEH>69 zLLlVI3|GjoMU1!UvWqFZZ!jFasomo%TW*C=FP!JKw@7*=aQD z>qB;Go9BLI)nt9B%0v_&@CDEDhyTaXdB!EZ|9!YKnx-Br&Fv{oQ^bv$GxNALGX)eJ z8E(YAaNsF(*1W|9-$DUhs?`-|y%1eqWa({o7kw7T&wm zsQr?ZsbZv(`q-t5B6hFimyj$ny8hEvr=`TlId29tE;s@bedBs%YJ6!(=QJ zUMVAPB??>FDY5#X1O`zwFwd&qj65rzy8ngMFI+K|(a}vhZbo7sXV%ezQmc$omyd`% zbGY;3q!4u&t2ohBdFM(VYEocFt@u&ER9v|DzNouhVkxP^py2fTW1*kw)HT@X*0bwO zcG+t~W4UL?S(Y~4Jse`In)u|F+HW=g7KXk`KbffglBZz-0^HB zsfv?niU5&o^c{d1Ed=*rM@mssCdt3i&j`7DykL~pYt}N+aHmVDAWaZ7Ft;>h^SsxH zl=p63ob3GMOwTVrYxkNrPwyDFI{4*L^jS%R`&>v?zWudF6fZhMa-z1Vi`KKi;YxgJ z*W=-$B9FM+)3}R5fOUko^q!f|PMv?drAdf((}aZT zQk9A3A6Kz6GTXz<$dWzev?Y%CZg9 z7UG9&39a2HGKwRr*F~kyP%v+5e&7SOPg9Cuy;x87ZkpoWSOeBGo?*le&sZjW?+`q& z*}rn_0L46Q@d)izmjn z&{n^t#gipXM?5a-OiLo_i0D2X9=L5>;l49eo(8d&J%Eqd5TjUnm%?zy z&)6!wZ+J`-orcX*Py)k?5R<=o@B2YR?Ui1j9P>7`fGny$MJ+cxJ3%sEy=!aO-gf@EZd*_de!E$ zAF-@>rwg`hPRfqRcxf7VM`|PL=`1{^$4#vM9-(9%>yQVt>BvVu*4@*_V?W&=ELxeV zj6{v3n4}i{@_8>@7bKd8hPK88HBHnK2Sb#T1@ioun?7&t;E%WV+r>{uFi)vcS3(^p z-7nS`a2~W1mn)%dTMMoqw0o6Jtc=qt32%#urvWLeBRcDm`7u3_oRRmA$$;h|^EUd7MS1_9AF3lq zQVW{lekPdyXIM?~>IdooHIoWus~Dc$(8y(HCQ0WM#FxD|o+bwiS zbtC;+shT~gBi?BUal;h~RN^u}^#&j0B<2#YAH04^wiQZW?#>c0tSuYiO*nKIcu=U< zvwu$gLsbFF0{1E|<0zXApQ@>h75U1Ea(u@?S)rPyRt)6q*oCmwQ!gmOkU4bmXtMPA zd#5j3CGe&&gUK>ugv3DybEH*@Tg2xYKlifNih$pjb&B*kijHYQu#vB*QnQ+MqrfOf z_*7-bwc-ed*=-0i;8LN9{GDiST|rgQ^+ zysm^=l^7sAQdu|;i6XLCTRgNccyp#**Z1ujZ}4>VH4$>~rY7NfTPc%5Qt+#D)eHH% 
zAow_M%K+1MBA$&llM%bxqR~D&3spo4&w0mG)i?TC4G$S2~W z!|aqJBsPc7ZKODMqX$?B@lItnU*S!iyF~2q=ngqiP>ua=<7c((OE}D<)3i5Lzln-S zFKpLkQL{sTDOI=v(6JpHqR;+a!hRPo>+fADD-vxDkiK3zo^0U`9RI!8cnt>0b{l4`Hz>8W-8>JFD9vue;u^#x!!RxhpxIiJLsC+fGw z=d#0cNki4m3udMKot}5Qjkh zRkN>#{KqHI1nprA2p`obZ}wD$gOm48-$2$dU-*@jFG2#)0^+r5u$VcsT}#g0 zE@Jie3xcY%c#<`#`hbO7C&#cq{D~@uo~zLJ>_3w_ffG5CMsf&!hI39m(hvEbu)qRyTD*~4*UahbA4T!pa4*ChO|Kb+c~ zr%i^tc1JzJS4zAav5ajlp9-9%N4Lqlx2wHTX7ccZ$H?vbEOfl7TvN8=xdY2e zU1vTo>fbLxuoJSGj*(lBb4e+;r_UT@e;FySPh^v>auk?p6}*^LZ!cEmIha@0gL=gh zbrQYjeaMyj2Kb__-Xtec<~Tg2Y?Q->6JPLn7cInxh zcwf*<)6#(z6isl^u&>3g>wa{dg=vVLu@RuPT~4nLc=;U4^Q%9s>iX$db(ntTqwZgO zX84qRQwM2ozvtkn8pTesnr2_x$V7&=C$Gnw%pvjQ=xK=h;+_eKU1}6nx4pH(JSLJ1 zni3UzSw`Eqbj&bA*4q!dAW=`P`xv09&BiNS>~g$8Z>cLHnXG1NB;8FRn_X0{`1OH* zL}rj8(uS4I+B4Q9KsK9qf8OehWc&9Z`^lryseG-WUm;vo>)kVlwU`)Q-ZS#S$`Usv zA2ESBS<$Q-?!L&@>lvt$gTy5!4vQtgR&Daqd!*Yhh+a}z6032Mf};>!hF$qR3!bu`S<^B;WNPP8wqF69!YKo==_x-{yyIG`<63gEu8pw+?|h|UGZx#??at17TWB< z>A*zY-0{d9(Q>GCc+}O_)Z)nJdwcsa-C6u|m+}prYhWAn&kNeFqt1Lu9cB`L_S7I+ zh%Q1sa@T=e+N!PD!bFx+o2rzHvV)d2<{8+p@H4O!N8QXx%<9`32EE_6`PbIfp^s)L zm2{fm&5o&mJrOntglU|`X#b=falO4fqY7i$II}xI!>zFvlUuRO4Pp8GiJZ!wM!xTB zc_9Ed2X~Max-YA!64tm;*i6p+2DL_tPIi;391b%%NvV3|`@%E1U@m>F+VTw8tljHx z_repuA2Z8#vGa)ElLuNy2{^{-`Z~>5MtVPl2~~}q+i`}=##6sjs`~B4Up%nB`p+f(WD6ai zoL?Tk{K@6AEz(n$->bQJf2E(d7xp}>C|{{2*f0EAaPK%zk1t#FRZ8QpabHATkYmG7 zltX;!+v6?oLk4<>+EspWQ^tH4kD<<0!ZWYh3*9q_TXOm>vL1eSF263AKsmlL=%I-f zcN|grfA9}3kY@g@-y6o7R#LAm-rQyWOz`J7Ua|RXn*sK^9OnfcCUA2{vyP#WHMQxU zg(!b!jw#aD?Drdy-UOMAn6ixk{ob=4l_6q-s4M5a`uD!?xc?jf!#59Ct4@N$^pCL* zXdAL%3h6TtZ*G&WankNi_O1WSN2tkx@MiPo-PRXtACf{IxDQod)#nXNPJiC>N-IvO zA6H&V=arDL;zDEJ`m3TOwL{+wZ_=e3B5Xn;T-*ZiuiS%viD%cXfN~!jb`V7bAH&JN{v-)jbJ?EzzQiZ%S5-!FK)g;M zlaC{MpP2*?&>ny6`fLeN`;yUeG<=sZmUcY8C;HQm2JPrmef2o@j(IHpq4G&a%rn`O|NlJf5OUY%v;nT zI&uF`CJLZ^~Nw)k-oA98?pm|t?O zdw~#Smqfk@gxMa?QfrQDVp%Uo+MC{Cj-bM-skZ;54h2zZ+kDwc(-P=5ZU9W!5}p3< z27~>D*m0;9OKzpv>}5pw@*KsBy>wy>a9}`-j`n*h>C$DW(rub0?P@UMG}4#Kb%jZ0>geqzgIBO1jT ze=pIn3|zeM@ZTrXc0oTH7O6RaRL{n`zfojrjE#P!qU43OP361D;((3Y5Y6p&x%1Q& zXlq+CPHNpz#hjDK#Jy>$XOcZ)k7;Q9nmkvgj>i^_fTSEnY~jB&OgjmK_81%!t(78wmLT zIFY{VpXH|%_o zJ_l#WSQyj4xyqhc^DdY4AIBAP@%`a~hxE6K1#{?wTs^00`S2c<j43h%0xyAJb>5(! 
zHiC>g=sgfQlU4qYpDGNw{Ihe;m%V+&^I1JAlv#KprP}NpRl11dr4CTkhI^vLNh&zb zu>VonD&fW%bK0s99ZIG9cM2hYeG-yYt_towdmo^%%duaCBgpuZ+vV9S0!8+W_+or= z#(4`i)D{ZWgS^zFd{W*iH4PP3^3)Z(O~$7xMs+0tRNb#1%tLpqd=204{i91O0I=8W zrkUqTuP=D-863GQ<1+r7`TIa=0|SO_m^bfJK5a-`O+;(vCA_1kM}79eCX4vLe0&;g zP%~QJ?O+#TqQ0HzCd&8(WZt_{WZIQ_R=`_Wc#yh!E9McOwKurOjAcgGKhfTptbLxI z>*jyjX7*8{Wjm|8R(&@?RAlLw(V(4pn=S)}p3Wt^UTjp9=$^FJ=5bpqPT&j))!RKR z7i#iV%odaVtZ>ugHceCg1Dev``}l0MP%F1yoSx$Tni!Tp{#Yqdq9}Wi*%Q&(7v%X~ z>HWx}Nvump>T#hzk9FaZcC>Q{^{MlbXBG|Xm#K&NvqXbzCKBt(J9*Rs&9-T6G%ayQ$QZ}%3jqoX#7Vi1-I~tG>wNP^o4s$ z0qtjaplY#o)UB*|o39H}g#%X^UYX*?zSL46dw=|%Q|S^bUTKv z&rZ=(%9aH^>1N80ZQ9$Y+1Fv>MwV2Dc{wn==it6Pk|99*)e4Hc*L7+mbyqjb2jiJs zGF9Ah5^PwSsRb;~%;EjBjk~u(89oxOVLH9>3({W2+F>kH{`fUg+qE>WEX`YP@v^yM zDOr@7-$ZxERuRiTe`IzI+st<}ni^oZM}dF;j?LTdueT>b;c>M&Wj(;IoJ8rXxh(Al zYw4SACx%jbHAo%R5q6r)@qs5qzY+MIuvXgjxSPB$ry1=qXy1k zNa5z<6wCgk^H77*bstssM1^s?9~QIempxU?jLgpz(zvNBtpB2V_g9=v)c3LKE@2uP zbZLNh)X9pN-1SeycAO7r%4bllxU5r-b2MW;sY1o2myZBv4094%`5fMi_shB67RVTq z$!$Er!ZS@jRC5Kekn^k5VPDBdR@GD{?N3=()ZyH&|K43WhNFXa#0jsMW2B8XyBU41 zZ4+ke5tpjOH+3JVsJa@b>qKn9_q+|8;wjd=M(g6`*u3v5cFWDYoQ^P?wZB&ZIlIu9 zz&SR`)9PA_N*%_|J1aq^iHo>D#A5QS+KM#^{0pSi!WtPHL3QbY6aBwVKaS47Lci5o zY*pU|e61VaI>dTiZPS=g!OCF)YA)>hpYuQJCuvh-43JwxTB5yEM>xflWEsJ1ge%X) zSco-bl^x$!KvSSe-y=rl346=<#I{Rr!263P?17{{$fd(GT}5KIOs!*5@)MF%M>h7a z^{q#KfjDzR?wq@^jNHV&*{;Rj7EgWJvA_bp)fX6X4%V}okzcI?2k>vuWL<6my%PVi z12CK0s%{RssF(g0a0!N;K6QF{0;$!Kx_nUx8@0z*8@&+FU7j0=BJ~LD+$I`Fv{V(& zO$t4ZjWMUxJd1qi3eNxho()i!nV{|8ZcxzeYw^=LyvmCu&~hZ}1lhdY;I2nOfvS}U zQ*0s`6GUYRN%Q3~zFkrI4IFdCWMMB(OUhMT8G!+X?{2LKKFSW8PggxR?Wh`z!#{MIzvm`A@+<+wPjQ9`g7)I4Hw;6D>rvzW7~5Sw~lMS8hi#)SeYk<9l^Vrh%DQF9G~PpvkI7a<(SLa z9eulFUqR+%tkPnJ52Th3J}6-8cW53>?g`5gO>x}-b6C8sWjP9XJ*ih_Z&PiDxTqVk zS{%(fOnSjv}}zemzR#X@<24rlwg~}e4Wy+YmyAt3qZ@jUqHu> zTEC)Oe!mB{e^X{Wx~p^`iSfkAiD$L;dH3~eRi6?Ne9CpHAhwHd2PWq+9rVgN%_S1L zvzvHi2<=b7Q_4yz;wj!YlG>Ls$KfYSx-+41g2Q0-YZS`mo-0Y(AN*|EGz50+Ry?_3 zl-$IUvMHlsDIQ<(NnT^YSj?C#^)N6M8 zNha7)Af_Z|d_pK9vg!JCRVPItjQ0mVeuCS}`J z&CJN_8?sHu4As=daIcfXEvrYY;(Hf8&NT9jmDu-B)VD~kOOTwb=Huf|{uWfEWyOX@ z9hX6Uo}hYfN0JHOmy&obksaTd3yEd>dm$)9GJ6bs01wlTpE_<(RaaaUjvL{7)B~XZJ6b zb{Z-JFj8;kT73WUnT9qk3XnkoUpwxZiVB!9bS_ghRy9H~0;|_g2Y@>Sep1Fnf=`CymcFNr}u9SX9-Im+_3y_{l zl8)#2BR1I%JRrIe3ffH@B!8Q&51=(~v07zN;OGr^UARD`#H%`W^ldz{z24wLSLYX5 z-054e6F)CqI(yFU>~YRWy@A^(dqxonA;$ELbxNFCfBFt7xbAy+aF5=>kNG$VG)rDa zC>_1kyEQKxIvx9XV7Wf|n%()W1v}h?*w}++Yd7{>Zjfo2W#(_J-d8A;J~U4cul;KB z4}tJDxsJ)E;<+-VH`f=aaZk)SMSsbR6T=Hma~u@K-_*Y%*n1C6Ki%Zs zmBtuBd7su?IlrkMG%ES4+h(&3o6kwMvlRJf+avh_5R2LS=(QZ02>yfR9!K^OUSGX2 z9(>ao?^PL^@#8YFrqE%+ImqMmdaw{a-SK46+W>romJ|s4C<-E^%dwmrIV1j3me@K0 zC1IZU*chMC8{{0%IK*Ar?oNN&7n%|7EykP*)1!{iDOV0DjX;+sQSG>yN%Eh!?z@`? 
z#LzrR3^QgDv{|`o|LpTNWnu6*Mru`@tE*7eW%jV|X<9%`gKp`8?~@X+v6A-8Wp6BRyizyRY+dVCUfb5mOgGP z&*(Yx1`bRAUfjZgYeB#NN7vIH16y1sjBQ`_yQ?gYDXov#ETvyYoZb5%dO}LxuLx`u z@~(^nxHp)1dqbDg11hGOFQOM~Cf z2etwIj)nGmP9tbNxriO%OV_*~U(DAac?=oFFLNApp8Q?4eD-zW572q&g*;{?lkuwP znq?N5bixAj(+-g*b1xaqz{PLh?sk-acYn8GVnKc(v5RU8JjEQZ0^52c0+BsL*mpXHdfBDok!}OMM1-hYz zo)T!^!uvYvr@SYqcFmvj^$(9fpMG%O(`nt3dNT>1#8%{|IO&gV#lPTSoay9p^Mek| zR*h~l&oO?yew5srMSnC&V1n!B2fV$rl`%gox1`W#NEP?`g7f7ub~*x!4Hbsey89(ZfU|1V15 z+>@amd)KRUG|X|xX`J6Z5iuGqb~8ZWg8zDuW#H6zW`@g2o@@w@ms}st${Y(Kb#p|c z`(zcd-6a!BIk&?;Eo?5-oWq|bxH8?Rkxe4{j#i!)&-!Th9R;F!EL2QaegtxKEyUcF z@@A<3QqwMiJha58pZ>aiyFd|H7GieMG>-i~`-G@1@?a^JJnb)CzGQ$D*_o5;p5-4r z!cX&?y6Y_GA$xnz3|8Fz`E4u`NJ}OsJa@OhJ5R@v$9zX|KfI$OUhju@=iG(T} zOuIm&na>|@i9Axk(|g$4pLVRW9AhN{>j6{CpYtw$1`6RsVmY%mm;;3rD>&`iD!P_; z^_pQTOPA}orVxJ3#vR|IB9GPn9@0|b;@~%hn=r+SM}@})b5D$kq6mr`O1bSig|5V{aPe+p7>*Zs+|^d;7u=gwWP%aTfkSAKL!0opEI zoL?@dD1HsfeLVu*MC#i0-N*uRZ9(0xv7J}PP{E|8dGD{e;S~4|(7bFo5}Pp%p!GO) z+5~!q^iS0r`@9Ig)sR?&s4yu%pShNpl=#sG$7dOh`CsFdQ|dmL#MxDtw6}RX(XMv2 zpZ>eSEQqX8Yu{CHVTn_oKPsdh6KTcHo`dIySQc;6{ny)+ck5)vncoVH27!r)Oyih7 zakbW2drl1TJ+gB}H_)tkEc6GDuPliio?L|{iKYJt?(mW{^*CKbr& z9#8sFGbhyf#Rqe2&y2v%o7D%fb}DKLV00gsE-_yWvbu#-XR6vN_6^Rq{DJG=%ogn} zSf*L6H~E0!GY=e-ANARPi<6Pppr&pc`%gip+oN1rs0JC+4jEg?<1}N$IaF6js>Ho7 zRUOz?;&c%Mf*?WtJ#BW~%==v@4hj1x8no4PC86Ziyj@H;#i0k1Dg#FBpA<`&m2%O) zN=7Z;8%^ZA?46OzDLXSBiJg%eEOqiJNwX(WR-f%BR=gJHf9u7>`l^F42d%uwD13n$ zFMg$FFGQ(si3GcSJ*aTtQmML}8jf0c{)_U@MJ0g)m6MR7m&B0U5grFPJ}RNA!~Q5A z?ADi;{!U?+Gx5}y^&ba?iI@}~q+N`-CFV>G`30*$Dkl@;vrE`ybS^D$T1Z}a;NuOA z=UjZr*uOiZgo{~97e;Dm3!v<3gKmBC5+}suP>xXftx4LdfwrfTEzt1$7hMc<6x4ZH z9f}+_nor}qwlCh-Bd#vns4Zp01qo4os~Kh$G(bk^m0-5q z&ei%e9?a`EjM8A(e*##h7hF$DtPPaIR*gM!Jm4~t9M6p6k4jdA;V1vI=qWb-+$S;L zj|v(Fr7hYD=e-r_#NI6a>2_~auR!HEftLH`+PF0{9kEC~j`u2MwwT9j0UUiF<<2Yx zm`gffe7~2`)ODs%XraC9x^!9{)k=vcngZ{2++H(2kk6PZTdGMz`Y=lqX1>o4W{P#( z26>kVr)zr2x^js zyljfHoUsrCQRt)DoE*tTJMLE##DaCqs4>%&EL0`LEfn{kqS5ktXAa_^UA#7B%)Rt&gGS3qu01>de1B6 zkEeo@EgqmB8$3*t29#KyB=ZK&uHLIlbVf4sz;`9iYVfX=1Zj`Lh0uOtW*F~@y2FVSfdrZ zzX72)uU6_$7NFsD1hU!>bP-CfXXp(hB7t28%b29s6eas>B-&&?*AGl+H^$hNf>#-u*HpGP^F8PxaTCzT-f^fF+<6aEw$`O z@ttOt|8n`tC`NCxqPFY^sMG7*6w_WOIH+a3jD16`+KU<6NtJOd&J>4;I6+|?UoRV6 zO;t-}blYDy9_3hLOa2y3IUltW%;~@Zbw&+4{>1$qt>0a}1%G4dPv-cmbr(<-XTYcC zQUdtHVDO2=S#guHrP*!iphF$TN|U7II5h0N&S5%WYX#@rW7j7_+42 zthd}NmcB5eE_By{W6Sz+8Wr%0{0B|~FFWt~UWDV1`BmHa9$Pf+CqIM!8-RSx0S06W z9lmna?SenR9T;UZ@oF8Uh;#YST_v%O4pM+~JPDehsB-^gB z8?Iv?#jigUp8l@upGg@uwPQLBZQUg^jBP=f`z_Pi3HhhsEl--Zmk;zW732~WxSa#w>2PxNZhMv=Qd6CGuhV4oPF@G>~tv< zr*1%uVX3tCOI7-#j6^Z>@3M`aV|*uu9CF+&*k@6%HVRRaSYe)1rX$XMYZW2h&7GPq zMUDuAJogly4^ENddX-G)YaE6@`f@$mF!IX~AEeYZr_G2 zfh;1}$(`1zb1W}jG%uV(K_Ylk^8Kww8-WJjN2^stg>_22FIPy!9z)+i*V1G(W=?8u z@AWx#Who8>QMiGOK&dhJca_z}Xy>`j_=l^?+y+rNC0;0%E|jgF1Ts10{ddCzATvOb zcMgk(n{J!%(PRWq;WfAS9-fW>{r9;FlwnH6nM(m~8U#Dqtc7#Jj*kBs_fehwh30tA zMHMm1tQO?M?&N(fMC7W|{0c^7UMEWY9EO!c_;b9JLMl#E5dL-f)ucCmw{QWH&@yVs z2Al}7*!l7sAx+rBKFG-?jr-r!so{4`Ze^2QQpn*Vyl=w0D!C#^(a1@Wf*Mm=7aF)| z*{%vsZWOBd=M#@QXaSOWe`FFFfGeM}4b-Pr2_N3^fbvYR6I+;8x2{2byY>Iwpp$DP z_Uv(o@>lp1$mwr2(p?w;Z`+4-^5~y^(LKMntN@tj5|RI8ykl~DEfI+GOg`UED;Pf; zUXRsaW&2@y4QhciLgXdOqn{>_)W%A-wmOo!$~6|Pz-UYl=e21}$fkz&qJqkWx)1W@ zzh{!5&4`g-d!{rvZIyXO$Yh>Wu-_Jq?f^Dy8MRA66up9I5YAjSeL77fn@KoyL35icMiw!Wlw%iVYY9&Wb*GhPSi*7N#j^%d|2@u zu+hA_tX|Z(x!fp@U2xmSEs>~E;iJ0RVy#jA1 zB?G~GoXJq_4TkCZ#g*``z=vjSp1fKBu`kECom)N(3vJ!d-iw@re4m23%?YbUT?euN z5yc)G+G=S(GS<}urX&iOh(E;-*UjXh90CAZKG2=ac~U)a4uE4>e<1=i_Zj<@uM9h6 zeBcix?umTnOxhKGfQSw6GY;c0Z0SA2A3$p#ULPz0`>DC4j0|rU=FJC}_p59n)z-p6 
z%zd+$AmT95;@mcMCc}=HEsQ8!gNVmGc}A?WP@# zG)wi|ywx+Ua6g!HbGQpc0dTIV*+(Kopf3lSkCEY)7_zwHl;r+x{f$s_MG2}JwFzk3 z@jy0!z>{vhvw5-+TN2N=F~htpXPS%iHB>42T#pI!?XIcENar9ZJ4*rycwOPN+>i4a zOEFw7SJV|2Dl$D5z3U*uUX{f;X?7xt$tTfQVi}I7*~51uM8hVQqnxUCBSpNDI<$8V z8M%bz1X2Qg*~&U;vb@r=v=e=dY|*owwA9%s+~IQ)FQ~P3YwSfw1G{qM)(&dSRO?P` z<7z+ehN%*6UXjBfTq|kreLR;|$?UDmm%G%@QbT4_=phl!Gpy{AdvdeDSHtKP*8s%) z9lB}wRCAf_lC^W9MicW2M)5Sb1V&yW7q(li-q*`oGY(`{b}%`*Gpj!&-C;9DqvxSD zJ}WrfthUL>v{~7K7JcfmO3bBYf}c;19R+C2U!B|2IbriFP%WK(-dXMw)i`+jmv6ms zcli);GXCG|hnydFZuj z$MApZzL*IGn%j%i{&hvYTt_-TOuVQ|IA5+)u9$ZTa5L+7rh)t0mv`~$15=U~BH~pq zrL($Ao94VVT#Loc9_xqjzmTVM-=WqQZ@}VKPWxAMtD5ZITuv>=6#jJhiQw}Uh`RJo zja057Me|tm*RWmcxjxcvo>yc2#Xw2EA}*&(x*GI&v-OiZEaC0xh_W6yv$R=!tA1f1 zzBjVTGp(#(a^sWe4qv=?X^HfREB_<^n!I=;um#N+itNs-$zYSgAW%nLcmKO#!7G}b ze^r^4l49(ay0S!}zW%*5|Nln{jASs!(`l&_IXjRiWHV=MxUckmyo)onCX8T(xF8A( znTU$epkC04e6wJrUG!I47n8n+epnrMQi{$CZ9kcibuM3rY7e&KyCnPVCPVW3+vrSgHz^#$ZKo1N5b z_R0{mx+v61Zcdz%B$D1^eNZE>IoI>ma!`5`FtW( z6iwHZfklqa63Y%`xGMwR#t%NVj!cE>o^!M+6TOJ@fIxno*@}V^z5Wf;ww2Zt|9>Wq{q8aN%*gOZ=X1n3wG}{Rpf~{;X^}cFBw8{=C=H?3-XvO?fw~ zQ1$jf3EHjt-OJnId;i@y71aN^6P()gxn6W$pKetqlSl8>c&;3VTfXSq1IrZ;S-Wx9 ze;W=`&pNbEL-!v3T8Vr&;ENnYxV))RDBOrfo{eT?4JVY=udfWv z-ni!T2|D&EJB`|n=(v7--8JAzma|`O7wJJLA3a%V&F2Nxr(n zo!Gx?v=F@Rx8##~Hlt@KC(uxg=oDr+^>zI2j(+2H$4I?OZsp_Jz8c4>ZiJ0ct3thP z0fw^IzjgCM!PH;!^fSTU%_p_rxUP&zXu3m4%8KE~M-7L_bTo-u>TPDqf>-OtARSXK{zY|Se8LTfIi8|v%#VnRH(e6L5LWy4Ofwu4>9 z2+iJqS9o{+y8Q|eUCEG+m&j4J=RLeQzE?K_V*F$vEEj|{=T1_V9y;JFbdPM7j5b1$ ze2?4##xG&kb}57Dw~h%crx6$EQvn)p?=VfD(eA*J`t6@#AaMQ(^;cx|^QIP3jyJ(< z*ES74q~QtKA`e!(DwOH6Dt$u1Wp1@kyo9^ zXNXvz!G?gC-%*WiQ-6cAb3c&il{mBA0e?zczMv!t^cZ!y?hkPG4jgn8=2w1b3NJi6 zL?1G<$tR~IhR)<_QVe&%qDq++mz94m8{1C4LF;7I&t|+j;T+a$Gtx!S#LfP>{v8KJ z3Y(GOM_D357BU+~21;TW$Yho_auiCx`@Hw{gqO2Uq^HqkEv- z_xyja<6u8ay>uWA<~O>Zuaw0QNFgji1k2Oa9nfq_gMZ=DX+^I8z13sSu|Ts*3Hs0C zG+gDMn-Sjx>Ar{`$bi~iVD%1C1(jCYz!$2+jg*T<%#p{#%^pKj;g;p*E1}%uP|Q*> zNm5+lty2Cj&R(}K*U>nZs@9q%I#f7)W`}{$mI%G&rh0Y6R~H{vK-npX_)EBhU=Ho~ zR|K(0W`0!e4&@3W$SjZL-M4W7W47MWI_;T!mK3` z7W!t0H|6ePfY2woYJ<0?r@;tViuw<0v&nv>(nEQNIu$-bk_|nq2dDaCg*mHG`Rhdv zu4waSlr&W}OKdM%NYPz8+U~U)e==shmuD9%GaMRT^*KuQGexSuJY+()a9oD$+zHrK0%Yv5TCrBxgos#J)FPkk~*du&AzxB%mENL94m(r~6> z!+|V{Dzb&YcB?BBzy7}wuGLGt>q(z1&JXUzn)~s1W&ow^o3Hm;+9Ii>wc+<$JmOf6 zQEK`1WUA8SwY!Rhl=Z#l?><2kaQ#^k?YYlg`9SAb?RxLB_%75|Rr^G~SlU1QPQnoL zV_q-&xb%~6RV@EU(Rqfm`M-U*{k5vCsxE44OSQFE?9oGVc5Pxjxr<<{{*MSPnDsi27&qTh!s-*QWlL zj;l3)@3_wRuzqSL?SS~~2r8}JZ)np((uL%6RM{&rCO0_j722RD9Y|2-{VdGj`lo-F zIxIND1J|afcn(`YcI73?`!=-4N}54dbtNDIr1afW(m-%i#F|h}b3@QTkDi^|k%hZc z#JKlMj<4&B^M&OA|IK!ougtKXj1I4G8mr&z^vEt&uTL=QbS^rNJRJ+Kj{~9FJe!dT z^m33DIF2bJ#^2b?_YtYZm-sn{imWu_pl#JIzIk0;XE3D2)CbbMy2OavDXbdFenIEu zFJF$>edwz-P!KdyJf*<;6C&}dJ*rn=tm$Hl4ZNs~C4!fI>J!&@#&aNqnjX#m;m9Xaky@gzc{Il4L&Zxpj@`;J6BjsAQy*;c2 zG#?Q`9mzA-XizDHIQ#4*^@THw%d!QAIaurq# zf+#(SW-5QN@=Or)+DXtln7sj}b_Puyb@h1*B}6V7r8Yi%sI1Lt94r0rlFC43g-G1= zhzpi6)uQ2sd)HbFb{ z%{?@m>mrkZ%zVpB$F=@vv@M?I%C&iW>cQiDhMJ5u-b)Jzd8)ZunaII2Jbo zutcQW8<@s6Cj*zjW_W!XJE~bQFIFLwS%Bl~$~{Mg6L+1uvHx&09Ut0IF@&Dr3XAD7 zdM>R9L+jGc`*O!+H{fQ}X>s%Z^rdgLhIl1K-8@B}Cgc8?^IW3#JWuuO{72`|XZqWM za;L3ztEdpfajZdxiHQ=etFC95U4UQSPrXqK&{yXF5bW4RsG8N^d_dDP4*_9FTLH@sHmp2uJw5Pcrdo5 z<^!v17b_YJs!QO9ZycaOZQ!))0$ubo^$KzqBwjOloR*x6u{ z8qB;Lvh}fG2=RSYtC_8aB`TJ+```xw%xzVyxmd&DOR?4Uz;M^eOrX_qKyZ?Cn2-c3hYyn8Las zqc8&<^~stsPqqvhUm&&nYnSZ0xp7B-Z0SZ!wdc?t@{})JNKPfof`IwUrZ_l{cYc?4 zD@2TREx{+o2ozk+m`teg$lnYArquR5jqBJ$D{ty#Psp&DP-!azUBUyi?~m}eSVA1a zfrjUQ5CvPsadSaHIri86{SulB;|$Qb_?qC|BP8kwx`hWYq=~Tqc@g`Yq^Grrx&m=T 
z9c_rNzZD9s#qq*iE{vw|niqraw>*_l?H`shMdyHxss%Ha&cw>e`f7oXIl6QnC$pa` z{*yT5-b zg~rAteEKnUsw+J#wyfIntY7{j zJsgYhsVKFy&J5SdYwGlR5ncN6(7C;xe9}tRo-qr4*%e>aGk|~dk zhB-G-Pr%(Bsnm-@Pt(LCSx;Q%7P-U69hHr~1&bH=r}@2JzlmyTb+=HgF_CkOz%_b` zL+^vaOb5tKT_Grc`rm_hB_I-rLKe1b%Qw##m(9yhgdpcMOk-+|EiGX&|Rs}aVXztchJb>{Dk?ehab}*HrrNL4y&##CiYz7*Y zU>n$6weUf1^UrU+a@rP|w(ak56nH+9<={UMNip5G(C)Ek;3ug1pd)>by4~xY_P3t;jg+0hC%m({S0R`B zhunyZ!EfF9S=Gmt#++8UoEeM1xIgZPStnuNPc(8z&iw2*ckhyPlfLD_&J@XnlYTS z@YwBFmV!8(gQwQPh4pdjP9E0Lz3Xh)XU2u{gWrM&R=;iYgkM~39(FCTkEJ*~qvii&fie$j zz8=GQDtVh=g`difgS9G7O-q0L+!+<9+BRD8yz2BkGd>!?v1&RgY^x&HsR!8&@@+eb zEAX|Dst#$v2b7=qx6Ig;CH@5-UfHNveB*vIg$nq&QPeeXlJnUiy}vc(H-cV3ZoY16 zmY(cNf`M0~PJ-cdtq(oQmWs9VJ-4=9<*Y>NmnjKtip?5*<1=)PI^{oWrai8gQPgYy zw{8lO-tP!f^8FJKBt4+XZ|q=IFK&sL*HJk&Sj>%@?K|x78R_6hyBoCvaw+4|RnJa3 z=o)fB;bQrl>a$_~%K3lVt9WGk`-qj#-u~$ok`w+l-|#q6ryizcyAxrX|*5#RS)rPJNg!o|~{cQpTFb%SD#?$)Qt zZlr=BFl;cq;)(u&pwe8uwETF!<=^3nc#JuqL~Uu4#(CG=hN}&m+y!+>MgD$b$!O1x zPuGgl5D0uWPp5;2%cE5yuqN0evl3|1ze|OHq4+U8MDNsKa&B*UrN zq=n~yQm|>-VXFnhV2-cuzYGbAMKA(?#ID9q4!%-0Go?gB0_R>H$J+O7@oLvQ-<$Pv zX?iic^)u=H6rZn(j1H}V0b$>7f6wbG9HgU{oUQhX!oxb_w2QCul<3y%^f$?b2Jw8Lbbh>3-@5x1`6{er8D&EF7Y(z5W`Sc272(JIJl=A9nJ)VHK@Bc-7FFP1u2nEwMQ$JypS@(G9;J^UH zloD4?U7(9$D&nsDP}Qx`ap{J}=H$vkKNjr?lH>Vo($wh~u0}snzpcjq;crH$-mc~S z=izFG7~D3eNv>1&CH9TVgVITk?9f8QX4~ww!Ff7;#?N0dj*((7Wh*Rc8}f4j?#$%H zasz)%w59Ts>ZGAPv087s67!>1d85Imwx&LJIJo}S9EFI-sXvUQRoZXRo&ZPg!61(p zAS^OMi04%zFZWTG=UB(N?e{SrJafBri2Y~0gks%iF{`t;iz658Ysmx9n9;Kv?(Y2L z^67o%z1<`B2j0##{RMUp6e5<*nt5@Rgb)8NX^eo>|6TGLd*7j{a3B0%L0$ZJ0F665 z&aR$LpmWx~wf>|L-gyUO5=&Jd_Ux{}0Z{xi7cvP_Z!^1^C(18z=K2^;@_#-DT+0c5*5zh08i&Z{ zBY08YoP|F_H|ngl&6)B;X|>_5LHAst?%F8!$Z1?zEJ`7!kK<0Q;(U|9)B$J&K35+O z7m@9+Gh=mYH0HWCnHaK8r|ht-Y)zXDBe(UUVNz4{lYS0yj%2>K!5=0LsEu0Ri)LRe zkNn6lZTMsLQUBy(j?jMEqp!TnC*z4nQ=LQ6 zNe9LsR2oQ444ol&VSG(cFvknNux5*XM#M#3K~Ozq5!W?mnZhj%%2tCUCRNF*+&wBf zp$a$npgljM9X8tog00dx_woKx|1L3kGpbYU1Rm@!5lUvySklF*dfZg*0)LEb*%cA4-bdDC9l%&#cYN8gh6=7N#8kn*crI9;d9WrNb zsZ|ckyV1o1S7ZJV zv&Yy1t{LFQY%LiA$0S#RU7|TdBxSS;&m+9~GQt`tf1z5vzQgLvLX+P;WC0ST-ZqZh zY94FsOr%F-$zw;-kJ_Ohku3Kaa!;z8I9CMBcp^!tegK$meuS-_ zT1-jGnJ?wab?^5(6JXm!!CEgN@M_8}?C}l8_ca3wwn?w?JH^U-%TixPT}gs67x$ii zT{k8<_n{lIu&=4Y%zp8GJ4_HaA9xdQu($C>I*^&qhwx5N&ecghp)*PRnfstW#^G}X zSt}BgdhNSQ4u?kY^V{IRLIYO=Oq|P?MjCsx9bwKI`S0XM6PJYysaC!5PLBIPhxTaC z`|ABIVMg36Kp{L|l;i|)ja^mo_v5g%_)2t2*Bs4)318A=P;dSW^ymosyVIrx`gg~TrfOo1#qz$mgIM{E2f%G353!_+>(Zl@ zdZq961#tDR?=}6&n7;IHxZG3Dy|NM;2i4-t< zqfiFe&*An}Aeruet`SlDvtc}U&v;K|gGXC#2byiA%#6GrR@8Yw94EQN`SO-qtKLoF zrP}x(xh3t*T!UUMZW1Fv>0)O^SOvbFXOhQN^FfgS+Sv{3M8tz^Ev6SOAZOjdqdm67x z76<24pQnV)4bm88N73avL8a2kzAQP&W%bE-KGdKYqY}yztW1XUtE4)0G^4qJb8yW& z&rfB}`ygXmaK0r51B0$1zFlSV+TrC;{{KHGhOc8 z>oAH*bd%h)f(LhSKR*JW0?z1DLEHRTN2bVqjVSQvs6TI!J9keTaX+}~i4v`K%69%@ z?VTpkBdKu7<%Wi9HRPK48Ef9Eo+mGi&FE*|Gv$^kN^0J8b%;mi)I%3HCua|6q1Dm( z>FZUZd~&i>cYU)tntYm;!6@RS_km5BQ9EM5uTyk?44wm|qUY?cLa&iFaJ! 
z?4S+oZz>N7#Al8o@EX;(ZML&?o@KTyjETz8umbT8oE?j4zDC=<1w>FU?X}-@xR_0DZ;T6X(5iML5rBW(~gBp2~^ zN=?1$T2cS0+MBAmvi#ai%u|H>`$o)U;b95sT@)=rVR{C*g>OLJlh%1XNXwfcZ`pNd zv24Zr-s9XH=MuYMff#hFrn`=$=4Hjghg>~*VF94+)fIexre;s?l#C$m|smb~Jif8GHWkA@qAK9Bc zUg^b8+l-W<50{EjULIE$qK7T2{y+RymCmm{pNBM#_!~*&J}JAq;n`al%|pt#?L0em zL92^8XzseF!xN#FMvm)d)%0Ip=YTNjT_k#9-Iz|S2}-10O9|;zXk-6=sFvfH6KFwh zkn~0xScNk)=+%xT^&4gA+!MXBSN5>}!6z1LFpsklSDBpm^QLUSb>G5h#STG&^u7-Wrr+Lj&*Ys9_bAc5o6==wytZY5Gs`~OQ;fF~7Sk;Gp588H`1!HkUf6ZxQ&Eb4@y+C$W$=&D& zydGy*Y{8Ri0SYr;`ip4V!6;b=3aPZs!g@FjjYsIjnT~G|QQ2WU-Z*bj~MFq*%x+B zZ2$B^hYyc^WtK$)4QZFt#cs2Gp}6tH>Iup}mMm>db9e`|JCLx$gai=sq7BKzA5V-* zk?V?LgT%^AGo0XtPJt64s6eFDr*Ef?w%+@T$uc2_$} zrvJ979)4K{OnEnk3S5JxH{qa8TLHjP|491|$D`2irsGWFs~&0ZXV>S_tFTS#Z}if> zR}jl)ApAGJkWH1JZ|jc!R?urQmh}|#Se$k(E!S9(UpG&mYt;0n_D?*hFOD=#>9Cnu zwREC3fgb2dHyOPO68+wIF34a|bKBFTLNr}(4v^vPd%m*A`))^2cY|s{aPd$QR5tsp zv4|wawr0&r5QP_JWw^29*E0=DZQA{#MNy`qAg7E{=C6lp^knPzv`Ns}*)}LZVMDK` zrpwlG-X_vOd%pF0=gLBo(M=lF`4tl88Zf+;MCeddty!Wid))A>I9$0A2<%^yJ6p|S zM+XFBM%<}cWTmoX4z?ictUKFgmP@<&7x(FTX3kb~Zt2LXCJ>R6hZ3cx({PrE)pXM{ zU{NK@;D7?eX)lodZy2BZ;2jvxT@Cp$aI8fD!1$ueM%lo*WkU2~e9_`=v3+h%%}^S4 zJ^CUYw9}V;vE&mcyGZxWx{O`LLB#sCN=*(H__ysrtzQHS-@vgJ=l}Y1dSfr^G{w^%Rv(S?Sz{u82z(MQ+q~k%IaEqNTb7V+eATIT}<47Fb0QP zp*>)q{XyK;zo0g!hUP$rJWi4fN(+v^7ECH8Pyytfb&4Y-rrhp) zERVWwX9e6ZQ9Q|$OS4@UzR~KQqpDA4fuYxM>;II+|HM<9< zI<4zYvT0J9G>eb?6_lj1W1~=x4a8ki#wMq&KfMKutwa=bs@13Q)LUp8lGIjv zc%NY+WBuM)a~e9jRa*WX3y#JA8@yg`-<4ihVa)SMP+I&P6O&%~>h*(VtVyZ$&IR*o z_N5~mQaFllyc-2+472{lJa{BM-`#Vb%Byipp_=o@{!-LFG(Zls%2eGWQ6|If;CHqf z&&*G%99DlJ9JM}&66o!m=g>(gUHHt3_fQsmo4eImQgh)zKs>2$QZ*O}6n5x2_zrTG z>w(NS>Tk1%yArpBT&lQ#5XGYdJ{Q{XFb2MFuGt!&oHyi2bh}IgxumOf*72%GQKd!C z=gh~3ai7K8HyaVsv=C5{m1z7+*nZGzs9Ex7|qZvd4d>Eg!{ncC` zE_kXIgK=4I<@wFfrGSuoGrt&wVmWR%K;0EX_;ZWOUF;E;m#&~ZwQr-nH14eVlgB$O zm~@ihS%WlqvDH1Vlo}as-@XwJt0CT57GngO2zN77`uer zK$}@l8xPd`p<%b1H?E}!HIG?A78OlOb(pcEZbFeY5~Yun^icMG0Ufw{2kPikGPnPI z)7g8$&nzq_5I*7+sZ`jpe^`gCT9QHTCjieK)1F&7E{QK0+}1NsOsw;*bjl|*p5hND zD;nBS{|mXfE39gg7$ono`|na01gr4hk5H6nTunIrAmB$0h%{xDmps2_aZbOMR`lbS3EwStSknmi{n}FJ^j2kj+q@ z*MM46QECoJ75cf$Bxg&if+@xUbXa2Q@ly^%g9x?b(LZb>RNn>V!0?d{j8 z-lx2eYY5*!vHcLpfWhwE?NWap7i8l;+5Gs(DkyJyqcMHtre`m42D^Q4V`sLH_V@Ph zQe@NW=k2GB>1nfo{q~9}xGXwTan>-!C~L!;MVejj3mi1C;S?Iez&*xY?9)si@`N9p zOfN8#+%w^HaN;69t-}{9T7pa+*Yp6sAwSu}yHJ92U!cQQ2V?6CZ2RK-?YXW&8gn?X zpRI(~NjlS?0*umKKh?)r+N>jQ{D1{jNd*LH=1I+G^-~OVCQ3^J?d!+9+Mr6sGps$# z9grT5z*&inH3bu>CzQebSQ(T&IC~|2pi0qtuIH}F+8-?>8G$bIeuPu^et}w-FBH|4sU1KHs+f6`o*Nas`lD`uA?Nkc*Kf&sFZc%7gTD z^%#`xGsDzV;8O>I!upTq8`X0Tqt|yR326c2&Ur@?FkdtA)+!04^>?Ok!GNjN946OB znWFaX@BFd<+ZCfJa5ZXv2AOYlE7E(P+0*}I<~@@MM8ssIG^L$A&7XA^<})~!u>Ie( z$zxw%Ks2@<1LO9mezEzs$7JIc`WB|RzBBb;3dQVNv#1PO0YpP1aJvtl+D`NV+SfUw z-^j%(MsR#Ugb7L=b<0>d>6GMcG)`5@zN25!3!c`gZO*g~zc zxw)I9p;vUQ9P0%S4B9pxt%Pj@*W8RPzPg0DRzxpiYy7VBQunchO8z$UWbdBh9l53vsf%Hv+pNRcLxZ-vo089i-;ZOOFs? 
zs5;e8m!DLjKz|2fucc}KGPKh0>){H;!yfn9`vLy?hx|`U$zU1I(t%ns+${)I$Qb=# zupGkWbp#s|!AwxzcJ&>2i$HZnNtR>oT-k|LbQG@=qo^H-m6!jhO?}n_knljh-Oo-u zoPFzNj7@IDoX3p0YtybsO*Xr(r#ZUrR0lF7y0PwJ%N0S>E)dHF<4G#!P0LD|X6w}3 zDp~`wi2mi1e(T(oq(J!**sm#THd^hm9A)xL{)S3pqAYk=U8N8~ljf;>Sy%VG+BY3e zIR?5Gp1!6@j+cekCYuUS%B3I(On&ZxQn*UtXHAul9+@)?W2IF`}gx_h|i6k^STX zD|?#JlGElR2W((>=EC z7nR?GARH)%dBdHqC8mw>A)c+y#Fd)kL{)|mjJq!3-=z<}^;sZ_fzt0q=t~1 zKOX-s+3A9Lq*mn565d|33u}6=S_0`u-h#W;p7u80JcX*Ae^nIxK`O&vZvMv9^n`NE zJ|6#PHo{9#+89$=J?(J=zFnH)&ERCA%#{>;(e-Pbq+3#j@BmLgp{)_=Mzq!^&k&u8 z#7lnB=>ZOt$3HcS&OxOcYr_YQLcDKpE}94DZfCst*~D_t{4m_fPVr9cufviHQe>l_ zkJ4cB-t6R(CGn1h1_MHG=)v;)Rh6q);d_c?@E~sV=IK*!ymlvx_U)`<(DF7#MSSY4 z1HOE57pAR#Dv}FjY|oOp4VBxb=TA@zLH=AB{IpjQ?)%U)c%yekt!rx^wW>GS02(bY z!HsN;2Fy28hLpKXoqh>Sf%6@eFtfkf+Qns7&sHAXsnrC*!;8S44*5HvbTl00lCu|a= zYMCk;aN%b5YMX!9_BlSErxYNau$5}uoO4JlD~&lTNP1IgutA#%H!u!e@c2D@SI2F> z&U^cqwuE8gUgYQ7w!3bh;(T-KZie6)Qu56aulv<9{0$z^%33q0We2WLtVju*PAQE_ z0@jf=m>z7}kmEAoket6;Q(&e|7!}nAW&Pp`msEOd9jY5Ww?xYC*dxIHU9wG&xL^*7 z+x_gLKa?V;g0fvt8(SMbidC-jJyi%)jp$5r-aeYr-ezvdaQ={d7d7?xb@N(M;*BNd zO%?hx>r=Tii`b&*wqU8DeefLX*kaB0i?G1fU>7?5o$vv=Ks9UGpqn9UIZ7@I)=Orx ztsmol_RC4c%QbLywmM=IsQi>^CoRs;6Ea`V27q8?QQ-@_l4;Rjc-t6 zGS2_QAJ=An(m`2oKIWzTiF%ctIZ72fBVM%tPmN$^ix z^tK3L5_R!-)qq0@yZ8z}YPoiWe=YCTDc2cW{K0fHDV<^oA=u2!4;s~q zjNf(h<$ddh%07n@R9xYcFGB1?BILbWJ}c_EKMOrG=FmnvkC-=goxIbqxpX`g2aYl6 zp1HRb5v$B$=@k4Zu5%7%^Nm4pB!Rj6_pfKCO?u2LIT(|5rg^JKD~7>OMMGXexBE0x z*qVoJb~wNhDk?y}O}@STvPEa}nX`JOLFw)Aig_AnntNtucqyU{xP>ZCqU%axU)y-k zt*7g5u9kCvt^l+f1Xb%>$Q%l?3yPWW3Uns z^-PFKV9l9cjLM(Xkm_B(t}g$ONG#OY&qb!3QIW$|0w!Q$>&xl6YL&+xTmPD!wE&J) z_qFH-izkp-eQkda76ztnXI$J&-rCeS$_V25ye92*5^O*q0or{Z3MV^o9=hDM%su77 zQ}kGlM2)uTqw{M+^@t9qdXvR-`QJ(|bt`aQAz-N0m_0{t+4^N0XVgR8rJ~*j&*gPv z{tU?2YUcHCG%pc-&$5?7vw2g^r$Z7~W=w95^Or!bt=*T%l#Y z$h&=^>Yon669;RJ68btEQJuemA9CFCUmk?j)RCN9{-0Y=wzDCub6hf{l$)RIT_nUB zNc6X;+yv&;Ej8Kq_ENbJgtB5#rNVJ&RG6)x@q>+b8I`wM{PP`um5OjM8ZW>)af@~< zfCXwk;zdHPvMbL*zmsBOC9pJFO9#v*2F zv_g!8Ri|u@t)^YM|Ix_7Dm_Q=a-szn_KJCmY-zSi3bMOFPirH_OZ?lHje;^sZM<16 z(Awx}AHc}AG`y6Yh}etwtdalty=3!UsxULWH!^ec=eB$3yOd2{l<}?`MCJQoao+?r zbG=SbCmhf7|8KU$%q#*F-xIQ_-`joA(Vl@XMr7{;uR76WgCcVv4I9pVp@JOYYnBQ| z;+ZJ(sH#i#oyyF$y3`^;|Fad-eURi zi3>~2Y|U}$u8GWfQLpm8ETrEg5prx6vyk1fZX-L*m=o)X8)l_xMK^y=Qhziw#J$2@ zD{lp-^#iSUjU=4)@yjMGWOKu(de7hU%KrU1Vm~}*B#SI-C|!kE5gO1j{mH|&o`gs# zR!Zm0ez^;qMqf>CEjB^C`)nm&STm&)ejq{UvMD)*Y$pD@G*i-;cJ&x_3t`$LHMCb+ zutu#Y5LzXmHSF30Cw|<@2=IyRa8l(EM}|0<+rj~^1H*=>OC~=nckPeEAYBEjw|e`c z-iQmjYkf@rFx6~6s_iDvCE`Sw?mC|Xfw;rqZf3d?WBq{M@+jlxP0$Fz+m?>h)36A7 z3Aa3g4jR@}*=&UZPLNFBvZ3I&+mF7VO_iOE_Z)`8PKXjMs;{4)iU0TV!R7jL)j4Mj zxk^j{T*v~7(UGWheTDY{f?}IdHjld<^gFD3H)J=gmy^Vj{gy3&2_dyR3P2-y|4BV; zO(Jbr=I>qZ{tSPJ6!7=F2J}IuK7#G_2*^5pU$pr82Seb+;seE;46q0xV0v{<<5;@-R90R2ED9RWV6G=~Qk6d% zRqQ**mM>SK9xH`YJQ=H~G+Do8xao*>fY5y>#-zU^{=4M6UuKe$BntYwXfiaPtV;zf z*!>q0Rk`VC2l*>PapCGSlLVqXQ3KolC2_SBv8JzSXQhvoo19AYl>d(L+lc5QY`lt9cC@K`?DS3uMj(9 zUtHUO4-At*)%KosNVyU5QvHk`<-v5c85vQFw=d8AfY*e3N$!ljdRo^fr=8naC1Cu8 z!`t?^crXq4;8r4s{xX%Dig7kenX5cCU+_WR_w#flmu-{Bo?A#JZk1cVIFeapPkgQJ>hc|!iNZz zLTB)DFKb|f&v*!T)~zMT5BlZzknHPU!|ZoXd`gqwM46flQTNV;`M=2|HEf|P)VKCz zUYT~_NX1*kL}D~1F9}>TjDZjPp`C#m7VIJlc|igsE&(xSf%*9K^n~gPd`(y_w*z9z zoKQ~e%GVWx8uFNpuxq2pw3|z-u;3;TT}A0@R6sCWYTMM2{0i68?ibXv`r8*0vrm0l zfT$wb&#pyTL%AE|vY2WuE9xmGpG|X@A(dPy^;N2;&YvAA^1Y*M)Uc{m`h37i3YI%DQd3CO@Y<2W0-D;uq5$9@a}62s zmOlcTY}t?N*n%W2#4EG(J||Mo`UQ9}Z|~S{6{&HE_Ww#)*5>;jBld^;e`93M#b{_G zBdAOo_cAKu^pz5Obe^4V*MDi$fDt;v;7)vnRFp3_#^Qc&PohB)8Vo6VY=)Hh0V9{b z=Sc3oz%(VMl-G_X4oz9zd0xx~l3_b2H@Zu?H7l`S6hBkLTypR{n53RRbBw)OLSRZl 
zLoni+Y{#)?+M(CGS9Sl6zls?s4VMB#T&9M&;4CrsE1228JbE8BFaO;OU~17 zh|CK5l%_VmYm8+;hSenmF2}^jT?S~aQ2G8CGo-xt<>vd#WNk+E7=OexBff?$KmeB% zN7Jg|K1Hz5BjoF)m&tRptcrxc@;AhV2

5n-(>(Pmy3co67CsGDpzV)fZMk#ESC{ z-Ocv!O$(DyMPc?%!dvpSVC;}tAPpD#`O{k8O{5@ayg#Aw2#$at*#qF>oYdhO*aB}!S#b}vD#YRP7d`>1*hMy zME%~*3H26bQsr2An94*In~M41NUT?G(pl??qhmJItgIeQm)z+r4reBSkD|KTYNlWg z(Aeo3U7sASV(U4D$Z``Gt=|#A6-asWy+i^Kh;!2R@MUV+E4K$pNc(wTCtoY2OzdA1 ztg0R{YwbD3PLKs(`n`z*DlMI-BTJ7w7mQ~Y8$xcZ2;@FT4VZO0c%wQhvnI!sKOiVk zI9qQ=#^zZ$-^*QqWTYSWJXwZ*nuAc8yOq`MtB~f}n#&;YcfIus~b#iLDJ ze6Q2lq^tgOdVm@ghc|EER+PCJF86ZZ8kBDQBZcDr0l8^5x){LGDk*8dZCMEQsnU={ z4A-{a|IIw*r)OxzV#$UTtfQpUGC5%Rlz)J`sZ8@jvn6t!Ti2tC8D?UjTvJ3Fl17h% zN_5xW6FUxbG$?o=JUaYZ=bLx@TAdvm;@ZX!hmw^VnvVzCEXOykapf7xn_#sp-d;wm ztgi476ge_mc~mjUJGCZnA|$|vr|6jOPT_V_eU*(ZM_hS7O?)5kxBQOA97%M}chsOq zyI#wc`Ag^67D4_4gzIG8u@ZYV? zFIEh$k6IG)XC``RYbJVEv=a$-eN?_@ozV3*HZo8{)p2VMLWR=PR*J&fGOk&-v(zE}mjP zj%w2KL3i6e9{YX{w1~cVcodJYor})!NsCF8>Gd44sq=}0mKG)yoUeVJ#}?3{*AIOh z6`2Kg{`&U$2{?~xt)|y;*sOPN%rVyc!PKrcgx#>zWT?ufRRs}K)KcGwv){cvy~NBC zITwH|4eSWl%SfzU07ec!4NEiDmX^~eRv)a^2AQ{heI0%13sk4gfQBzNZb^G)nplg5 z7KJk>^^s7M^`jB0Uw_2B<{Ff>#83?Gu2}E;AkKg~;uFK$dwG=Br^-$g^K5?;clWuT z+~dXy(N$y1f!F^oNuq<0ew6I)5ECdcPJN_}Nzi5` zS*0bm!WH3jmGNFN<)(J&73jAW$jnChr&%$@iwXfFXti=mU%0gOhM=n9=#%Qq*vcG~ ziSnaxZJC`pQ(F5*>G?@pK$4KsmzOEMvruqY^yuyVo3j;ZyXq4qw%hNjNE^HN)Mg$! zD`N}cf3#y9h?0%1^p_qzoRj za5}CE|3lWgfTyU4hV_@+2ZE|Ecf`w&rU z%P|P-1y+uz5CDMXD}3P+v;|v;wh>WJ+oP&w$ZD&mHOkM3j19u51|gwfGo_#wcD&7# zlk^~6Xl_pV-*I5uq4aj?=Klog^&6djg{$w_yRKR!cST_Prv6`{hmcnf-VG9bW`5*c z3MzNlA--j?C)Znl5`=ian4NI|i0#8R@?zuLG_f2!7~%PzS!W2Xl>MhiSoOX)_uQB< zf_WJ1=d;oY7a`7@y;@z|p>tlopda;y&Wg-xJ@jzAwQ|Gdlo)?kSA{|BB-WWGl(LPZ z4tEjD{XP>QXQ5HPyIqBE+bX6jiWC7`dIlI=Y~iQpla=33$6-Z&AWGZTuC!qcN)F57|%@9xuPSzPOx3p0rMxq zGkga9{~Sc_D~Xo%D)90z@1P`hAjuv<5?WFl z*mL0hG+*P{v(UYRq~24`V9nKEB}sGC=vxtem0viweB!szH9^n$8hqW&4y6+#?Bf@W zWv?D04-$gjU6hR*gyF*Gu#fY@RlY6Ux@orF+q}&8i5zr|PTqq4eM4-D!smG2E6;LT zf4W{UPFwXBot*rE?{zq{CxFxXn5Z)5+XG3tEt?`N>YQ%S??h#pHh4}|2^!~Xh2wP#TD zzF-l`7DMr`KyKwDXSkYrZ>}D=Yb|pce5-(dUosm9 zgdZ>LDkIY;hE0Gqt>w!EgU%}B#XrQ8-q-#$^8I2u`SIWS&2CKlyv z(_ybls1U8P?g9tX{$#ElJUbn5kZ zC0(e2g_V&`S*6)jDVu<4%(!S%g92|~l@x!n9Zx&f(#zR)iUTHf%CMYC<1+(V{y==< zSZt#?t;m6tvKc>b?( z32;n)YP-QCwuR2%PH?Bb!Q*^;*&9$-I&j>)!}%lJ)TcVq?(!lpZ+75riEewyokQv8 zh0~8TIu1#0d`NM5_u3mHMD|{$%#z`#WiIEznbeUmH1LEguGFzSx53#qy0_`P{ePt~+w2QV&*H?bo?OiG{1@LA ze}8aZM+P`j(WruMz*b_LGy_u@Ke0W?)Ols;Y;;j=$c=)Vp899kn1+i`@sG@ z-|i@X9ww?f;w|ec70<97@wVP{JybfcjPvDL&Yk64qMwe)^=XUD|#52X-MU+IMFP?dC@*UNk@b{Ule711OqYJw4ug!FXO7 zsCN1$(3v20v^Nzoa;Cs*VX5^w_}uRCg=NV9u4Il+SUwuXG^rX^zuiQkrYt)Ka_*`C z2sLiV&EM#w4TO8DVOENKjD`tXYI!Ha)`{pqDsKyu>jFGTsp6GbZ;$e!Q)$;QP(qdy zXcHS<(}VO6tVokVPJJDL4e_ZM*^5X1qcSNK@I~C&o|^j}uN3QULPEkzxd#)6GN7-| zl~(LIB6}A+XmA4A0++EO98k9?PaF~AQ#|=4P`9&KJ1g0+pFhbx|0@G3D3wM#6qkc=ko{t741Zp(m^3@bAu0B>aLz&(^O8ziQ& zGa?z~miCGGd;R*1GNfT2=`vN;nBbUR^10BXQkMB@m2qk)FSEh2wDCQFT~CoFKMCqq zS7M(W2IGpcmrCR1M46;+8YMJHcoks~fH$Xaq~HF;GIXs1x06eXLSI;13m&RC@b)V6 zmF5NHMgBBkm2l!W80t#tBb7}%56t@1U#n(3UoplRThyX%=^9I!v7H#uh6hUG>lOwt z@0`yP3TdDJJKAHREl4;s!0EmKH7`j!QJl~Ke4>vjVJq8R?rz11AGlD$kS_{%I0*G- zUZM&cOHpBa*R19s*C7L)Li*Nn#m82l7U?8Djg3)J*Wv7M*InE{j7595q14l^W%2b# zg4j}y4oc8AyOnWcmM_wE^U6E%Y?LNhTJ-fs`1-m|z%BljA^mT7X{!d{n*(tN(Yq7? 
zTXG6UJ*n)mKJ zr>W4v50-zB4W~d6a&n~wp^zaumTCpR5}krC{+z&{mhwhlJa?+jTD1NieCh9)n29}p zZwueRErMr^LdXxs!}MMmvymqZCR%qgPK$SPKOEDVrAT-;h%VE<3Tksxdh1URsp*!0 znkKtI$Vo}mQ{ZXmC%d=x{yhd+f(yr>>|%sNN#nOr)BoT z7eli6OP*AV=v{{*Ov~i?%3onc1*4>$&{>8x`L=P`4iFHf^DiZhbc2et zbTdL}HbzLdh=ioHjFQd`iP53d=n>n1jqV!VvG?7FeciF+c=p`KeO=dioj>tQQ-(X; zwP=p`)|e@+4{fr|ZZ4=s1zHuWFP$0%*oVEXgnbY5VFXqUQ->C1z3Kq2l}0<(WbjtO z!`-Mb`^V{zC*WX5Pz3hxb8h$vZ;ooP7EXw+zOGUdxao4^$oc@XQZQ-Y;5j0;^|Eyh zu!c7^Eb$_0He7;K-Mqs2`BbE!`^dqiP==#xVGjr5ASK^+*~IMeaQ7w%VmRVFe3BLT zWC`5bXjp4reK5styJ8e~XPBZH8ZX;3KAS)f49;w<7t#Wcn50|tT{uqyw>_FOa8#T7 zuL5#nVq17-D=bRP6*|y*UI72Z5xSd7mGvvJqzuv+7hg((7nNS(KOgQ;-*X65C}No1 z{Td$9$9?WqN#xhP;r?g8$A-hzk21W?CkNX~-AA#Iz@LA>u4HyUG%W1RGIHlO12Rz; z+O1t!l5K2PCOeMCUmUdD`5g*1yv^AA`bODy`l3J+q{uHZuW+!V-FfIW1<^3isdYh) zF8HAWMVxg~SM33nQW7g)F!rdOysS$T6*qbXW73AYeK0=z7F|j=jV%WKBG?HEhg}+J z7B}t{z^BwAZI57+cvGualO7Dkm#2d$aEZ0a~$ z`(<^++k8aJ_RC|d%v_QV;KSQ|4LYrsMYWeJ8?0$|KpDd?FXLyYEPSN4nbLY;rxj7! z-6yb`8=+UMy@rDIe9?(>{jfwQ2TxxKBXvB97cAHEg(KkwZ$0EeQsb~9Zy5?TS_EbM zRbyJm5PJ6U`w6t^WQ&c}_oR|i=|9$O#F}}aQX;nQsaU44CUsTp4Oa_CpUEpr+O zyYOYkNqH)aX(YP}B{$ts=Qu%14S9&#k5+#6duA5DhC*FTA`u9NyS<8 zuSs9Fnz|%=OZc?&K*La8yCInW;{#Xo@#xa~ki{G&RO#IV3c-(H=7u z47d3$a{|jjp+9aIatqH=alol+oX0_|0X-#2k~xEil` zAGuHp9G=KeqiKD0S{e#$EPh9sAR%WsR?!cKN|n3X(winT>t$~D1Vl4~xYixw52$^v z(9VLUF;g=iI-T5Rx1)c1RbA(pc}~>PoPWBkpm#-KlH${n1`M8~s(|s1!iyvcT1xI) zW|h8SeY_u8*sd_7=GR)pcX6s@6s&bilnn-lha^*PJf4E zH=%4CY%!F~y%N_dIWlV$EQ2>+3`-dSTlp!kd)E12PAkoQ!O0KOl?UP)ogGnhZITO2v63IcaUm85=T8vM6a{v33D zj<*c)E9BUi$-`1?c%k!gZe&yGzGzF1Ec?8mH$WCdV+?tXRj^9y+Ljc)__ibYtd_%- zqm^l|}GAiu&+90H%g;Kj83%RPCu@@~nI$2+a>4SLz z`!uj}cs4lYBL35yb0g(Z!x2LyBd9m@;@m~D3@V+JSCfQR7NgbUAzE;?-2*WWt;=y1 z1boI(8EbcjNbkBp%Yn!j-d~f?j$EDcj*9WAO4I$G+nvDBEY>s;KzL$W1G*_lVk(Y? zd&cgZI)F7wvX;v*B(q}NfybYXEY*1B24?^tgv?IXhcl8>-yM>Im4&!n?b}ai+K%ZR|$5&Da4`bSFxg{@o zM{w@txom@*EqP|m3cC?r$@dk-dW+)UK`xv@UMZrU90ny1|!#VI?$ei>|q zV>1st&oDI?Nz11ba6GdYJuHi+Fv4~#R|oFQaP^X(w0R~Cz`j26;BdIpi!EDmP0uO| z9%mtu$99>=O?n9$^LsXv@;$oy|kR|nVWTzi41zeMrH(}=atz+Wlj<{!R5>}5?ftTmq z3k{v|Q1Ky4rE4Ya)>)jZ%Ywi6=K%!pB=H$BM@CJc`ZUYWdxXsEv?a|@E6(3}Jg51( zAhC+(=qzEWg9ZfYA{;4OU|jPLM2%~u5_CjErBAB;#)&%T7hA+oo4}IB*0%@4-(7xD zW>+^zSDszTeyD1-^BI*Sk3Xrn>V>Pye-uCyx32QS%u&c=$sH4&-K}z+bs=nrgPdT1 zNy}`{_`q7<|L(NuW#}y5Ydc@w$MY?@eJ5FP=|});4lPh;gReJEM*T)u%|Gir7&Q-_ zDcBBtvSP+=h7RLb19x2P8{*FV!-}%-knhivqXB1(ux!sy+6s9@dwuQ92#AZu|}oM*^$&1(M=cewQ*?v@lZ+ z?KkhTJ`WKOV!wE|%oi(*SJIyng4#MB?SeV{)R`MgjB5!rA-DyKAiZU7;>=V9B`0h9 z3Y+qgg-AYy-KXxqVZF_jWBz?%K0m*+ED`I?)y{!7>v{o5hb zF&?@kme^yQ-b2SQj=U0=ShkK`n-Z#W;|H%?vh-7vvgbZunfs#! z8W|~1{o~`KfV$whetRLbV%6+XieQ2WmXq4>Ij<=gb}TD}#hk^3t=9t6O{9GQ7R|fy+ z%Tul21)JW=#V$3Zvf^I?5JAv$wK${R39(xRflBhcr=vz(PwA|K@-%K{#)%@c&L3A^ zAdvks*esnT*98F?IdnuInfz*ZEp|dU(3u%)N8dYJrS}ufHxg*zy}Zw^0cTOS;|m8T_bdX2k{7`#*S$+K0G3*TOb#>}c- z$zKx|n&&09#~81SKRNH(raxq;U8w8Kv&MU?lA3LKCKpRG>tMjN|E>hn+}K8ZOS)hR z#?KO>2#>RkBY%`Sx41B#Tn`Z;>$49@b`P$;k%btJ9Ln~j01&0ba#I-^D;jp@CgyK? zW%7=wJ6(y2QD77spWdOI|3FTIS^8CFd)-8>c^}Z6GjkQ@xXjFQh?TW`=g_=4|CQh! 
zwX72pVDi2A$W5@V?`*x}{y`^x1LgT{ROYHa+PM3-psIt=-n?fZBDH{G3tPntDnf&P zl>ccX89YET#?O-H_Wf>XitJjlJ6bW;d1pyE^l5yAzspx)5iMt1xTzOHIHTv-Gm3m# z6?xH_k6?7ry75R@@A#P4X8=+WPnywJpyF9>^lA^z8?a2m8`HZkS6)ld(4KwY3#Q4V z5u2g)O_8$htDL*>9IHybjQpT*S^an_gg0Kqz%^x~k*k@^z#9C1sVI46bZ)fEkIG=k zXfPctx|Mff-4Z-e(-2XD!yA6O{auuPP=T!nk_#um7Zn&TzO~#3PiF;#fT1zz(3fJe z^sRexG24V89i?=$dgLb*L1BJIy@iie^5<)*8!dh>A2HebR9D%G2tx=V5}Hpva_1 zbpc#C_;5wXw9hCmeE+%XFbL@Xkvt%?9vRqqfhJzE8>H|ni1!jxLa-pf6oW(jnsvsk zNz?*iY>G?gMAQ2YYa1wYDnw!-xV$J?C$oL0Liz)_^!j9bBJhwR0X}E;g)=Qf>J{B; zh|wsKBlZ_`I9^RkJgiwct`>q{5EIJHdx`7Xdvbr|mK((uup;Z2nT~N_hCU_w5;iF9 z#rBzh@QbW^8=u7KBXI}UcgBlGLllvXQ-$XfPnOQa*F|mu+gfJFx-jD#Kif=97OSxg z(riaC`l8cW;P!Z&QBtahergp`<HI$s|kbND?-TwES!}-^+Fm5Adh@{u#JIy(+l3(v z_tLEyNRK#PBgjY->*7v1X%4Dy>TW^o)`wIwY&i{*FKnBTip9#{6@Y-s9>}OIk}+b_ zPLZcfJjY>ga2&E?SkoAs6Dy=J_C%L<B+9l6| zu&qjtCH<8mR%Ms!pPte^$mKhpH`r^hkZRXop6c}#vDW1HQ4X~3Xjf(Z&owG#_o3{^ zgOTdUQ}HAFYs2B@W5c!Qwxvua>WA!qGh{^GV|GBnr7}0?lw6q4`?(-UZGlZk-jarS~j#7$Bu#CHwy> z-KuaS#dE4W$I>dR+)SvB|IqLndBojY(LAn%(AJY-)fX=#hHFa=!8U?)^@3PykSw*SIL$>-l~f>iqy;iGVvQzyks5Le?O{d4o} zE*og@i^@CItaGe>*~eDSOCvJKt}B->Za?f%l3ZqEesa zLmjmX#ekSt$k<#WF++)s|S}k1{U(--Z-yb3B;bx@ul^Tj(1c z5E^au{11j-HIs8!7PidV{)wcK(sN&Iy;43D=a2_Z&rHkOnh)GdqWS@iP~Z+ ztWL$*TktDTlk8RWmo)qn+@6vDcJD*O|J`{4Wv@Z%&6~W6+A}CO1-m}t;f?E-{vC&s z1z@tn!eh4AL^+PQkGN-1t8??&wq-7o{e}JIydrOam-KS~R`pi(w2vV5aV{T=RK|Cy z*xbtX+02WN9{;X*{VRgDpLiEGjPdoMX6tP_j&Y9yKbF^tj)&@hSoJhJH-F!1M}j%p zV&ETBM~)QW&C3O|8d@^K-M4hrOZN$sm&TEfQVm3UIG0WYq_LtQAEBS6<0tQ5%G?dX zM(?aaO+2q#yb{I}vpmYqFKh7kSxz10Dya8lP>hhGrig+N{}TT1F%y){-yzJTlD~Kn znYte|)p(8yDaagMj~{IV(RE3xbvEn#0HuZ^2(Mbnr7G15eKJu4 z(k-F8VzXYDzA+@WxtFfo%QEuo`hz5rr6lj=e$i}4thPF}4saqzqS*cH$EHl7^P9PC z9qHimd5zK54)d+GPw$A&WZ*TGy8Wmxi79@7>PM0d?X3TnLHF8z8Td?Z)7sM=FwI-yxF|>ci7XF7yU)R=>1})A*IC~A4pD;rQh$zq$SPb zUnbVrFD*z0Vzh`E@0=^%I@~85DE{=dbAbUY0iazD&0jwDJS)0y?79T zT)ReP-qMAPs&nNXy{z+eWeY$2whRQ+Q8hFrfPLMP7r=*>Xoq<^JaVKSva%HEi>6Y zfn96s!v=3_#tLk~Lry(4Jxj9NCxpw&T~bDLsIStA@gE=Tnc#Bf{tks{8N*&Q#OaWS zP*D0T=n>Fw5Wu6boNZq&njUW3 zWTIi1F(}(=81GW%N5b2xRgmmZj^%Pnsjj(fi|R}iGfhANhDx%^>+sUUgg;n3D?Pm7WN-T4Z8Az}Zh^_55K zL4GyC;)TzZLw#Ho6V~*YzFxL6G{d=U>U~Qp@y1lG>$C)s!&?tl41icdbSnY81N4JL zd+M%i^i)~}^0Sy{XyqKH*H=aIjJfk%nceAZ>xQji;$JYY-ctXDC9b9w4jvwE zkCASui2%Sq4n$lU*H_EQu`3GbwM~}P5svln{GOOZ9uk@byuK^3bc)q{DwsG-g2~B? zF!xk#_@E+?*yirH9GMp{ zwHYVi_5K`iaBJmbbtQ)C`MKFC<&qY*n?c_Ihjfqmfeh*t{&&Kcq z^>9H|@M%8>tTb3*tooQD;CM|ow^+u$c=D&YpdyC5yZ~(}aB$-vL=x$XrQj+bQn))i zgM>N``S=9N^2N7c+O89eEdZ&lb(V8#RkG{4vv;-Y+mBF=a1)-?v>_83($<*pp(B}z zk+R3Eg3513+Ja_pwHSVBWYP1?^HwFVb}f!{-ghWJ;V6Oj?h|BW<+u zHjv&}_BTl(S^EdUKxMP_}QgPojOXA7yc$`F%cxDFY5Vs z%Lso@EDfPVzG6ayU@M=|C|hU;!~JQ9iXM|jdwU=6FM_=hb*izh(iOf6OpHxTMoo@R zqx3)|RwC>?{$nu*?Bj1}jY@adzY!2={g8V1A6-n_%7^4dIJ%yg(uQOyvxf+uoWAj`gV0c>5TTD3ir~d%4n1EIC?% z!TCi_vk98OYVk&DkA&@bTmBo2!WeFQ^$qqO+F2|u&7WGnN>)dsmAT=x5NqmDSf0rB zgSA-Pt5+#(%(y=NK^_7A)G>kk>R+gd=#=&#nlX`tG3#qLF%cbLAw<-ql&EEfMi!WU zO`utwf0?dG{yjMBmq%Q{ly~XP`=!s}F5rC&vFH3Fk+ZEKpH$26<}_y$nvbz1riDj@ z?Fynb#j{D1d>pgjS8YgaXicb@Zpb%L$3o53D|#x$fD@`ipNuWnTUo+B-pOl@&E zQdLZmbzRuXIm5?s711X~`@J$`Y_p+5Ed-<8p~Yy|#C>Aia5HW9tpw*f)lC@@3yiA! 
z!c*U7%H4+R5dnclTbCX_2>o*nHat$rq{}n&v*ID!#X)z#dyo>3HNOWAZk=oEX)X_U z1`}3YgSrfn|7hr^wq!?LH0J;O_#~-0%inN@`{G|_yl3CzEuxmyD+WB z!C%37pG-%xai34B3Lz#E;vW6kI0eJk9O$X9T?`yj?c0rfz zPy#Kr&Dr|flR%FP8Doqw6#5nGerm|7y6bYFZ=A0Crs3S7ZcI(Uy^jAA?USk_qT=EP z?&c$C{@@#7&(LeurfSgO6?9-tP*cy|Y9+MLrPWI(H{)|Nm(epbPF*{*I$+@!dcgL^ zQVLnjqb?W}?ES+#SWzaj-y(6beX$%*?p?*Ki*662@T!ZXitMjKLk~(JZ^s_&5~P2+9f;ulN{`Skkc0xFKcH)NkAd@zKpm57}QU@bDAEot;a& zmH(U^`Mb8m-`5r7I-J}ocXu5TDv$v_`C_k%sX4@e0mc?T_#A5+f(KRT4lDoLV~zwY z08su8$*xv6xu2o_(Of|?Dq&Jb2s7fxz)fwQe>9gahk0-RCXaX4XZe7ph+qO1E|J_; zwPoFS&K)u`$y+PlWr0ndaDp8|9cCOjVZp&$(UYpYp&jVGG8^k>uzla(wo{&f4piN4 z_%o8`Z`l*L-cgf_v07l)&I7*}*sU#7Oxu&fnuN%ioLc~}65w{wU6MviDbzfu$*q~O z{uD5A6EE8J_v0jzth-nry9i)OWSE{)_wcTlJ&TgaIqLX0Cwr-~bwIx`hX^hp+Z*pO zwG>Uy163dXP)_c4@5=w|o7kK%cZAwIIgXxay?zl;KOtg#?@1b%8}{r+>oQ%nROaE; zrs6faR<%@*>NBTG@|yY{>Ic4}+R5I7%3yv%UB(O>Gh^oC&qCX6!U20p;uo*^;m)|3 zSDh!H6WHRY950F@NfkjV>$wNQFVDA)a}H?bC9k|bXD}9zMe79zKd#7@q1(B8-<|{8 zf5*N)z{t42+%e=+kTPioaPLB@a1;JIX;#zcE>>A6B+yRd%{GbH4TTUNoa+PUSoUy@ zc^0!TCrxgnKd!ANeqj<`)~CC0n?y@_90vw&#j7}aab@fGi_x9~E?ox6Ss^FC7D;*V z#f1n9g|!$=0yytgV<46boS2AY)aeM6_qhRM6NDLSYpVj?dd3-yY;Ih1rh?yErOfflc(wqhTQ3pdukLNP(^;iDKq=Of0&Nhp0IB>-=%_o(?bR!k&S-=65@E*Z@;<1oIAX6_eeRyi0W+< zgwO`)IYo$loAy5A+&n1+g;J0Pm^|sE;}A&vCn*JW9w+c7X6~;+vuUGc|3*!OnwCTi zEjB{C{zNj_M!Hxo=#*`z0hnFp?%*|fBt9Ia=g%%5S~Z&1_tT9G_n95!9-6yl)^G#Z z)JK+ckGWI-6hXQ(kytLaYwkX1nEOFTf3mw2E`ew~xkp2VR+FQ3)4m4+# z@>-$z6lpPY!Bar5NGr%mSG%Y#D=3zq4^Jo~8M!p|2C^Gz73Rs#|8{AwA{kv)J^0OC zqvbfG1uAouk@?|kVQOLX_uY7}TX4m*I{B}|?Ak4EN`;dFiQmBjc$E>*Huuwzlfq-y z=bcf5??LBj0I8~)-^wFT%`WSxX|7LhUOxkOAL8y@Z&6D6S~U-?PA!9KwD@bXz~cOr zD%tE)0~JjY{kJHYAh0>Indp&Dhuu|{FISgJ9@V>mmvduX?rC0UfwpZs2G&g1Q%&D* zVfnlZ-kFqr4tsnKPgx(@g+a)cx6!#x=k5rFzv>BZ% zZgDIOpV>KJ=$XM^{VQuXiTgd5&sy% z8DbQ$MI2}7XQJtR$$b4^X`53LbnSWve`?lb64 z=&%d11&8{kqf6d5*S9vz6lMc$MdYXmP%nmlH1V5+Mqx%|iN5m0hMy*zLDulsu$E1! zX}z+N%pa3&Tsb#C?^un0k)ph6o~&G=J(njl>EmQrJfMp4z< zXs8#}-fV=yPlu4%8n(DSK|C;a->GuZLaJD!o{W>wDK-Wy%f|;MNNi`ey}6uw9zTaR zJk{)~ctS8z9_Ag7dun|BK%jckkE|?UZr#F4C8=sT(RKwiorBs2PqS{27n>3+8d3bF zSN10s4VA7abpF!|GDx;6(Sh#hxLj?a4~!rt75szt`3#vjzcyIa$}yr|hoKT3$T-Fd zV6arj*9fOs&w}TV7YuE||KTj~=1g+c2|91I%by`fWc2YA>EgWF&xE!>-jE%HQdXE9(s3!LDsSIjv&!5yv%A$^Pc6 zt?`$8X4Z7f;TA}6&}1fO5Gnzitk@Q~XkJs7q<*4fi&RpvHWv}I zcl|Ve+uq$hASj~&GFDV4E?Y$-{<>lmo^4)_+!pYCTU!8VyheTBg8;v(Uvzfx>zHcL z*)KYH%%4>vx#p>a&W#hQ7OHsC$VW8+Hm8Ckm&7Jvy%5HSXF|jT4dWW=bUvyZZT2Oo zn}}9!9m&K74e6zP%D`$bI#*LNc@Fbdh*?CG#}^FP(yny#(Jmff)RQYrEs;G|G%ou| zud9}BX?_>6d-myhjTFc1Y;Qoc%&jyTo*UEYR(Nm`v5WD|k(ePJ&Q{kwBDCBK=JZ2m zNVfDpHzx0G1BFvt!u|GprM9?84U)<%u;8uh=86vClXOo4}Sz>x*P^f)N-QzPnP|LsM(Fx@e7hb05@&ieS zE&s*k|kb?p)tRwR+Wf(KIj)L0t; zag>|>TSCO1Db8p$aWxUW$uNc>3S4XnEk}FtWW1Ak<~s%$!R4~{SOW~AY+A?^w$Qj$ z8umK!;W}!&B+uU(TsEw5g!8&d2 z9qDe1P3S4$xMq@xJ!T7SGcWCQWiZ~l?-Pa*HMq{dX0Q%99g`^aOkzWNlr}B!Vfrpks|v|4z0{wo4RqBpvME! 
zlxS|wlasQq?OsJ;@9NSvRUvZRaHFGHFc0U4A;RF6@c=lvI{TEovwr5Qni1Zop^y}k z{AZu@A3g+QQ}}Ey(oJ|HIC<|gCHsvNn6w`CUmX#8c^QMtC6fTW;P$;PtO*To`)9SP zkp%97_woeundDYbY~KsL>{QaJn(h5dwHcuzljgb~o|;|J^?G_!E#Di{sKqDMcw75e zd>pnf$*w`?Z<2#G!XV6wi>KWSX|+eU9acN#s>{O!c&~QzgRd5v7al8NRLX4NNlGI- ztB=qo4u*aWRQ|U}z+AfP6FRu+c zKN0VZIAUMpvRL=e;~VR-P9OdX?LZDCx^0c@<}on%rZU!zR)a8|mKy8>3vYq@AI@jD z1UEW^3uBcv)+v3k;cmy*RDfnnZ<)Or7V4-8Z~_UP9f65tb0Se5oNA&3(Voz9YoU~= z>+Gr*WUpOVI;OQqplJBJt%}$d8Oe7a$JzpSr>-cBVP~mb3_cd?hK&8|T) z!(V2HHGKum|2(}E;F}_Mu+WZV(ZIR+EEWwbRFz0*cUdL5rXAfxR^iFiw z&(flB+RC7kTNS*p3Q)P5gql}4U7p*8=d3Ox^ZzKYoGlen;o@kE^xf*Z9=`l-JUQ;p zl@s+(Ey9JoncbKbCz+((pIo~Au!69bB$?E9t*9TT7~<1GK*2BRE`ox{XJs`yD@jTq z3FC(3fRCmqiY6J$09pPOUrV7+Tvf^Z0H>6kx^rsp<+;n(N`2sLMbmmqv6c+W4VS2o zH%`k751I;VumC4wgR10IWz3QwXoAvK@VcL?v82D^!X+r4*^5<22KmqvVWRkox*?h? zQ{XSz=w5(DYLPffN0*HoTGIPL!5lrB_tny7p7b+=Z~M?aE5b_sG^pT;JE4XuyIkt? zi1l=I*T&j1;U-SK6MHtQ|TvchkXo(x&#PmG6 zY{Dnry*(e+GgX?Klj>hL`BhRyD?;Eh;GpY^M-5!$fhPNxZnLLtw_f zptYy;#DIH%E#MIu?S>Biz*5Ad|HbGX-R3#1p z{J5=cWuXa{FrYng-9iHtF=8IE-MPE(<#w=-IU;^SkTeN;f8-;wDmk44yXjShN7%Q$RrIn8_7u^WSeN7IeC-5-}2fVoRLoV&E)Y> zwyA@AJ@b!)p$Nhyjj4njzllQ4t%!+y{we$(BewSusQrHw*6fMBVmn<|t2?&l*S!Vs z;h>&Btue6`ba-+b{s&i+hR6< z|5gdlIG)<3{%`MyzAIof|1aK_Z`(!|d`NO*{T1H2y>e%IB;d~t)AcRm$Yo`naGp<= z#c7h3KTfa~ZyaZk`GqwR6Cs_oSv~-ccG2zofV~+q)PHFcMnjGZf0I(Eq3^%nV5^mx z8aL?>R>Lvgp<{Qev!c)vhnW`$zFGYZIehJHg=-;jTFb$nBGe$_!Oz-WqSj@%jr@V5 zb5oh;E6iW|K61O7ZWzn*_?7mRUDeM-?7o>i>J-UHo|%QpqmZUkg{@Aa^UX-*6F~1X zFB)MJkE)CK-I$l0Acp4(4{G9pmj9z@K>UEtKkB)FKr5_yQKbdK;aP>%LT^s`hBT_$ zM%EA|ekOrU7M>Zzl(69xi6~=5M{tE<7?EW~NbiDmDD_S4_W9dTgJFmYnUq$jq#FlKG2Sjh`$?OY1 z9xB)4MjV&*9GppMhb!-$t3FOejM7FNthDA?{4p%;kwj>bTr(_WuoWS`g)IMM@m|mlKWQzUm^FlVR$~H+3bSv_(8hS zCH-nL338Rc@rKb3>^{?t^62_7uHJxBo=6Bv>U{mefGvYkiYWp9Zv#DRZ_eZRetZC< zoKfh3u}NBf$!9V@;zLyd1NK4`JNbh41U)my^aNQL;<(0I3C!cp4n}rEevCFx5I?X3 zESXBhwa@pOg8B^5ox+wDZr<3qLRa~_-WPvJXpR+yvqo!W9CO3-Ayo}|XQ`xR9cAs& zz|jt>=NY5H{sMA8X6A_;rv0eFpV=%qc-b=zB|yi=tO(KOX5OMS=J0?oGU0AIzHhE) zvS_^5mh706)VwiC#Srq(JQG`L>lU}d?q~PzL@yT70q-j=_4#GG`4YKqVF8N*`Fd?^ zZcsfrmD-44LG@>qt8Hw^ntizyk_4+L-9dc<$cVvKlryk;5BL0`@ejJ6BGQMkt&aD? z>-1@aeqJ3Hh_`)l8%y;D1^bIv7x=>UbNa;kr&RN>dRoVw^@&Q~X!=WT7eoxj1F}&$ z)!SUI)5>FAZ6;?v9U2u*Q#!}CZrH=w#>wTGXGsO?jvPuZ=iU60R@MV2?tO ztrYo)DgDV4!_4SE@IK+QWne33A}ea@}KC%iMisKrW=(I;#F(` zJo3wWqbD#GBkJ-l>{CvQ8_8Lu@=czNlMZD&XZDXHoj#ZK{yPTROL?iP&vQp5fKO&D z5ExK3UppB+)OC|g-<$2Yy9WnJ;PWj#z%VI4=|s=O(@Ej9JtxQ#-ZqPROFJ}_w~_wL zXW&1YmT~ACJP+|S>!F6R`5>hdf5yVbf-s})46Yh{~+48pAr zRL(p0ZwBO~RLQ~%cJDLY`D_ARiW%D%m^m`!hukc3(C-WuEI#{$J+2-+frB@P6UC0? 
zH{o8}KD;A8A~GO5?nDyWd10k8cVvhd5~{Q^5V4&%^E`InUttUO?CjTg35vAiu7%iHhjpW{2kB+{p}u`pkZd%Y8{$sTZ?lo$s+n#A8T@~U8{ z28EJAbfTCbWQh4b@C(+XBvXAgK5cAFN1h?c#|Lf>W!&4kTs(7<05sA8f2IY$uk4() z$H8q@E*vf*jOJyLP_vm-o5z36ndH?L7B1cINnFs*gsOheG0jVYz!s(7ZBiRzc2vB~ zOm|1=GXKjxm1-yrlJa96P#9}c2igMS^r{JeVnrSOttZ8M;zIE0g@(?vKWF%$h zBhtNGUAjkxP`gH3#$5Bc%rH~F0OWA0Shc)rjtOdIlk+1&v`Zh<4-9?OYgy^cI6&_1 z{3HqJPbs2h(;+pSK*9||JF5Q&Ls+0gXGeV-`^jMb()6s)cuQt)At`m1@NY7?Xl zfH%*;6o=W?85z;$|D&+#3Zm0#cWOU1y87j(LZ_#k!&%_IBBXqo&W+17TLsQBC~vfE z4b|AorH+IJko1;qV?*8`i`)gE)@~0OWV|!!qqn7_24HM|Si?KX33$u?7B9X9?`NK8 zW^4Xc1@vCDEy4FgK)Isky&D>M{EymqiO%`%>C;8n@J7wUHoLQnE{l`$nTT=Rd{NPi z?D@iP1bWTZO>w zR$HEAhK5EzsP5vIy;doC2mdcuUi#mb!)_vAPa>EOH z1xCGkvmT7HB7xML@9lJC%m}khsS@L^(rkNigX1~7FFSb-T1h6OjTfQOt{bZXA!rr4 zo?xvWukdI51816=>vZLniEIuX#*+&{zzfyh_k1NmRb{VpzdDk~V?%B0^*R7;L~~Mr zP%&ZP??vxzs;K?=VN#%f>6X04loLb=ILsv>clPVDZpT^@LX%(iSo%DCGP=hzQi8fU zgs$WxI>BIKB;6LhdkG}?TD3MM4>!%24RG3cPk5i1CSGd5Q%~9w%pdOXFZC1B9{bvq ztzci6+B7*s6kK-a)F)q_m2&t{g|t0Skjw+T%a^TQIg2md;F2NR-O1$XU%^~4momXs zw#fTa3~Gsy#k6&yn^1p&WtVoDqDrAPw1F4RVGGM9uF1bE788>}g_UIC&c9p4>G$?J zBP()gpI$XWm=AHGR9VQ2-rK0c{7S_BkAiLVs&4&ECSaS}cz){Ri3BOe{Y^tKnl0q& z$SP?wPT135AZ{(Y#KrQ{LS?smmP~+0jPi`Nl5TwDU2+qxd(GTFA7>n#Vf3MHA%1)U z9&En5;>^lKap}p69vQx|gsEiqmS~ulKRkKa0gAoz8TQ^}Mwl`|iO z1jcNyay6cm(KdI$Cq{LomgtsDc?qK<$+9;P&B+&|S(xU6zP3=|@!Pk+$&&n?SM%%v zh*k|xUi}TGTVN_hlkk`}Z%|3peYXJcEJs0-kcTj^Idi$NndZx*^sQ09;T2KMibtvk z(>e#V-%rs@siXb@(#VZMP%)uD;``!_Fl{hf_D>W-`(9q%SW=z$7P#G1vm}UCFMfB? z6NMSqgP$!68wKa}%bw53d+j3JSLhy7bKwzY?^#%-F?Tq8rwexs4Nx<6iAPID-{8g8GyOp1Z!AXg^PEi z$ySAC_(i5c*Y@}OiIh{#IgwGAjHFA6(Xzpeo3*Q`4uxwb=U8Msh|e+@3X`IE_+sT9 zl74M1Er8Jd6w@=OJOslLec}b5(k?Wp?Wl$C#)sSUY+~dW< zvN~t_Qw_Q$@0r#bWigO4|NPtp_n+pr>@fopS3ol9d;vz`VGK2<52&mt=F`uPVMW^c zM@CZ&tP8BNITCY`!={{Vky?jm#S7Va>SyuKds_jq@LR3TSaz|??Y0P#r+QEo(|Zy( z(pU04uef)cQp~PL;Rfc&E+SNV_r9=c%CQ9C_ ztNmk_U>aeXOf?lV3se?CDT*zhgmN91JMi+sTi+un)GHr zvT9CzqhB|vW`-!iSqEiWTN<>to0ncZ&!Nq`DRc)gFRRj%>(nB`OsXHZnGy0MJ$3AC zPUSH-d06@R&V6~)R;wVe!UDaeUftB`JH?&2J&x3XYxa{qAHohX;z|6N_C^C@rn)fi zZGdw~jiN)odruhyf0hEjF0pPkLb_cD%0JraPTrX}?7r_x&oLLc(%N#;#x{N%z}nNa zs`DS!!gal|*N>DCwAe~hJ_o3Io0_ppWE3qL#MaZwSHaO*zenY^zcp2MVA_+d z%n1k%3=XRsUSHz_SeX_CRlRSSae~-3S`YAe#^0!Mvb28aN}|2qwzM=8$$$wJey5Hx zY|L$XHAt-asrYosT+#01E?N}h${Hph@MgelSCOW9e;OWkvSdk?y=p)M#cHE;40vLM zEp|@~ZtNrilP%8Kh??Q5eb>^p4RzpUySnV+7Kr{A!h<(uWmIrSgN9IcQv76Qrb5E; z!Dv|&qcP!DF@jS+Zm!*fRyX+1|BtE#u8gv2PfPZtaR|X4!|>e>j`kr0OvfW)$yFXq zQD#Miq~QREw@f?b*=Gc@BimXiw`_PTFx-}rSW|y}iO_Rak9Y;Ozz8)ItjZhXuv~?XG<4+az6k=2| zl>F~TU!N6mYA5oG0ImFAXy)h1af8|c6#&J$x@f#5Dfd5(>WkoGp^xH&9yp);^54E^ zCN>$=Hl!NIoqy94b~Q52nVBJ~{>z{JT`m&yI{+U(1X z+?Ly~kEO<+_oC|B5DiH~Smd84hr#=XI?lk-d7*_Fv|(`WFAy6KOkRaz(K;yq=zY_o zuhUfYTiuTu?56(m((@bTCklGBdgMz(a`e8BDkCu^jq-snxX~=g3EdZ=lN4Qz-AHlL z6LpA?nhTS>DyY76u1cututDlJG<;H;F@3l@DDeAon}iCJSx&N7u;v^aGlwqTNKGD7 z0!YoOmy`^7+ZXLwp-JHfsKsszZk@-Yn|@*qdmFh@G+OB1CcHSegM|Bh-*+)_i4eS(r63E>BfVP<1%AZ z$}@|1NNnl{!c|JJoRx&o;R?X{_b;!T&m9N`K{aGx&jaYsG=ZSGyZ3?7$|vJE(wA^* z{X?iHCE(eYrPaW?%WcQXIy8S4!2j1){dQofb~A8#{c{5w=pHOQs9NOPxl(7D;zK+F zOt5m&_%8C}2J@(b8ley0%Vmcy3LS7H2dQ~f#1hE`_dXMDt5r9L$44)QhuOTf2I||L z;4`tF=CQp=%JW)I5+1_^Gr+!3+)+e*B&F0_sN_xvKIh>tliH|)&JA^fy5Sp>ha*M& zDy&6C3k&nv5ZP6-2aVO>RiA&j^$B-I{trb>=au0nnMXoZmU#oc;fp1{Hr#|&;e2da z^Yzx#aj7J#Z>Q#pzCvX~cNW+lnH#u~H|YRxgZcygF5N1Y&vdH={b!o8nob!vhtnKW z&(^{G-%Z7o+NAZeS(f7Wca=FUNXgwPNUtV^Sgs(l(iyF=VtsqOJFspJ*|hobYPIHR z1pKewf|9G{_nioKYt7)2{%bpr1D{|RCNz?NdPD##wPM3a@W_9NcKWKp6Qffi8f0rh zTzGDr+-vjLm79O=U2XvEA8O)g+injQZz{87R-KbY8^XGL>AWIW4P*9NU`qtyEvGO@ zK`z 
z@d-pbI(V?(TcT-{PxPqnRpolx#)^+%wt^KE>)gvsAH3+iM9qa{&bJ#2K&j8mn-fgU z83kduW@tEf52(_05hKoEhS%jV^Isq{Nlwg8UhGk&w*U=ffF;IFsEFyJQ70(e{BTGq z$<@Gg=4f}SZo<+{KD_gL=8l&ax6DY(t5>DloX-&9g5yA%cGEQZFXf*=l<|WzUP5?3 z0iVqJZ&~ZlK_W!s;y^|+mGrwq{}_HQAD-MOzV58*Rbf?{tvH#2_IX071a@Z{Y=+O! zVK|x)_AUjz-Z~u6cFV%_V49~(oB|5H7F(GsN>d^uL7Py{iB^}63JXj}+c%y7s~8L$ zjMnwYUHU*W%S6APzYIcE3-dB>&lxjq;B?Lu-BsQu`^L;39&goIN-v75uFF@fiIAT$ z5swnmC8{ds_K%o1%=w?gMZaP+q2K1}k zTR+)nw@9i(!kcnmQtq43eY>75)@H=xZ~Zw#oRIq(TM$d^FL-VxYV~DHaJnL4^c~O0 znK@%c%{<@(^nT(9;QVLMWu=ac4M-0_KTu-vlx&=I-m=Jq2$e^}rK7(i56^gfuY;@G zWQ$tcGt}qMxh7K?;9TXL@5ej+^E*c}e7~~HEJM5M2MdICYqktsi8VJ7Z2CF1&l-e< zk}jt#&jh#29%@n1U%7?qy7ky<`80+v_!%=BA+_aA+K7mvRV|4JcdeDV%Hg#~O# zuV3^3YgDhx*K4wVD=lmx#SN36O#FlMUJ|RXHc_mYNf)d*6NG^krd*jo7TP5unGFN$51&6e-nUK3T zSS6SxwE6MD*^>~?7HHDr!4>O}ak2kV{K9+Gu{(xD_}g&(rx{qSwkaUgCX?5m{2lSm z)V5+y7C=N>c@}-j!PNDu-jv4 z(YsQ7MQ*Lar6aP5_dm(s8$PHU6fX0@X@h-|z`vk7zTxehs+aCBN0o2tzK>6#k_ih;E5Kp5V0d$o5YS+q1V1qiUn znw?RSGpdCg?WjOytI0;6NdH(VFa0b=!ODmT*<+5KpL>NolK}Hi|D*UY=mTDCJC@2Q z>PDk>MC3jMLucUTeq8ke-&pNZZtNHxr?Ni;cq;!hxk_iZf8Ax>}k51F5x&Cv7( z1sPjhHa@yR^!$!}(*!9J2u=(iHR~c!HFJQF6_u(P#!Q?8Uf;+z$6waLdrk$ z)>~T#C3rf-{0GgX;%GHG#Z)|i*3hs6)~SA0x9QPaYukwiSXa)?({=DvV!ns=+!@%{ z%ebM`6}Kk@FCYD_V6L%AistF$waodO$#95fei}6Dto_lG zy>D$y)8?|tQ57_;NhQ~@@)spqITw(A`ZNEdyuM>qq|3jJ50z~L6sx5;8s{Bx_-OKv z;IyZ;wCf=oP**1Uo@Er`OqH@l1+y2)KTg&vvSO^~@)n2B^wJ)u_UpPu@BJ+5O?^A| zrglsM>E@YeAtYD&H8{RLuQ$NT#6d7C9Xb=ck(fIAuA-!zKTF<=-sbl^@8De8+~Z2d zePWV6&^E{Aay!42aeHyh=FMrPJR|hk_M4#Dd|BieK&?W~(GVx&h~Ai3iOlFcT_1IE zfpiRZ9Fe0Q65e#zPPR>Zx|z1#L1yBBP|)}?=bL4nAzKg zi|2EA6NL{((TV<^C*x-GUcoS~O>^3Sa}y6`_P*XJ7P;oZKgt=_p2Y{3!s(7te&y?Y z16zWd4K8t+I#o}13fl1RZEQuboEVw2FA-~-g>qX~8-@NvD6IzZ2S$@&wj;(()<#uP zhVFR=(eWnZwW8_U0GcLA$pyY0tx3qs+UbFhfj(-6D7XMr`4Bg=95k@HWl^!hJ1fL^ z=;)ZK+RP^>87Ndde0tRhzSH(3gdl6Ju6=Bv(1)N)$HP+ryx4G!TR$u;Qz|F+fTy*ei zGFAd9k4`*Een`Ee5E@)<(|u<-!M?&ndg;avdF-70RC;2>yy$Q?0yyy*O@Qv-ci(}& z=B?gYM{eHY*P~DLmqOI364UnlRhX4!Nl_~!@(!H*ZQ#gVhwpy`gssk^e#$A&9_#HA zF8m|AnGVR+tXfln&@PrU#xC|^?uRIP zKxA8z{H0T)6b3BU@69cNw&gb3Es5(_B_5q}LEtG3ztCZ88THg&U&YFS-lUe_oZX%N zlU~bR(-Fh|(sOur-LK&rb_1(~>fr+R#cd0JVu63fFwva($7&As;+FN}17WnBH%oeG zfz{>ft4fChu3fn$CA!zM_USgAX#xp}&S9^-47zl4O}~xF}8aE^t`?YTo;s;R;@*RRNN<4nzDS;cY=f%<&L{>OD-Yjskr zNy^(Q*6Twx%SC5D=Da?*?QOM@Q>o>z5#jjl@#RnNrg9rMG_h38F~IDV^ZzIuIKNJZ$(r_J`r6%~ zovQs*7TXZ6(yO&*d_4fGQQC5}5c3gbX$pXIzl{uT6OVzg=@pn85E z=Hr!?A2oMoZ5Y{#u+1`{PPka=w61HW@|+0pE8}ki3abMeSC{FYpS-BGq$tPV+@xf~ zlWLV((T=3pVa}!3Cl!is@ORS#Z)7E&R<=qJoC#YD|cJdgL#nCeILoX+O-<3 zSYF?JxVqDy6=SU}91`=-kjx;ja#Yy#?2b)$0M5($CG0GYFEnLmT ztcUoOzUI9D=I8m9GR(8u!`2xH8(ikm8&eMtQMwY}dd;gOh3dp<0gg(uO&29^Yn!U# zhJO=CPbhB>?VFQwVa3M_>*QLlImkp|!po{zlN)io8(-f4r-M z^g%_?Wj&?7_ws;bj#&}d(MzsV^Tu}gEjNtYPVrV+4|#5za2l*!mcn1i>lN|So1xFh$nmCN4?!_}^|7UXsl5MY$-8aG+N5Fm z>_cas#Wq`Lz~-ih=cuoAv2bhCwNK^e=uN0Ii`BmhheoMQ0$oAaWG0r~J4FmR3g$1Z zsIuasZPd%AeiY;BZGIc0;kEI_D+BTEc4+ywyUB)k<>Q8=ti4SstC8jJ#(Qy(UR}@D zHXx4vI@9ce7j_elUm|gGd9;m$6{daZC?EHCxr${3f z<%E6K_2h&iJ$bD4amTcqdkeIB-eO@+Agt5({_4c+OR0)#=yKx6>ZM0`$Z&m(d!C7u z*ud~YU7yR2d{~xlmov>Xz-iM_e7}LI!prOk`2$o*wY&6wX@A;J?@TOY&+}4FDLDq1 zw&KNSHuIn^uT09eJ0v%tNe4~@arI%UhG80*VX0{rI~S~52qb%GlLIu2<`ji2zIBhz zF3r$?eA^>6%H*Wrp(MmHZ?-)#eL%H&e2A_YW~z6Pw>CkZz%Y5gV2zsNjwohTRZ{D$H8jyp?ML7u zE&R^$WB8ZtyKGNmj0$!PT;vUcgp?;&G%#w&y-S!(c*v}f_6pB%Wka!B3fK1!1!a$u zR$ax1gX@Fz&Kpw>GjtE( z9U#808%VY#?}$*tanbjs|8==FrhD7XA~=wTrR>#Pd<3Cy-_$Ax*+lO9qF}Bz90|L> zR)86)f0%cya2dr`eZXMi2AP(}C8W9Dg13ZZ#i`VR1ZRx~esQ#(^&2h=XKe^B~&eo%T+OM?D^@!Af51fP*`ui09 z;(5g&g^-}sCGTTEVhSo7G`(rMj}MzJqYlWV_%IbxN?Fe|76&a>{`mCOEBI<_M|8)G 
za(ItWmAV+~W|pnt`@itwrfWaUr>n)c^KzAbj-KvdGWvWWqW=ic*^nlSwYR(f;WjW; zbD1kP1RYlm0;US>9us-02ASuBPey%J?m(gYM~90zYC2XnO%Kc!(D_+fqVi@a?E~jodtH@ zY}Bj&NEEa9i6Nny)Zo}-f#j(ZkF0ip(e+=yK*Z}Ehrd|~T!5#1jvuF#J8`td$?W5l zl-P!W&lSbwsyLZidsz7Vi8QE|i*P^ySmx!tdT_bl{=>f*w-k8KUgF1B{{&`uA%3LR)m5RSTZ%}5BqGM;fj^?8;M^?7Jn9d1rx@?N-MY1onRgu zP$RIlVq9ucj{!7xxBr6P4O2M1Y0`Wad=|ca;l?+udlb+Rj7zd$3J6h2c-90yi5Lrf zG2dNQ@TZkMkjv?MB27y5km>h=H5Z27g88udhebsF0k=rzwH*r$w#O6jIjN@YB5t*+ z%J{z?t5WmU!qwDfLy3rPf3?O9&>Ix_J{1ZcirXdTkGYyh30EtvtG1+JRZdWZlFD1B z^bJ=wO0rs();iL8OS%ETrY+e^IW7y6Mgm;-9AdVm>EHe}08l};me7T+_+@LWZzrTZ3V( z$!X*7dfGqrdX2X1SO45&Ct=rrtF^t|Y4-(lbMpntrxgu(lZZRzYiV*!PhX7*Ocf@y zGzx|xi(T*p>Ae@utuErfP3v6>>SCZj$+&x@rDGr_`2&`}>+p?pB>naOGs%tFP+N#QSSRsX_kqm`Xl|k~t)4a*4)p z1-(1^bEK8su@O5zOaq@P{u=BMR@r`{RJD2KO!Oq=7px?_=p0kTevPJ6tXxvRkm zKRNxXEdHlEt1Q>V z7$z?ULq|Ct6BRf6IUnb|BiyhIU$i?azQY3=Wj=@eJs^H7?JH4bpMp;}Ms2e1uKZ9` zsSHYbl3m;^hEvJf(7t?2KyKQU9dr@p2~K%5>$$8-y_u=fXCe-I3o?|2mBdu7sk_FB z5q#DpO>*N(4dNbd@N!Sc?1pC_m?dsgMRi&#nSff+`)_d5CB%Atqadi|=#eiocEWr5 zxJE5~NjmSBYd~v6VY%w#w0AX6slNA6Mz3~-?1t&AU?XD&#o(6OP;u0RdV+Ua8Pz^} zU}afES#o^e>n`tkQpq*qLj}q3Vcq&(a z!WVDY>*D`WIAJY^$`LNjp9d8`9EO@o*cESQ5*M9QL3_fm$Iot2`F~%?vy@E=2X705pj7`8Fg06DOz>mJZ~d69E+?;O9LH?C2uvDY z1>pQSm0*lWZ2>qA4D@+-Dw6yw!z4OgPN-R`Z}oy}$9mi}qV_V*0k|=8lDw6dgo|bxh$SY_cem$ zdSu3vmBkpe4|T-R`8+^BuW_Z{%5tk{S^wd2OChj`jwio#l&?`PoO^k8K5ak$AArlB z>VFh976CY>gUd&Q#JIJ*B8s~EYSs_QgpbTBY|y5@frHnkDlVH%zAiPCA{J*=aVJ@-yj{2pgu)3m8cJ0fOwS?8DdopiM1 zTvpFHF0Eg7*R!-);j?~TvHz&=;kJaS>@1~Vh2s3fag7jxe2DV78ny^lpxzH}+rJ3ZPd;tF?z?d8O~cGYMl)igySwA_?ECxy`{jPU?t9mDp2z!mf3Ed4 zKmj^8wz(WP4z2Fk41K#8<(C%rsbtK2ygDTUV^JYN#0#;z0~%qB`;QC~1e`Tm1Zn z-@Mn`B-c%T;M~Hwe|k*=&90xQE5YB*oynqaqH?>pNUW3I_M2C;iI zg}=YaE0^bW^Biv`eer2G@Tf8RB2tn}icGdOOK?x;dSuYcuWqUmFVpMiB<}&gow2C@;!#GmghWk;Xfv(HsLM9B_U^vc9AQ<1rbs z{RP(Lr^s67r_Pm?OX73tdu98cb3aBQ(SuP#+94FxS^)GHScS&AO{Xq`*7(&m2P9Hr_AoQ-LYYas*7qKuaf%~Lb~#}$wtld%xe%2} zg&rA#?>}QNAw!{7?JY-pCrQ0)pp9C(DXBZ1p8mg|)aa~6jYdW_cY!9AM@^_doOsU# zo&c?{1}lcM5e%H?f(i0zmiuj@(^~;z-CjXWfsayKM(LCGyaM*R$oti#dk^4@3YU4QV=kxee-VEKNJ zXy-zJdpC%>EA^pU14lm^^o?-bz@R&f=9Lpr4aZt|f|Frx|2ky+uNC;Q%-r5NOiaI+ zSP+^pdlA|3{?xj&{W-J8wh1_Qhv$4=KS-fUES8#zz6D|Td2ne>76{<@{vl_^0vUuID>YjWz{c%V~srn;|HW}s; z7&~^<>OI{u>8d(#VB`$_J$`Ce=R|Xykl}6WY+?w98H!qNuhTRKa7>;C3veJn0+wtX zG(UVt6Gn*W(Q%FS5nD|E=%qLJjqPMOFi{M84_#ww&W=j{yY~15GTO?hV`9)t z*fGjmQdSVAJ^ZiLA?ccW(G z!uw8NiOv+yp^e(SE%sBQ1IA9Z%lcsez&dd?jG@-Qk=`bl?VqNSX4 z1fgHSX0c*7%9CEl_+`m>b<(pjzn-z?2S?ZR`l8I3t9(=Fdd)IaIhZ(gx&$OkU`nqc zx3rGJ$FoEIX+F@1HMdmnvti3)s22^cUJRmgzeyow8>_ZXhv#ifr*ln*tU^DQV{FE| zCadXG149ARk5(V8g4Zd{9&@Hq==9C&FlajXJccv362-&)lC?S;&QE?^PVb<6+q6iq z1Rx^-&Ys?!4_V`P^EHwKb`PiL0M9G9rxel+uqWnu-B)quUfm-5A!1c_FSCa38RyQ- z7aAjRo@LF|V=TE}4p7T)c1<%2{@qFB9^U}P52>>MW)qTRkt^=HP?{UBQje{`3r7j+ zyY3Cl_R*{&zI(2FQ%@jlDSnW3iZ|d3%;y8IL0Poy#QrT=F}rHSPqG>UraaKeyZx?q zUzaB4aMy@QxFxIipbXN^l3w!3+l4yiR}>ootI5{F zH}%5TM9!kjmMAk6-0CuvmZ;tlQDu~$MKm^pJaz>Lr5cbDA19*%-j>p%R7uaI}C(q+TJLA+uM zo)WzkkmkfB$ryaQf*{B)dzXNkqvT!NTDwj)Vr_bng0rJb^4T#mn_{>vec|9mb%V6nhN&f~`l@%aI45JR zy+wT5DkbSi^~b5@oymNb_1AQ778=h>|B#(=>FUbTS?q39nx`hr-m9c|W(>TxhXmbB|GgDp- zIIech4vDIDM;Uns+GPN=j_;k?dFKEtC6PC^edQjv{MfL%I++9wkj((6_LKV%`R9{% zJ%j9>Y{g(baH^6c_qO!?=96Hb3B3z#m69ei z95~%FS2KT-LtvT4&2puYEHhTT8ZRf)w->9aTOGhI9_t0qZiBIX{&$m$*bsGtoN*zU z$*-rdvJ-`QxG%64TSkWP32&=ppb@0qeckqx~L@OjJ}L$jr_5IMkMn;kYmcvH0g5}Uxrti6IrN4)EH z^j_kN(3K4>6!UHIv5w1Cl&*>vX`%7lK`bvNSp9^(+HxAa$5C;&vBk;1IBSRS=ZZCB zLdNx~ZhIbWDXVR$#F_B=`d8wI9pQLeF7Ku2RE0W*RTJ$5BNpQUyN$!QbLu9n%^SP( zJvD4%pMV9;bbs_+9k>`a>m6Tb|TC4Z1}%&qmFKsTIMo14`B<>fra(J 
zXyRG0jE>)BWw=VcUls>un~-iy!7fRG()nB%ZeV%8*JlQSO_WFe8Bggp=P`={B#SyM z$Tr@XH{e_b7O-uGF5B4%U?NX{a%(t{ZV(u_&s=NbYc%HGh^ z^6+eVqgo5^xQA=zM@%HzbV+Co0_I=3I-2atRj1C5pmj+(QNvDNZA=CHZY%oty)PBc zM#3G+Cpk){B@?#dCMq1Y{ke_9XN@}}j6LqF* z=c!x9f4*dra>_?YcC1!lRnDkv>#h9{!Lm7&3q|U1)GdCypp8pO+62L{2hv%jMqy)iA078}(+Ps}%UTO-R|f9d zf`d8(w7&~s)+jZ=lIp_bq#y<%qKXU{FBmfsYiWC&P-RtYGwPyP@FuRAb9!1*))kUG z3O?%#ukN@qDf*ho0xR2b$j&?QRT)O}6NZ$W_ugc-q(QlNX>)BM1gaPrU^Gf!r=}UW z@P!sM906}jbA#WrxZ;27sW3dmkF&zy7dDkddljKIldZLG+zrk0N5TET3_<{Ee!{jSDDCr+?iu2}2 z8);+4yfO^c3xg$N<cGs%DJ(3ro$^`#cC@W!)4X&c6;(!A`XRoT; zmxbrWY>CG?b=9o#@P?C{cMirrebw`*RY+&b5880I*Kbg&Mw_2{9CA`bZnkW_*-&zwU)E-Lvr{tD;eVdzT z0_a@jEd3Je6wERzd@3XSv6eM4&b9dR4)j+Q(FjTcK2P&md$j0{hnR6Z-iEL z9x5B9>FQTEZ}QbVa6-2u>()jikz8@kD= z5izrq%pvEz6<#9W$)i9HCv*q6FbSaqJ4??j+F6X?5e@C29AIN-z7)FJpa4KpSlfx^2eMv^$fn+ z5RMYa>1oeZerfP2Zs9XAhGPDsV!lfClv?}C;i7X1VE_3D@^hD*#Id%}6Q0n2XI529 zKuZQ&E6diSUw%M1Cojj_yZ@d$@xb3sb>^5e3F1lacsB^Ro;7ynUfyL3h8}q*1I(!4?7MUL<>t@B=7RygtUdMYt=g)!vwmeL}{o z!`if_V16*d&J^lbj{BO&rY2>AvN2zih-dLweOzu=AR@Qc8(aYJ@x}gkt;ig-EB-T? zu>@~%w>s|4a&1v{wI^dhTDkGtm)M7Ot&ih#U@Ze-v^`|xrlrj=vQ<@YCNLm!FIt&(li?aouqTlYsR z>qr8eDf2M^1|`S(!Kw{EMUJs|MFaRH;c>>xEnY;f;Qm_25=|UW8Cl08!?uo z)WWy3Ox{cA*iL;O1AC|G1Px12Qkbdq+GJydPbnP&e5}J8U=6GO)4_DY0T=4fWjh=cQD# zse<#kf1RI{xe-n}EQ+?8OwrwaTu&4ggx2iI_c8l1sw>=;2z8^3OiC4W?gnPosk+Xo zBL(K&n}MwJf9>Qv#K1`&Xr{FE+x)#1x>^ifR^5oy6LLV8SC8g;{*j8qK zR{EHXp9s^&=l(f|&;EDuv6Ucr3Kg@7I(29fvTG}`m6i0G55B&=7Uin8lI=6$on1RI z4UqaX-E_>@fI~9-!bj9;wxq%E3?!hx=oP#@2BuAHod}89A>L5}yT6PfA9S=BUA@`H$))*k8bdmFWq?5RoJ&CI92u6OnL{Db!8;?P>Pa}(2#F4vPG^d^AM`BZTK zI^Oc+&MI2cFvUP6hnY18j0sEbu!t9PHw_h&#j^m`CHlMIZ%bU0PD3HfV*E;I7GQ`h z;{lUdX!jhFKQF{)_y9hI1~<>}<7;h`RJOkDR|~nC{9J#l2=@L}@IjBMT))ZLqOB)j zQx#S0kLign<1N~04~8GWUUTB26Z_cN<(&;PxCZ&rw+#7~qxFm(J6A*pD-+%OSDIRt zeB0PU4!pNzRUZ&{c&5Xb=|s9Ydpt@aB#V9OHyc7ZpBPnt+)(FfneB&w4zkM^RBlP){iM~$%HBAzddu9Dd{qY1HlfjUEBx%O^e@%ikUh(tiuw#jP?8frPewSM#eXpm@ z-+ePm|Gd`kMNmK0U-X((ruJjZE?C{Hn6u?D7}4pTdH97O82lpTQ*8{*B1vN=SPmoy ztCTjIIPiw&)I{eC*+0*m?}PjzjV4Xs`(dOxh?a^}8RT#1FDAwz9Fj}My+RFb7Gsaq zBqh&A==uW(1nA%yR`>YVfNH7l2jgS`oy>u&gEUoVZbeIF_47%)F8-llGB(0tOOXW3 zb^kLlC3k-$pudZmiZl|q5zbW}FVjI#4~rRbL|WNSnC#)j>W4*C+n^INx()DN$UuUJ zbPa%C=7XAIE+(;;-NG3S9P%V37@ z?6Ruld0=6vP)WpT|qlnjyB2139x73bh4wMc04%Ba$$-QV8kR z$s63_yHLu~v}VD<+&^iA|B%ldEVNC2e1hOFy?<}+C;fA(T2^$QpIj}Duq{v;>7Qa0 za?C(({vcE%OAT!haxK!JUlr;}HQvHCjpve^v5=<|J}E=VvL%|nv#%X-K(U23+8D`d zRFC!7n2`@lugxol2>f+gE#EWHsoBOISl~p`n$_oamkux4;fc+K<1?tY=3-U&_XC~x zIa6&Yap~7L*U#D$0pXZ#Qp2K>tEmu9+H$a>tylz6mp_ha z`+}=n{slTo85|>Nwq)C|PW^VLphsG|#qlSY<-cq9$6mKBnuU$(_`Mk> zH+|heXi(9q4Lxb%lDJXq}?uA#c+ada%2=^!ovE(cmbvisI`uh9w|T zd%bPXY;Cv7$)Q45-$k1KCg+vA{D8e}*LGHEo)?STc{+9oKv#x(VRN-l?H349XS?T} zRM83JNwxBO&7GiaC(nr+sT#%FT&!$m?>{#C5i}8d4e~e-AktTD8*bAUpvNIuVi^$n z-me!t{~kMZSZhu;^>9XqB}!d8|6grDN$3o40!=B?_h{N%1jrB%!1xJV->$j}slo}* z9_{$p(YuO}OpGl=6?R-lYsuNpbekc zq?djQT70Cm$11r@sPF@d zj?lh&n!f=%r|Jb;m#b{;?T}uHCGq?Zs&-KmZ>tbSZf&7J1uxiPg%<-r7eAkZpfr&@RNOsi{D?kBofKnRt!6D zYw-g~SqhE`uwX5*K7ty+czp*U^>%IJ|eKFso&E@Nv>DkaCp;F~Yl z|8kkgfl27O-077n>39K)^tOnZ0v%fMm1+!&8fw}u(CdSZud34A`!|k6>{e|YO2cz7 zDsyb2Le1wxW~MdN)qOl;r{%hC_`(8Sji~Mmy+00cH{ud`GnF*|iDl0#x2xyK!zuvj z?ac|}&|DJPCzKFjgRrCkBbJhEq)tYcI#bdg*Z8CxlE0C>_NFgiGV7E~ahCMpvV-_BK6*1o^jOZ5LMkS}!&N<*B=Xeg) zw&m2|Q}9rL)cPeeR{iK?=4C9q^}wx2BHBTKZcRK!DtY_$Hdw6hN#9eCN)Eq@>otOQ z#@&T|I=}LZZ0Q+;2b)ECD*b++Srx5kJac?t2wg;gAYzwxlVV1J;;NK%57aW9!8j9? 
z;gn7Ek!PHR>E7eM#SfEFg&o;|n1-kO3X9o7#)2QI^rRugcKaq!l}dYdAeH{kRwY1M zHlEg2BeeTv!!`gQxYQ2NX-oZJsJgt9-_flDg1+--KkC-sU$C(78v8_TREOBoOkr8N;GrqfwL+S{L6sG_CfxW5%7M^+qQ4txbDiWilkcO~C z#{^SVr?QL?h`LylD~K~jt_Imi3G>kp{v5Av0Jgn5o5Mh^EK&dzrld-_+^B_2 z!8b{4xfY6?$(1UR?4{$q7JH3fJWC&9c;i&FbB31#nWs7d`cA}({pXLMV$1R`jC?_l z{(3-)n)*TfflM+W*OI$J?HhqZUA%Q7WAy=zrh>>u{Tsir&=`_nHZPbc2HG-|m zxNUPGL-VJl{^Eo}qRX)mnKxU1T=F_>f8QTgo}fOQGk}{e>)DN1Jn3t2;tsPXsd3?&+1We^@($VAdW$Xa?sqIkL!$Iy6UEhKx^S0?N*^84_MM6AGU z{}SdP_HGLI|NaE~0@Vipd|T6$H_dZdU;AvxZAE#H6nKAXz|WjM{C?HrEWKsHs4H~O z$-3HAkA(_+@zY(B^In{x2PTgH=afdYBYP}1pEg}m8B%Qx;W4{D0lShFGC5Tj6*4WL z?&3L052Ox1*euHKF@<$*h)G$MrA%)Zp5uo{-|h^xQbVbRH=m!1+*nQ^pci4YQl}7U zyk{GH7I`0L$YfXjrd4N$P$yrW*S0P|F zhV674AY=?NK%T^BD-VS~q)m(EFs9!MeAF;BcRNOvL06o&pNO5v9%5CUF4aPiAtIxIGsISQWijF8KB5~_hO5~jB`erOJ78u#t>a=6{|VS`L3^UZ z-NPKWAZ_oO#MpeY2IpfG#3dbM$L1Y>(M%MRH63Uwd8LNEO-|#By+HcGqW1)8-j3>> zNp%i!My2-TE3uzts=giCNeCuHMpphBBeXq#`|IeXx$(XMLpl;%P!n-t=r=h_nYiuT0tGs%_Zw;^)j74@lDifW`wxLA#1;5leF1 zUDr1T{tE6L%GIwY<=TK|r7YYZpK$2zayOsL;5o+yNOuhd^s3VWACX%cm&p@w?`*HH zeC`GFN#dH*bsV>@Xi2N|9sz7S#fCQG&D{zfuR2KiQ?+oNR%`&K03ba$X?t8&cJH}U zHtPxARqG^NGM2$^XZ1Co^F-BymK?Z%)7SytSwHaMA@$QXf!WYrglL`J@kQ}UH1Mwv z9|V=)>|A7vol153eX9L!)8Rx)ii7nV`k-E+`HFUU9v89JHg(V70sEsOh3h(Dtsg`e zl$61;G=CXm!xCEPoJcmKAZzPdTo&c0N7BozzxAsxI8R#r=O(o^xxuj;PLnE3-SHcL zJr0h<8nZ8K&^wcd3rs zaL5?`=EG(V9?3~#<_y+R+&z?0Al!BrPc>jtmUsuycKvVPo%)2oj8j1~umXiDoQ(U? zDs{TG(0EPK74zBkj8{pmY8amWc|XaC&}hMnx8B7D<3o1!5>qNiLU%TcHs~SOv z<1A;!{9YH?9wAwCF&mY4o=CHQ+L?5X^*Nw|{*^Y10kBmyB<;38+h=`sRNUde-gQv= zY+?m*FkrND_FWsuooq3hYw@VssnxmP4P~q4;wiN{|FXv=kQw0kfOsNgb46=dB3PBY z=g#4gYpQgUGw%wdHHd2%YAS2xw1>=VM|_g}PBU~laUeKhrFuxspd5Q-R;}t7nWE-@ z<+DtvWxILuZMs?9%$h#V==8-nBVQr^Xl7fuu5$%&z`XpS)^L7-4UJ)@G!v+@X}a#7 z&_BoXwY;jG`W{gZ5u^7No~L#upf>#s_MIiioO6fq&gkq}tPdi0HkK9>M|8~e6=O>jm%2!u$&Ye`XhB>jpu4JubbG(OYNq?&NsO=^#U{-L=+zJ zR@DzVKH1+CvE9Qn$6ahcx&e?{kNCIC7Vg0B@YxHzX7iNRoXru^oYtFC4xI_#KJ+d* zs5j_A3g6BNd&L&*b0=>0ziS(pGM(OZu;6F>8N6+N_vTb_s-+}btUuAPTh#e+N8(t< zmr<6{kYf&RToaEBU0oh-wt5$nBjT8snf~-_dcfLHVAC679Wq;2h z=xjvCQ#fgvqWHv0j&6{$+h8I7+350D=DIxpvk@jirnWQpMRdia_wKjdX6dqP2jqY+ z|7`7$eV)8yyklD7@^Rdzti@TN+6;P{-sr^F1i`?7=Iy^MKZ%}3q|Si8mpA|l^G4WVDmp;Zma%m9g?G>l#hg{;tJh3|0q5uErPWM{Dgxxk4^$kzfGzn`Kw|oIV>BVaVGhiXvXDEM z+e==k{ie}T?UQ)*j)^HvLH(%HI{nBG5em$%tzNWc&Oc+tOKL#@C*x2+=ED~w*)>TzsB4Mmy44{!iCZlV|6PkkdeV(zOj*F_kU4ey4as=m zujr7(zuvfumMVYT8!WzMX?xe1}J6D)pQ;x%^rcYip zQA%(w%ZF4q2(3c=Ly~j}fmJsxQj&xifSIyen z!WGFam-gmzl4!f5+rVY)@_SjglA1rm0vY?81C3j1Ke=m=(Vh_TA(|tvi=-JGEQDhn z659IFtD(*Dz@sA?sucBw^uSe*>5#8|7hL}^E5|lNLjmyS-d)y_aC0)=2MsvIwJqnm zC1o(fxmEU1oJQ_7D=%-AT@+C8x`wtl8<|2Tp+@AS%9tg0W zRf4tOF8)rao<+4OXSt14vV#m_2`!bNGNPg7*F833fc~{@4Kwg)t~$&ecDPs>@F*6N zt(GbZM@t#3%gU6f3nYk@#1swALs`d`s_9#4Y*`t9TxLNmnf2M&+xc@8a5f8vcuTKP z9oS1VZMUPxNG5D9HWc%RaHVnxs=Q2zy~$q7%MZPJxtqF&U9*&bLx6Z5DBu^_5^0ZAY7TYi{x4tm^Y}S$VUeyR{V#KAu*sYhF*45 z!P5QmN2EySk}^ft_lw8_TF|+sBRa_QC|rX2)VglrqR2m0%E}(Znpud!I}v?8=fzMy zgZFqe`ks%Y9JUS7@%+9B0&B{8+n+=nHcYED0>n;$^A-^^fDGTyZSuk%{dUN9;z{;uF7Izx02;?yd3@ zOj)IZXBPU8RYV{{%JS&XeLCk7VRURV-DWd3y^~W_tHV;+0#K0N;H-%>EE1`$oYEH1 z%in&d_=MVo#x8TU**-((Qw$Fa#D*;G&xteWpudC)cb23-T$4!$+g`!@3-u^+ZFNjE z=QZjbVU9k>+G-V=&k`GSxjoGsVAvCHq!I5p5?7A%uzN7j9o^-+jdwlcigkp|u9kjI zdfx!%i(p!mlHENh0_$s1HDtZSiNl<|Glef2)BLHN39-_PqJ=k(TO4T>A{+x?;TpbO zKkb9Bu?ib|lsa=lu!{TMZ=8W)xR02R5aK5B2>f!~ryLKds;?a7$90M{sLTWrFwFyt zd2vFCRnNb@PSA|eoOVMbngXl$GbyTJBHPAh@iKv;-S*1}jBR0i!K%)?65?pS*g!{2 z%`gL;IU+S%CRL2!r%+WLap`~-+vkX4`*AEeS0+mZ5U*6FB5Vu!4)Anqzlw^G 
zdo#{aBMeOXjGGNelu8Bg$TwG^#=PH0kIH2+w3k;wtuLSx*SI?dC^mSP-C4AM$arn|sUq>j?ic&2@(txPSM(Eqiom}9ES`Q%1a zHuE_c6f2mE+i99oMwPb8^D|h~xq}qK$VbnMgyu|KvX?<7`3^+Hl9K!dFs&UJbi1 zagU3nOfiIL(_5YD2_JslOwJ=*38MFwtWK{J*T$C6{K?wVn~Nlf z^wIouM1n@Jy@gRSsnPP|*yStGBP#CAA06eL_kZQwZZrCtfCtXi2Tbb>WWe-JQ8aE} z-0u27cXO%*czWC98v~D;9FSuHJ3FGPc}D65%~&uIlcDLF^iCChvgs*OxUjHmqBK(C z(?9FUKZW<0pcl>m4pW-{o5?zmby0^Jeqx=1O|E{rh z^b-}_`>FTci^dNMZn+mFXkO+PX+G#-Xx<6L=Iq8cRdJjIx)*qbFx(;{;a8trFV>&} zI``DtsKcsM6)Ljga(V_N@O__QI?8DMeh%9Y3^@*Hny({Yrhy7= zcbZ?={0QOANX(jhdd}anC%Ah()`5o*xwEIq^>i4ObH761ja)HPi}be=o!65Xg%{jN z3edR7!KHJ_w%_t;qNYJYi>uwSClyn`*241Tl&}HFnKP+}o-JBw))qlgWAfj%*S8eT zxhJsLXKKdz`nhK1!6ne>Jj+we^~}Ik(6HFJMFqbr6ac>(L`V zwcjY)Wi&Rb8lpd~y-~czn3kN>N|oM(F}>S9tGRP(A#b0wp&|R_L~MA)#6*{5)7Fw~SJc_y%A?2^x-Wu~H^I`1H-^;Yi=Q^uC=iV92Pb_{Ei%q_Z$NydP2Zsa|5m(P} zrvAgD+EBYhicft*5&6aIWhdF1Xi6FerXmI2Q*eH;AB*S9Wg>SB;Te4XGkdgn$ovqY zvhbOaVp0_f4~K6B^k>>UA8&j^r4usf&9{|A{Cv^e?Q!)u)~9boO!M9D!0%w>=15u7 z@>0i9E6i@9JW}x-K*^*jPW>UV%}pm8PsxA?{Wifwi^UF`BUW5!cw?8z>&UPYcYcuS zk4oh{1?je%FVfT(N)+4wLdE5o1I@Y`Zf=h*qIk|r(_vXWmrq)y%C zCeOB$&2|TZ=lJZA_*UbH#EU$cWnT@T4iW$FnvUi1jmz06f^u|0vop$nX>aDeQ_4JG z`&rM#3NFJkZXVl5HnA`F>**#UWc-swq~NalCW_5JjD;ti6Ym0#eTSfR=Xzfz`>ahtQn4ZzH#B<0n_=Sj^m- zblT{|WmVj!#SaVWw)|nVcPZZj#!{oBDJpyee>$;Jpx4c5uj)uL3x8mAn#%4a(Qht}f1dgw@cl_7`LAI|LS1WBM zUT(-_?TvgZo?pHeG=)j8b(5N}!PB?gZ+I>|DNZcTE_5}HCFo@B%V2^{vBcU zn7G?F4^D<)cEwP?F^@`-UywCk0LXrT3O(VBIBo0Y@;X@$jFagL9(-qvMDs zE~>7X!xCqz++j@JlEFgU+jpT1T)E0?OYQ+2uzwTvM}_`WBg2_>!F~;(>1C;+OS)OQ@uS-g^G}sLup)Bxt=M8IlIphZ)?p-) zYj4h~hIku?XBY5EXro4J?IM@5icQ83$t5Ki+efib;|ok;M9lKXhF2BCRZC@iM_cpk zh56qBqtX1w(L2$nLFEq@RijF&KoSljBI+NHf=-^}yiwd5Ks zb=SBXxf*vG%dSezL2+#;pEpnUL)zN9uK+`U*JdOdCc>uwL0W#QZj`8jQKA>1w}9W#Oa|O&r=&EPu2oLg(`baLZOS?e#GET zQmk{~I)+B`yk>KBp`&dJ-O_ps5m0V7q><9`Nyv$}y7A=OP)~X1k=Nj>cHtM=B+rsX z!-BmuuR@oqo@nhO8KhQDYIKE*+-;?-lTc7)RsB9g!vIH7oA>JUK?y$aM z!==UhUTrKK(#j%w;`0W}OZ>+kYHAJRbgP}7MqQNdMW?HKpt=AD{mU&H>nO3Do$c?0 zXXActYj4kVJ-~73pZ55=&BsX!8N9uv4qJb8W7hFKp^FTqZ-)le;SVSE7vr+$10|O# zia}bl)`db5GVtfG&g=fiomLKV4D;p1<&BkJ4j}RvVels)w5kKt_{URMJ-);3O%q;w z89k3Hciq)=(O=!^4(g?(a05R69|4IkFB4-b7ggOQwi#d#;qTvUvW}guy;&+kufhtn zbj|&h+zQ4P(op!mhnmj6n_a0IxA~7l{vPG84B-nwZoPT*k;maDh-CF~Ynd%8y?8Ee z)*4i#QUzw@#XiS#751AH4ZQb5s=BACk@jTuu94wdyhDhjTKCnJ*CmevgO`IG>jp5N zJd*qsn09VexI$TNTiT!$w{&}$I+;9=4{TC|m6%N$$wm|nK(EcGG0v?yvP2Ox8QLB8 zjhc#&4E_)qj_|IYQ*!6>`BvNN()-xZ!zI-73CU2?4eao1s}XFcqmz)fxee^B-;MYWv@7 zb(eopUOwn2V^_ZG?^c(|A|hE*pCVLXIr(hfqsFX$-Eh_S$Q$7imtwX^M39TnC}lQT zpf&K4#z2Hs^h}*9`ZZUQ*}=134ygr%akcbMRLQ5e%szl8-d#NcS6jYhozbWkmAvG} zK)}xTPx?l4lh4=acH8fM{q8Ss`UYk4$zYoc6Ym~*oYKz(ifw`>-rAYP!LLtYX_bXvi$CvYSVoN7L$ASKISsnBsAe;K^uz8`QPB-pjrZ`wMg)W45roPUu9Nx5km@A*lL;dTH9@spip{=&a`jPfnW(k^A zXIZI6pP$2+*E;PsZAyVx%vf)>i0JFSduC>~ft91*91kOFW-;g}Nb6Cd-k0{|G&r+6 z-tNTn$ZH2xyoiiR&h0(Eo)h)!?O6D6<@T!9WjT|*Q&&l9?e1{$leKUrAXQ7Iu9)3& zGbHgqkIWaYr9GRAR=cSwt(Tq0DRGMGvg3$Dq|MVrJ(AXx~t<*DPiL zvp}@mS*NGeO^$RXYDBlxuS{+ojyp&ejv4ESPcM@}Nm2}0aqo*(|NLFvRxIeNdo=4H zs7iTMdn3*9CS|+Yg1q2`HhSPn+;Dsh-G1aC{$ff><52!k*EL)(nf>iODLD`Y<}wv$ z#FcpzjOb;kt@Q2I)2lBs$X={Jhyoq8o+5+g6Z0o^fKS;ymmtvaV$6Z zpDCEBsZo!9qcSk(m@3&8h;;83aeLx&&-_$tXJtsgdDlNl#;71!Cftk2hO}72G##D3 zh*6HlKGC5)vo7|#v4hHbVR*1^`-}Mz^x2hOFKoJk`iuL`eOr&+O3&yUac8N_bBzN z8#Ux_;R%;RXwhjTbgpmX9G8zTM|} z6nk^6*wZ=wrJ10WoNygL@$vWdkBf!Ms=v;XqpL2Th@+_`k}(WC{=DHs9ENChq)wkz z%ryTqN61P8habZ(sh~(!#{GWbJ9=`W3!3R(1#i{wgMaK;;50e2&>58rDU<~ z-B`=*nYU6xd1aNtU&L@xii#gS_n;7~%{gf@b0`vJL$Nc-ib*Rm@_ zyTX|R7H6Ym*4vdk7f%c(j4%JpO+A02aS@+(coW52JfW=qYdc!OE!%Tma({uOjBH%w 
zj7hXs8vxKaa;B=Q%>O1INRmU``PD39WM1bbbX)!SJs+z!vLle_{@YgHW%?}^-ng;? zBuOoMmeTeycFJ(XZ^=9xHN6%;cZk62JGhpmIUDdk^%!-9^yEDu@48Mr)RFq&%#VmY zQTyLTl=iTPv_Ge zvMs}zTjCZTB)+IT0UO7yp1D^lSO35rgBp7hJ)vm$ha3&Lly72ekAxL&g$iaSBS`!?gs$=nHcDn{gZr`vrp)*F@|rwV&zHwx3gJ^i z^J!a-&7AsdSto1;gBiX1=T9`jw^O2O-6y7Ms7yI8BhR@lX>|5zJ{p_>pfcaggX#k+hEf|0`M5w)jwu;t>qNv>#HA9IAu}X{@v56VAYl|6#?=Qc< z;GFAR=Q`(k-p~EIUkSCXN(N!HAS90o)&5oZ3cP+)7Qwi8KkH{=O{ZCB;VQ)I&li^M zcio|z)N|%d&KuEP=|rJlV>fWH{fPdxYd;kgkRI9+R&FXq-oXny{_Oo3oCO9{T;X~G z?tWu(5H~|4=TW@+dl|UG-V6D?i}!{h-_GrP^n9YaS%OS1aw%&2eIZE02lXkps2-Td zO)-JoYup!5MhCKAm`{NeCfl+$)jK1*FIDEp*|mu;_DKMikB!ysTBPFkOCgf)Bss${ zhFWhUvEP;{2%!-GBcw%*2A2*zTKKW{0Ie8;)>{|~D^O9jgTwG1t>N0HgJLLdveSo= zUXa*c@ku`A@KAy#)$oZ%RDbWg$SCZ2?Z&v1xsqliHP3D341RM`SdlyenP&z!jb;WQ z`Kl!Navg3XO1ouA`qS+3j_U1?U^83=yHk@CsY16Dii(pY5`SOm7{FLyt2WV;32!{b1o@tjAs! zue$#GYnc(N4hb9N=@ba!bnxRs3DXNwa91gzQhd<@F7+!U$AKWE6yM7uRX?%Q1 z>%n_&+HvC))bYrg&84|w08*YoKk z0Q12Sw~hwsJ2|c9Is$7>B_V= zL{Nr**H#Xl-=>9{6#1D8*}Tvry`o%h9#wZ)L@WQlE4GdHl8fTG==4%(iOogj;-%nR z9YEqqXTz!PwrTL0Xl4ZG)ISVsaCKsvL5i9e#jPszbXHZbl(_c29|>M|_ zy-%mTK*QlcuNAV$MfDcJwkhipw=UO!>k$fjksl~>{M4Mh0Tvo72*;?%gzwCivmX3) zNkbHPVLeAdc%ObDD}j)R;$p?B;_o)~8-Bg=ZnS&H`^VnAvVJzRk{JS8M!0PU+_`0& zTbGv!RAaQJ3Mt;@aaL#Y*KOi~ zLEVaReoB9p6G#&n#sn|KyGDZ)W&cx^72#bvYT0Szf8=LNxX>-f*HFvquP#kTjFinf zst8`$Kp78=2n^v5t_eB8yO|kR50XT4-C|VisdggUshKvwWY!a*j|j)Cl?W-J(uG`y zIe+)%bAhDu->K-%M!8aF68flZNmaH9kNR_$Y9Q2^nw%H{UtS5Ueg2t=RDW|l~s#clDsoQ|_ ze#$h_4!F|M^tM@%CJzD!`2dTK5j7zW#_8A6;MJ!|9t%wxCC0B8tzo--S&BPq?mwdI zq7?>J6Fq{%tg}0ld<2(W>Qz{lFnAK{CvEcuSryoB+j<98iRr?B&4`LjV$sa8{^noL>k6`;`(#Q zidDM@{F#asr|M{x8;gxrX`Zfiu$k`kCy}-rz}8Rc*h&N^2fVc4^VU=KK*k}<{o)IxbGrl+qzp8!kIet2iP$OxB9ptqp2FDx4I{o zbccq$RMLCPQ^=qkl;QiQ@%yyVD5X4Os&?na&+Y2(OEv{0xmNn+xOM}``yJk9sBjyv zOv_$gqo2pdOxj5Wo^mB8VO;&AgB1D_XNfsT?8vDpfVEYq-wrP8Tg8m=W6u)?wuKBx zpJle~XRA_Hzx>$t+rZ1){USL=!JM(!+VOxJS1RIbKa!6tTJOycE^&o9O025#h8U-< zTxEMRAcOb$-<2Cp_HI;6>qdC7!N^pajepyMF`n(z{x1 z8;=Nn@K(tx-eG-G&_gQrp-7Y*!w-fT5 zmfv*?SY=B%?+i-bZKzYu|3GD^W#^gQrPf`E1`GTmcDG()nUWDY&Hhw5>y7+a6+<6a zF&MGk+Cb-O?uamd$)d0f#+afogR(f}~ z(ni$!vP2#rq$Oz&B$0@*!u8KxhEdFdDN4s&i>xm!ZS4t+OecnwJ3{xa;)y7%E ziUIPfP)5@@`SHpR4vurz#6D)t} zb>AM?ygZ!eBtE*2q_?tz+3sl9f5g!*X5J5zWLY`km8l!$HJsSwT>Atz;W{Y4OD5Wb zp5|&D!el+3JD0XcP1i3)eaW3obZVflq9qdy<2yz=j`PBsN4cEd7~S!+k3KH){yC$2 z9CVc^L(SCk{Ut<^y;P|-A$7$d?NOPKVYxALL}3^KR_?J`u_%Xw+>lJ(&t&r0n zXbP+b<#vHbf!WK=2?;F*GWYf;(A)SAtQi2;bFJKi=f5@mGP8?Kp8no`x#b0J7~UQ+ z=tQRgc|$U3<*#9if!o6w@}1ug{QbfH7ZE%3`U;fha?s4g#^y z&u++og!f1$xHolIw3BO7mv;b(UVvD6R!lss7$-Q69LBHA`+F_uEeEdK-{}5b#iU97 z;B14q)Wu;M&<|g{K0jJhJh=uO6l;&$@bt0m%y~0af%jhSCyr}wnPM|k z7bLYelC$R2ERg`k7Dj+GR9`j?}E)wN;u)G?=y$`OxfknRY$9)reO zvfaU-|LL}@;1K~S$U-WJmJmU93-RBjvWfajI80!{>J*2Q)UK(igRrV@?=;WRWf2vV z;hdT5arz!YEOa}l)NvrFAhjpl_kYa4@$Q%)e1aQc3DJ?^e2C@zG}M8m=NTT6|6{s4 zdJvoR$_A_wPfk>Z6y2JQw+&zF~$Iyk$-j^V7?<#LK7kWP<=rUV*Fi z7t%!N&x68Kr&yIhq@l=q1pZ-}B!T5_V(wJ5O`*l-`0{z8#Q2)<#MjY}g2{Z|b0?+G zbq~B;kCuRW?}20UtFVS;*TyA57$U;*NIW>w#2XVL%KLAOo3mk|6pR`7>d$|)mef^q z7&eY32vIxFs6#Lx%O4Fk=$j%m__J|8PMv4#sE}RwNLDJ zLONb7Wu~Hh<$bd*3(G{6;OudAZ9E2>muT(usNvbLbDsrxj^emL&w8-Ln^>UXU))Cc z!>g!p`icJXT60L!C)uNsqrN{cWkOt%=T}god2QJlNhv|=KTP;m8ZaU-x zVueQCGFiaxXHOW1oSIUP9e=g#smJUTjv=S`8k~AK?{b7vcfx?XGxOU0wie=q)lEmq zgi_Oi^&GSt%=jUBW+7Ys%H|V2FyISjO1d80Mjd7b9~b*qL!4D5iD`#;S>;hfF z?n3?DOmSaqMV^)Jqje*L}MBe)B++uubm%TbOS4;!z-nFU`WEM6O{;?KYh z9r*t)Eh0NwM5uv$r@HCn9^P9jYgrw?e!*0iAYfPxn`^*&x2(36L}t)S65H`=_Db!H zrNos=?+%P9o~0>r?z-CqI$cz`aTkc91L73hiim zezmPC>=0Y5hj7scNGBh5iL6ZHq*sBdLEnMeZ+_WntO68vIX*?0wI&8=573Ca*M+8| 
zOQ*D;KiiWqOz_`9%1{;!_obPJxI9RJ0@$@S(^gqyCE<1ioYFG><-OicCS}!3{R6ja zQ*TakC<>9w7FNeADOqBtJ=DI`$1bP?{F=wzIw7dLUE7Gqw`KHsS{1|9+;?C$)S^O} zae|6fKvB0hT{tMR5N^fu!t-@Cu-wLaAAj#Sq@iASIqyu%mE-ZdeS$^-+8!C0@n`f|e17sbnOOUG6inMNmJ*t3H_~6;@>BW? zMkUPjBIEEFLoK57AkhH!+akMAyXvleJ;DJP5(}a4JFt{$50k zX6l1A;NeTo9Ix=3TyuybJ+yqd?F~pQ%};2g^~g5GcpslQ9m4kET-dgI*q@_#VyU4y zk(K@FSJD{g(6}U6h6rq|`us9asRo>USe|HP1#S=+A5JcSpT*(XM1xoOPgGlT16o8! z(2{hZ^*Z_V8Hc}D{Ky35J-J@ud}q~hmGsLGnf#WD7UDUttbS0O#tK{ZunvaO=!tCM zEDd$JE9+-tPGvUVify#l^CB`{q}bd3X|zfRvy!Bx!k*R}mjAob$)6LEHnjPe+X!RD zuuwSp?5D5aOa8sUQlw5mfZJ=?;a!!~q&0i?&`&#H(^Br^&-vce=K@a^8+Cf>GkT30 ztGEAI941WRe={j7hV&xl8}F3`sxdX~rFC}V=W^09_wEj-zXDzHi<=<)Fm%KnAFQNk zd1uq4uTRvbJzsHx3@ajEM~ot;f1S@TL@NDL`42_asvM=R;<(*fg{skHB2NTSE1dLT=P7bTQvfMZjTO?b)B$MI0F*Z7c{4h2w#I2B^%D%rl=L_mj*z9AkU9VgRAE#;KwQ+xW?FF>W}_pxv0VPG)puF$#+-_aUNo1wt8 zTuU9SzV@4LBrh~LI4#fGOM3oa!D|i*m2>RM>3$AZ_HkqBKGPOkGP@!WP^oN(alEVP z?E?Pa6@XMe@twUx#*)ICE9$doNL2sYOjbc*u-A;y_-d1l&qdK==LZ)53v$-lVc<0K`PGqZ>{EsS>~kIH@)e4F)9f}2*Q7kK^d z2w#NeD+#W?Pth#VlZ5~VIM^!}Cb9|w{E~RI21L5h#8U6DJKi?wp77p#%QTaO8&z|E z&dSAna7mV)oEu0L&7Yfr$>?%JF7O(|d~dai%{R?3A1)iQ_y@0SzAp1ZYm&rR5Y}V0 zAvO$C0>d_u@hpS?EJy7&4Y*^Yl~0c=r(ecQFub z7}6p;{=^!jOt8LmkASd;FM2pO4OEDM#xvF3(e6dsfe}rd1DNqhl+zS_%tR>kJDezy6S zjlhXEEJ@|$_dGI=7z`?7#h;^l2EQ3Cx1C;FK>E-eFlp^PK z;!53UxiDBIf z)F9caR+BYhMS3G^GxCmsyiomw63`@pwo)|Ms+}!)rmP8LE~5)7TlutfP!3?IfM3uK zKnnM_zn?eOd%RrgXRiD@=eDSI^07rwyyAh)%u-MWV?TqcN58|PEJ6o{nF;-x3z|o^ zp-T-eFc}jvU8!8r_L_r&EYk;##w-{0ss2C! zA;Rg_ON$*1^vV+7@ z9BOJhwx(Q`{ecHTA0;IK7Osw&!hVhF9eRHW5&+@iFjTri7;n$H21O&a>r#01CxQg; z`sD1OW8YAvGNu9!Du+(^&DzTt(tgw!Tmofaxn+Vw+aYlk!w674JI`irAH_1=@*{F-o~iai6xPZ+^dMgz(^iC@=Tl;zDXWGWM8l8j=Mvf}`{MR2ON^ zUZY;cdHVt4+a=3yiZm%OR;UVqQRg%P zGI`D6#omTRNCG(43rXyVc4_70t#;N=Rl*u>{m<{)}m5J>+n>hJId9c56=4)G(Ay|8!+woQkV0$R_@*h@T+&=efNin?= zz;P08lxp3GP0P9m_4+B@J9RqvF#qo^D`a%;#dLJncuyC!)0+ZfNaeomEu7jw=;t~S zC5^qa!4O4K01S=}I$^Y);I~%AzfSbD{FEY=$U5cNXGNPpcUV8mOA$H2j0YMaIVh@0f_5~xaTvTpTb zH-0*e?)@r7lJ~Y|DFoXHntFi}tG*R4HGn%MB*JaTD;0)+AH$XXAEplGx{1HlyOMW$ zg8aAQ7Z@~Dp_1ypet8QXnZlU33p`Bp!2Z6?m!duMfP2`#Qb$GSP0@eZ`5mz+qtSY8BbPUBo-laZ34>KVZ)lzN>$Zr?;B=vTg1=BCi3}FTXPo+ zaor^CmKOG*X@E4eR4AbTT}}O<`Tz==6|8nLEX8Gj=u)^ z#YiPKbdRn(eDF%cZER@s6yq98TLj%@AaW7@a$8kCyvoi8_B0*4Q#(_6P$Z|Z(t55? 
z9^e12h>?0OYq!-qi}h-oIGuMd3IuLm=NE{gcDMeWV>GVW3vM0{c>H=S{dtO(C4aw( zug1}0!-i%0(#pz-F>6vgCwhBgvZAtzC8_rppJ#*xin<1f@0vKi^bA#+o?4R5PM4}9 zlCN8}2GcG9VvmdySwjueb5~}FpyFNr_P2T}hsWjf=;BU#8J##Dq4{l4ntsLsYOr<9 zj=2*k>YJ$)q;jpT6;%}{yX_M2ADs&i&thL>h>>y_;Ylm1yMqr#>tTwc$GVBH`rGAu zoEaQ@koI5jjQ}5Kwv4RxrDzWfkNro*-q0BxETcf$L53-#Ao<=(cMFlA^TSG12Y@+%x=)OqIFrgO zIOpp-Nc`Uw$KKjjx51A!kgO_dqn*KroE14tZ^61lPLU*dTm7nsIfOTCQJl7$7f@d} zSGd7!xB|!$^_`+bJ8fFFJQ|x|9h05Y(g)vprKs*8ddSeH=YE)N!^wkxqdl^9o1@fA zHaobp&(3Lii(coA+Wl zSGlUWRr4rpdy&`nH(OOzzcN8|VRvek2(i=|U#hoM7_^H(7nNP&Ycwiwu%ZoGx$Vl$ z#BWhEfzY8+@IN<`ohIca+OM+PbqMmdoimc5JOkOpOifV z>>FC~Z3L=u!$U}x1&aof!sc_{*9)9IA}o(ptcoIrTb?uzT?!f&J352}p2!W?JF7T< zQ=dRuQ#khON=pQfa(p2z`YU{&65i_^{wZ&*RmXjec=+$^#ROC~XyTQIAAc7pMVr{t z-gNqMC3#%(?fxKxhf*ZfZOGa6ZRS$fDM7eDQc{0@ulSQVS7+c^U}hcHr{II6c^dvb zWF+0?pj-=W?4=NAt+~DK`{$0Lt1m_?AM6=NZHo(l`jWQOzg%sn+06gn6$^iDyWTar zymZCZkAb0Uv2a^VD)fZFS(gr9@y=lWeqN%mC0e)#=t_+2ju1xZNi3Xeo%|VgkQ;F3XvZa=TJ=I^hthacI#e_y+k~g)-b~i9Ay-?3?2w9{ zK&au`F}gN0#UhQ8_B2GLdfRRnp`x*Huy|Yih!zxI{x9k5xvRMd-?7R&vekEEuu||R zVJNRvg`I6Px;2A)NbkWPS9W{=fI-rWq1c;_V8ENJ9O7r3cQ-8SFxxv`jk9$`+-dai zLcQNYDp^*yBSK6x3Ok~rmK3gt*qYaSEPCIoT`cvV0#-fH^` zU~d!Il(83|ex;Nq-Ql;MU;$w4EYorGte%Xec2j2N0En-a-8N0gghl^I7PKfR@fiow zieK<$(hA7%n_TvfXVbjJtKIdi_vDR2&bz8B{3(xlV3_C`o#ZAxxm;(Y49<1G+o3G3us$z6NzifGq@whHp;1jDDU zm`~<~LlZ71lRA(6B3=I+;9oO=s(?*Z*;SDq2XDr_x$i=x0e6Gd-{ zBA{v*73JhwY)q@GcbUQ3B|R$M!@wuHKT@>3i-V(KuLiT&xG!0vEARrJPAm$O|JVgq z{%Nz_ip%PlsU>~LGC|ot(<^jkca+SrXw1JaURfYrRvYBPg251{LnG1#*Lhj%!)7d- zVP)1}Au8d`M&YX&M}L% zXcwwy^?bciU218sQV;aK+f2?iBcaK>1j2f--EJq1#H-G%T4Ogk zGdak`IFHL$xbXD>o%}K=ALLm}ss8haBkkAs!DhX`RBFz46XP5HU7YW;|3eEFA=l>a z*vJKuTyOqy@ef(>*9Co-XV0{sXiM28)Fjc&+0V>tM)Is2+wgq8p^CD#nrB?y16~Xi z9k1@Y{^xWHPSh9#cj~|v?QKsPijX1Ro>9(P$QYkf*Ggs1uH5l~`_>+L5623ywPTdc z#z5GH?t+dH@qLw+U#Z9R&eVrbzTFL{1ir!qtoX&Lb8zn z1VMR_Tp+_J38@#Yt_XyBOkj&(+@lq&&%+qW6osRbN(%k!1#bpjv2Cg!!3Dy8`$})= zNhb^fV3Lhw5G>b$3Z&?EJ?8lOt4E=-9^yF7^l>4JNWA^&E1$*yq{|GHhQGJEI-*9X zn#NOgkuNXz6~3@=pWp+fsVi&4VetY4!2l)W<>HzVxK87B)Vx?lI~!a+P>r_Up5*38 zyE3?Bh`PwASls)NRN|3x`+ih|35q*!5=h{zMt{8^d|$>U9){*85Vo0WVS)457`_z0 zfAs#WwvjOp5nP}K=YwTe(|Y>0q^FUbhgH}99naIM_+7_F#;%Pcw+KQTtM?w*@wPqz zt}D$(Tp<^@h5X#Gg;lTxwh6}tOI>Tk^joc5*$b}l0Q2!M=+~Ew4(%mviw<6)HqE6v zaMVr#f|7aZ-uWQX#;I>#U}}lvaYQlqn+>|3x>aq;?~}T@eWn{X^Cy*!NwM#D0ygdS zVG_{bnjfoyZz6XK?9|@vIUGD4<;OH&(e#}9#mY8c?@b<6Dy8v@>lK^6oOFvt}_wI>-63waxl+u`jdkvRTHa;C#+INM}~5l*eSB;*E5IWGRx8 z)t)Jkez0a>J>>jN^n|f^qBL03CKLXG!0XaI)wC;y?ugNNa6}bSgo_G##lB0hED5ui z$CW41Sv!g+R+u@s^`{llz(4iSsr_i8)>HC&a~c*C{L8Mgw3*AiQmer9nJ<2i*wo6F*A%kG#jtjD?8qY>3(eL(B2H zx?#X|w<4>CJ&OFbZLVyukcTDYiF4!O$}Cke)bnCyCSp~vx(!GndgC3iCY(IoSUR?$ zuw#wd+dY=g2%f+%V??$TfPe&vphi2n$l$kPqBGZ!RwVhPEYaOM_^3_K35-Bizf)DT z@tGWV;QDRe4c%p$L>J!do+DY>tlHPg*aTojUr%)yWK8|v6_we2QA+>=MCp0W`bm~@ zQO^AS`2Drh^YN-%$TEn0`tjq&5gm#uRM?6XK9RO48m3r>$7-3+a{zlDD^KxoW8{H= z=q#h80$aTw_`nHyW|q$i+)VFv7CL=Hhd8MfEl9IPQJhbEjhy})(Sp%{zP^$w&+!7s zn*Ncl4{sTkqR#FlnUHNvf{xZm_i1@2~Du zhV)v=n${@xsbAU1doTvZ)Woef)$nRuUAuDN&N2JmZ(@=LinDZ2D+bV+{Tu2x z#gCL-n)W)ozQfJ5AUobnR!&C>(^7BM(`?B)=j!Vb>V#b&?T@ULmh|o8@(}WSBi-dn zM%(qK8%szbNsd!L9dTrg^l+>)sF(&F%F^9If9DcXuy+x+?oswUTBlEa?A5hRyb-@# z2e1#yD|k<+568>22dtn67fi4FrgI=BnHet*4Q&ZcI>K>4t?G0^Y)#CQ9tJf}rh`|E z9xL7gh;1o>#QR?J(o$l(m+KqqcZNp%CsiIJagp4qz!rQXce?YRwjPxO2XTEC*NgJx zbxo(AjY6q(wDdX82DR$o{U6m{5l@eiJerH)ZH-uSuJCN@=m&NP9t$}ivZdq4+JO4{ zdMHng?W|Pt*r-3=jFFASvih_5w~mC{T({P1BY4WfqxAwF{j1$&oZ6VkGHlKDcDwoE z@(uBgAIAIW=cdE0eF^z>Gl_b9DiD=Z8;iQq^1eabpeyx3_BK6FS-X_y9Y$hgGdXmk z>TQ#S^!ixgrRC})6PB5&kTnD} 
z@1isUP+7Dv!_}v3Bj$NH)|biVr7J+tid-~M&gB5dK%zTRa!0Rfc;(XD)z@N34(Ss? z%v4wt#C-GffsM;#++i8hR{PG9Km?CypMSY_ZI*ZKV;jzAFM($U(VN7&Nl5ksVTVKG~c@M2AV|*d4IX1 zTrKKlCWD8(Xp)z19*F9AF>RE#5;Kju;&(Q#PQOupou2Drbv^GWqr@>1ilgPQNtYTL zOIIqqO5*o!QnHM7L#@gcqTOjSAO51xam~G%%2Vs;UeEi|FTBg^TDV9#lugrQfA1%A z;z{-TG+IS{LEPLL6=Tn-D0!dz4~nkrLf+@pa=7A0#Xv}R+GHChR;lTo`uN%Ss@8kQ zWQ_c`5>NF-D?b50{)%3szqs$EY*y|&w;ctp&Av0Km*Os_%-Tj zc*;jA0>ahXc%M`o_`ARiE`0cx*&i?t5p3K976$-A0|Y84E%?FU#3x@L7#Wqp`=kYN zaw4*q^Ojpe{my|;<_GXjPJkf)FU)&@ygwumpHS5+CqGhGQGo+dX25DOrQ{%7P#qQ? z;iq$+{O)|gR-&Tk1iD7D_>wjp2itjiHhAome-!fXHn8n-C~F%(_?B~i>vZ;1S;x}~ za*=Z)uW&KFwA!D*5`3a!#YK{|s3p`k_JwdXk-xlKF7-LH3<)qM@>n~5+z} zjq-eV-;KcD=@+`FcVBATys z*XIxq+&u6oxg@MQ?T7C@Yv7lZ!9RX)b>8@Nn(cjuIb81Rj`bhM*_x=Yd|g9AHZ!(Y z>|XR&)mWH4{hK#lytj)Biny~^*SyLV*hKholQ}Tu=JtGVlDnVL_pUx2RBI}RuNCVTT%-xc#zyz! z59VZE0jdDGp_f&AGlnB@MQD|WgUj~H(%RN=RLs&)WVA_bl#^kz-xXmt1AcdLB8vIBn-Y`^E>9 zr%FO(6imw{dH=jMJ+M&O1|WK8jQrXuu+BHcerB=ZhX@QhF|s&JPULD-w0blo=E-F# zIE!@Z(9UG-Hfb)vtU9m^Np8g;Y>dfM+LH|7td$+HCDdMS;uw8C#=f8H^4fd#+)!g& z)%{BJYj2+rDNL#%3RV`&!=+lD!DQNKJyB%H;B;=Ck30ME4|V#`oqs^)fDYjSU?e~5 zvqp_c0B_A)*aQ!#ofxGJ!%fBxK@$jLfkua)oG#}}*ASr-Mo~AjzyG^60>l-5T6sG# z-iPOpV^LisB&gkktpAAgaIf>THz?lH`NSrOIZ}@fPD&V>v76PQ6JNEH;}r9aEoHBY zs3W`{m6&i}%~DwH-oY^gc+o{A6p`9i3}@SW=KfXt92b=ROOn16N~EyCQEHj@dgG|9 zI(-)=IXgan_m6-kd%Wo@-i zC;U8@vBm*1?&w7|##{q#WzDz8keJsqSW8)&(vKVAZSNXah3Wy!a8&|~q7`tAR6$or zMfcLNi9IP!3jGLv2)msJ@%c&Iz<$PH6hWq~E4$NCknlz25DC8Nc{gQ&6ZK`QQulni z2cxn`z`*-OuC=E99Szq3eBuADDB&D$i5JavWQ8|R%zO%Z9_7PW`}Yz(M5;C&o3?;) z<>W`TutW2du=}q*{aE9wwjU>Kfl6l9FJY_S>HNa?_`Qiw&X^pk|CT2jo9xXs=P21) z_po0qf4}0rVh<-+wy=!wr;xHXs133}Vc{!#(EbkB>1iR}(XV^AT3rp|Phk&J4C13G zLa%PVQFf}6^_~*ao@iRHtn1F%R#-c%3azb2w~VI~I}1|w**oGsAfy$Mj3R#-^H>~z zTO~VksZJJ2m=7FEPF;5OKH>O9<3q%XO2%e+MFV;~MxIDwE|p6S0BiJFajhUxc`#&k$IWF6GmvO+HbtZ-_kG-qRPX5nPrkF=$1i~iw<}`yo+UtG!G!2UH5{DXb zy5wl|vFk?cGGrDIfz1`wFNUyJ*?zuEAlL{VXD|5N*)T6_{!J`1^mAkbx-Sf+fn8=o zP5_gZUn2Nx5+8M(b}vLG3Jy`sh4KZEe*8QgN7ImemX^n}!FeD9^ZlOxq*~7(z3E5fUmYa{XTmf9Cxtul5xbSXDuw3wuL z-mY6@xjP;3Sr@|Q8)>-jhK&aToU*s}>6&fAb{_&yBgapEI5N#~MyyA+kLnfK&-6rp z`k4mit5=}8+Dy~sK!=$CeoCvlCYI`1p8z<5&I6Zik4x5%!_quM%tXQev_iF`mcH(y zyRsHlTc<5sAPO6LND=LIUe|~8s`QQdSK7kM1Ur$f)AT<(9_erysd@jVBlr~r`|4IK zYo%^YAP-m*=>8)U^8cydcijt0;H$Tu&|Q61!~fs=X619MD!3gPT|7wvG>;)+3tHIt z-VYDBpFB9OypyxJ@MI;Enj2uh92QO>5{aPrkK168uBQqDSEb@6$a;IxEf3t8D>9fW z{^&8StyXfX+AdjP*{qnQd)}zQPqx(c|3%y&BhH_bP(5qEYKeCbcH>4Yc$7q5Y!!=Y z@E1_`BrY?hmD|P9mWJg7QkgmYF-b73_A?I5MYmu?AI_#c`)tO^-S)o|B{vpZ>e))m zygbHZADj&ZbA%bi#{P?7CDe)wDWIu3=egdAqoN$CQ=htjUnsa)MB2uLj6q4aV9TVJ zN=x?dOLQMO!>XFK2Gj$VpD9SzKlMqGiyG9h?c?Cg`mUd^^mwwcN9j-5ADySTT?~8lVqQ_!Fdfj)xeUjy?r9O-t4hBjl*H| z#G8VibQRxIxDmUC_vK~ZL!?!ZL%fj`oM3ypa0m;AAmq8y8xnr9Ra97g?)=aaa#Q!I zLRpYw%|%`s^9C*7#qT+Eb!QlTdi=50w5_Mt(wc0GY>a8aERH)YzLZDw2SVCj?|8QV zv=JAQfoUM6giv0+0Rx$U3k`S(4#*z$!L34CiM@4kZfCt@?acLZs~w<)~@>!VZbrnaEmE z`hMGpNPY*?5{S84CABoML^i%ZGjZyE5Vb$|{(*?^aGr`sn6_V5P_#g8`lGpogz|bu zkJN=_X%F5Ju^sm^0gPKIt}AO^-t1lt|XKt zEWE?}O;q&{P{d{ONdYV4=pPp=GoshFq&ijVHbBOiYWusU zs5gzTsog^dqX9Brt0F$zAak-8#Edma4oLqR2)H-rpxbx@>uDDDF4<%~=bRn!&Tr0n$z0JueP!m(v+ zQLT6Q8yJo$T43zr6>029#iMT_uc1^P@53$ixJ|+*X#^X_IMvHsynQk-cdab>U4DCW z&BPqYAdW|EfCvpt#fR8QKGNX{-+(!dA6l6EcvP(=H-B+v)K~ij(P1D~oUWPg;gezm zZlX}hg`kL+)M9yirOp7xyCKql4D97NWCoP1%HXDCcWf?U+W%nX4^H6D3jFrM($C-q z&a(l%IO^wckj1paCU*qMuJdKu`|70aw9x-ibQTUxJzN;ZL`ebZQbABskkKI`Eg&GZ z5dx9}Mvac2A|WXt-O@3T7$XKqH%M*LY?Q=cBS-q}`yY1iJv--ppC^SH8D;K9izt^z z>B29Rz?m+5+ma9wy$ofJgvqwTE?Cj>7+TnY2i~;u@ubp_*yLEmrdS1_>Vi zaWzHPc0ETO!_@}$X3g$j&zU{y$5>d_V!*?Vza?a9f_kSLGsqP4|B;0U>J}D+I%?GM 
zs8c%d_=pkFJlx%8^46=9TlVke&Zpz57stG7K2rjI36e{6NoUnOsoPTb zZwrLjMAmQvH=sB^(}v2x{rbTT&0!J6lwvHW?&|wV{z#Wl+lW`NC(me*4zXTl*N^bW zU4EW?@RlZUD0WF#ebcf^^|@IJ?+gx7g={T3c*}FIux9nW+p!YveFSS`RG~*hjb+;o zeK6+_0h|a1ojhvFOjRObwAl11+q=4Hmm(l%JpBcB{(4&egtx*4D#DePLeWjuJdMlf=hUHZ12g~rFjDdxPd2z7`Azjgqnu}e(u>q0)}M6anE{l<+iskJ zQ+lb{2CA3(36)k91*Y-dZAJEwcNcxyf;t&Jf7n8a0^x z%wDvt1_GbJA-PLIfBx>A$a%gLT)kQAJ!R{J)TpThmZcX8|8u=%e_|2no?1Gjfg{DZKzO{tVZ`_Gf0oFh;&+C9m+I~ zmR3+7j!qKvzuSnot0&I6z;VQAyrUO=*yE|my)=I%I335xrl~M)lfz|NIML40U%It) zb4Cctf;!Re7j_HyZYg$I*-grON>V#(SRfnCxKGtd>>#2^v17Y%vfk&XbFej$sb3qU zbhN?;XyrpjsaMYk=*X=8yLIVN6knfS!0U19KAj}pfbnvG>YK6 z0*BkLxhbnE$~T^kJ8v}1Zvo~bGt~|n%UK!c)pEaZ(iCF-?JwSN+5au%emoA$>g2LG zGefu~0XFBF6)faigVaE7{s1rK)=UMd-E$Yx5i?SRs3hzlQ7BleZQC%3d?&GJpk2Q{ zy2h2UWh}4bYrX19qg`wzg542!4IbRKN_h8u>-<*6Rq)egbTQ_e~ zP89BQH}oC;xay_Tu-l}6i0$WlfH6N@=CF~4!kYhMH+8=HQk5kcYn4>Ssgl$Ntc)zu?5y_NJL{ zIubbE;jY@M9B{u*8TkIkmcYbn~&_)Q@V1$kKAIQhyJ=46Y3` zo=7g+=NCE6B)sAD+;Z;=mw2qUX9v=bTOjrru;c`MZ*ubTb;ZqryZ^|7IMA#FfiWGB zMdSrP-F)S}^?nbt!)2Apq-M@~_3CYgahuVXs$HwuHZ41pqbl$4-YDPCnvZX-M~kKL z|9KC5cyi5H2Lgi~=hUq-{^JBI854xpWL2lN8(9&|YO9~3CvzSyl`7TNHxHAZ?xq9_ zcwM6%h)HcJJ|rmC2n3P;EV*gk5Le`C`Ws1E4AaEJbk0ks0UOY1k`ArP5ggF zNTLd$Gs`UWND$Kk62{9T$KUmuB);(lkY~a<(xJj}%L{7k&!t1FfIUIJE zubdCuf4b?^@hsqA(D!)0Jm|wvrza|(M-+&U` zxbG7onAa&*tZ>&%DV%Ifrqpk$N9tOFAX#+EJpQEnnd7{?>}s}gNh$t^G~5p9ltLBh zRUN=KMCgbJFG>U?Utw>3IA)yDjZ>Ia9Dy` zSa`uUM3Qv1TA`O(MDGx{8e}>eSu>Vizsqw8J>Kc>u^TFC{=qfGPmUj6DW5nBtsAJl zDu~duHaqWwd0^s-Dx1)xLSdgCNzzgy@((52@w#ndAtHC;fjy0fJK}s`G-q{DXP& z6jg(2EY;C4=z~iU@XpoG_dheSP;91^&foMm%RNvFW@WMIkPp>`?BSJ?J%OsARsoy23xz#Ow0k*D(> z992>gx4&HW`jLlU)2Z7*yvm#c0=es`&iZU#PPWGrl|zf`cTI4@tw)ZoYml5NqOO80I&s@l1{tXj zVvUu`EE0DZO5jrt`$x82<4DYp^Qc}_@!-)Zvv^arcNfFLpRZUr1$S>L{o>XjcP~pX zV(HrE*T9f+iee!w;C)q6?WpiTkODT?=h#Ri^0cQW#r?iuG#paybS_!QE0rX^ZSjLt z|DTcvV+HrA|E8X@DMy@N|MeHir(JkJzBBrssEomZx&p$-Dr>G%&*c&_vYYrsaylq> zVhv^N0>20OuIkY_8LB`42lKO*oVB~W8dr_+iv*LxtGosfgr>jDS+;j05nASM<3KUK zEK0&g!1I`a+e=&#+QU9A;U6J&&4~Hh`hMx-Ua#}Ko=_T#KPpZ&og8T%!(U6Z!!Ew} zzauBloXjrul2OyP-N<9}3(>pp7q6qp`lLC2NoQk2{@`UUyraagWP5|fx*^_)z3jU; zU%jHJHrRS~etl})$@}8dLF3C$(UmL94;G1{!%2(A+w|fu=UT6_vuOsVzUTX%i@-hd zcive#$5BrY`7xV4R)+`He3F1r2Z8*H5c->-E-pvw{xq(;p6cy_8&>p}D#?9`f`Gb0 zert<_&QB2k4{?DW{t&7pe=k)x)*_E=QOYsF@$ymXG44u)u%2CWTDPPziow$9Z=<5$ zXM^|lG^JjF#>S<^MA5!f`s!rp(^#z@|LE71AJEp;nDM$xnVF-MufjdPxz10=sqnkE zh|OcpdUt|&e;)@wJ~j%6t;V_}(YSkbLUzm#V62XDmDWjGn@?9l!%qjHZgWDZCYIWE z4lQ@S1$$T)Kv%%p7wgkCRnTV;9&?$7Ii3(XhuoRILHuu^wnRJgOvubh!f|8x$yqQV zd!u}N2ew@CRmtnbl2x^HqqdS|+SqOa>7zg~Zn#{j&XiM;3~%qPiAuK@2Aei;c6|N1LT?@IgN`_HBUi5BX(TfGz3J z<@j6QyzI!jqE%w|xSM+6#LFxq&~N$<&pN3BNsvairj8axY}wkpDBYv*`GgHra67hz zLo(eJeYW*hP2|CL?v3+qB5yqAPg$@P@YPx$b)E`hvhIZIkKorcMQK9+>YHo)|%??BaX_KTCBWNGG7*VjkjQKk4rfG3VbOkgPRTI8%UPS`ts3?@h}0It@4C z9Logm`IC4DFGXP3;+BsTrnc_fT7@1zh6^lfV@BE&6W(kNp8}@075NZTtZDla!RcP+ zi|F1;_Kf0KHS6ROTt`5g;|{&EoZOR{qRfak=xtvL*B%FACF&@Fa+cVk9zW-F(*jiT z*Zhc=f*|bBwN>ZNTP_DM)SHg8&{n}rUnnvlrN=g zq_y!YHP8pKq~heYe)?&#pvDL7zeNbhd=LZ9Nk->A*eftAX-7=q&{RoupPj7X6PIEY zBWOdw%+U(Zli0qdqkcx`f%8fq>`AU5`G{czXMaphd;QetSA6fh#6 z7onCp8;WAazC6O|t_ObfU(AV5E7(e-TUZLY2xtg#T8-X1W*;7ZpjtTSz@g+2=QI3_ zgY4nVs0rz!N<^urj*lVYJT?AGS1j=~Bpx~an7XmZdgGC0%>ZMO{oHh)s#QcDT~Tuj z@-x`284})9ip`N}_gsShmJXz@U}=F46nrE1LhvFfTGO z0L{nZs{W6yL`1)T%g+raJAW^m*11mvO_^@F@1WBH)()p!ixzYjmaR=E4pN<=$ps^ zUeAs4%#3A|aXoXrc(aR}z8`Y|9-Z@c;Pba;Rk*kUhdMBG>S-Ez#*Ld1Aia5vpdg;+ zaMwKv;-*VmhqijmqZ-yRk5PZC&)58C`upl?frN|DK7@tjGc7jlW(~go->y`Cz`5WbyeW;MP0QKP&NWt>ljoaPRhq zc*O#IUT-Qvr3$poJ$F|#2sgZrvwXc-dMkQI_!=<3&GwIMR$jl@=@bYWUQ?PqNh(CI 
zKQxX_>++g9&Bl}9qASxGEe=YG3NX@S{3`4l>(oQkD2> zB_y|UH0qyy4T%VPm`0;G!MiuqeUY*=^%))??H+45n!P1T^>cI+ zuq8U0RjaXAi4W)>U-T7EAKk8PX>R+jDU?Rf^DA^Smpq_wB{Yf&Xt=0>!fM7At#Wr; zrZ`kNonL=tJ9BDlzKIM-^vI30lR1@7(@kAE?MV_|-$hmOFDJ6!hW77}oJSoE@&G&U z;c~!YmV>qN(^^xu=_Hc%if-l!LX+}ESM4taFbds;V2cY!j-XJ4y7lkksQ7IuMy(_e z7icAq)rurwvVH=aP8Ou6s2$XW|7AMw8La|g4>Y+J!&nf(`^6u2I%tZ*%o0!fDqr+K zSUK{#axj(Dpe|sH`c)cW+B0-GM0a_zz8vB(f}k(w=hP(xIY7#WP>9Tc%Aprbn2`OZ z%2Z>yO9d|~De<{CIlxdU zeJw3b@6I!ODg;+pRg5j~lFmr?fBlRQqxN>ZYjG8dK+5^{eon(a zuLJ*=#E#SAm_+sRoGTIr%H*(G-+fb$x`-Jy93@DRr5sV*oS zz8U*?Mt%SXapPg1BfI}CsC)1I`BDwU12YT?){HAipertJp68+W8vyV}!9hC;TK~ul zS~3}%QeQj(S%2ER%Is|=yw^g|kA9qzrzz47>+EwbKh*Oq+;`LT$Cg*QrfE0Lmij9p z!`CqBB;u#3acS88vEbj25rXAk+-Ar2kj4STLcJN3=CNF;RFD>rOYnl%`y-s&PLJ3y z)V2||G?hR0a;c%m`mCV*<;35BkW#4NyqH-vZ}QN+C;a;yHb0q1@KPmXfVNt-;V-L8YdPQXnG%g#Xy*$GK%u^)PLVp6 z(x`R9Hfvwboorh`PVbgp$$UR_!+(^IruU+&$k#%4a@l{eE#i)4mJy}b`e4T})biB@ z+#q&;?ahz5Y47vV{Yc?S?HDup#;rsk(6JT;Q(PKeUl9K`GeWE$*2G$WVOxO;PM3Wh>lT>oUEmOp)SPX5JOX$9F-PR8Y+eiCL zO%1u z2wWFewGhha7fRYCxOhWEHxk1xTm65H1OAdoeVm)oNG1OtMP9BhXUfVuz3$kj_Wn(= zsugm%amAMad`CR=Ip|%zU98=h79I;eW|Dd7OoWvX z-NqarrOK4(BNX3pTRJ;p1vR(54qtYKJYJexekmYv5S$u}lTR00p&0%xFGm6*Q)efG zsvJA}(6_!)yPErL>BLUzP(ZCviV&!((VOqK{$op%kQ-wC#LVBcY#*eDO(j z-fb(Fcnv9k$ULXuy2p%(81mUzAji31!00_;TUW$yQHd?;wi;>(|C(UM^d%LFEnuh@ z**(uVzF;Pteo;60kBqFE*#g*C_p3>8kTsw^qi9ExnzQG{C_7!t^SjC$P2((n{O@Yp z#QltEY5(*-S`&W}I@y|mVpU#r9^KV6zXk-Vy7H+TfqCfz$G}<)*%n*b)kV+c{v7D= zuY^X+zhZ^Pz@EbXFtmT@eRpAO-*F*p?#%3iPj}sfH(Zc#9tUl5=r&?rFH<}7813TFV(rlu(KsRpUxymlTApXC3x*Z-HYm3|>nBgxBUq`0Ok9iM^THGv ztlAD%-wnaiqYJ^z5g1;!}7)R<)?isOHI5~ z=b_$a*8ODZB1WMvEv-&qJbJr_8>gta+>8Q?;CRW4C(rtp zF45cslfF!Jtk6HQiJFoKZSANzz4YDp3pC^yD+dsfrVqQsn5_PjiI5#y!#G0R{3T1p z_ERz-psU$R;&rZP?JBE6dB1c~PWOcZRFU6`T$2>ce5mc=;VYH)j6F-?F1Uy=6A>r% zd1DI4TGS%c{K7g+r{H{mcQqV`SGhdwMnCe-DFt)e$40x(DiWc?cBwX(W1J( z`&|vY9;1-!pVsIeC#p83FK3k6lkLMRlpxk3YT<^OM;@nY9@mUsztEIS#r)s8_U?ZW zW8}zAw$k$pJ8{6Dt1&H;=JtV6Gy3a6GTJ%e^omcG|n^icYC{LDt zgtA+yO{{xy6Js$Z^rjgx*}Q3z)RX*2rrIh%rl2NgQnam9$CodZDi>^R=39B!My!+T zI3*M>*rWJ>0O;+J#Nsud2NMuhLyhs!NC%#wO=fR@CYTd{WJZYrkSTBIE8j)`MxV)C zo1*EX1kI@t-&~_rP{F~%WplOL4o0&1d9;|dUp;T!BQC=i+>*h9KN33Ux}6SmpkV)# zGTV6;rp=o5H?`mSE9||7b~M4Y&H2Psv_WlClCAyJsK8lN_xiiLB->I0a%LB*A@SEb zwjv_l^wV6S{B_Cz(W?`9uw_t%IxL>)6uJ0bz2Gi=ZesMq&#O0=xv?D{zwGS^EZpP2 zn29ON>l#`8FZz%#D>%|XbB;?Rk_HhjrsXaIg|Crf@ zRe91iZ=iCiy;K{|CcK!TmIJ8cE~b3LC5Ql>OV*7NU6=H8p1!jA?71_2W+C)`AB z&5Y%|;=Wvt_xaTT@U)TM!~2v@?u$#}0|!mVUEjn^F10a~+_#(%q+HI7D4(|fi?H3z z4Qyk1cx-CX^cO1?l=ppMsDh+iSv8a9z2Qgl{l_qv2srS+QtNIGw_U+pq7=SaUIt?Z zxL;%c-8pfeVyl6VoO`S#_*OSvY2?9aV!80Ig_oAW<?p6~_l6-=T2cZNEbBk?*@M zP?n`wew7#TiwWOfCRomZd+}RrmX)?(-VvC`MW6MQfU2V^z5;cAi2Jtf_l~BACn0>xRL^<8h7I&TdcVZevdExaGGFCoNh@xftLJ>EBW2N))l)UabojZ+DxU*g z9NFC~H?SSf{HJyGu7#CKBHy7gKw5tZiPo-5C$^v@qIGI3Z#@2sBvC{@Parj&*eWh7 z{7T`2nKTcbk^ zWz75JJ?l*rMm;MI-f6LN@huh@o6nx90Q^3rW%F||$a|MJT-zGn`1&T@Ayq;q0;y1F zse^Ah)SF=~(s=5WuXg?56$@Y&=u^tJFf`@VcyN{fjQ+mV19DD5+TIQd?_~yI^z3?5 zJO=#)K+amNVZ*mx9@!Y2aHpv`exr*qhmEzsI3zUoInIJP+{L=?Hy6Ma%}kU0h|_(9 zMiCzR{wfWrUu~xR&__HtTZ3V2{1Jok29X_0eRET13MX;#b;OH~H=jG@6}E;k>dV%F zSXZIT`FUkg@o@%CC*&r6TAG5r((bOJ^BG%mkN1ilEx>8 zP61!?>?}%L6w#+gHLt<})M|{7I)Ayqj(=p9?XTOo%q+=Eb$TmJ*F}*4ilI6upHrNO z|MR$#>dq1iQ|^XASI!uhXZGKN76{V;Xp7Po0i)Cvs9_rvAWrzM8eKhs6O5_9N28{w4Df-uNF^P+8jAw{xZ`SQT-(-ob(HDtd=*ygd47d!YisG`9LrjJ_N$w3 zMJeaK7t`q*^ibf-O&NKGr#*Rq>BJP7?%ehno9!;FjInXTn(goC;~W!AN93j6*OF-_ z)`x4~6%@`i zL~yKRZtq?4UoIWqzLXQUc*wjoe~E}nkp@Q<-!B|Lt6oEvkyOaACw*a{c-V-o_ZwkCrp$sCI%+<5wcM25dnm@ 
z4g#PZ)r@(qhubr&0oCZCL32*LmU)2SJF8X&m^XWI*na1|jhhMGAFd`PFI7m^@w)0t zZ%-;G`idWmk2Sz1q`voh^2^6MJ{dtea;K7uG0xNL8zcY7F2`qr8|RwVzb*yD=^=ek zr8y3IJCVn$N4(1|&aTeK-WAuc%6uo*r=<)#emX9K0<7_V)jXR6d}V#XMs7^fQ##@? zuJ@Q6ajQ{Hd#*L%A%>educxoLQZd1?Te+=8eGIMhx^y* zf0$>jKJqLP6)G$}8(Rr~Gg9xnUX7pdeb_tbB=^Xd{P=lIT0e2qB7D61upw10BGC3# z-%BaOSKYU^^h4vA_Fn`-1O*yavFr+;nrAIqilf;)&#HMv;GRr*S1QR6a5W|hnYAk> zqjIHM0P$M4TduLoN5|}ze%J%Ig%<=a!dNsG)+$0s5s5}8>ix;euGW&rj-2KA=yE_> zpVJHy(2Klm6~Angj;P(`2P#)djJ-QwjL`m|EBEp`Y~3z085?%%%<2_O4WiIbD}Q>P zpMSDMn9$MHACNdgWvWgb09gdH$2J|S4#_v@jUPsoNL@T^K_t+PsCp!Ac9kb6T9R~L z!-DOD${`z9JH`sCDi6Mz#!>SzQv1X@Ea-KTsO7ajbZCX-nW(Xs4iC{vCq zfX`I;I%c;-EX~>}tr1ol^Owj3vlIMBhQcj1Y)($y&S+Y>S=@8bre3F5n|gV()a&8K z6>k%At%S3K*#lj;&FFuu0QYvPwz>hR)ps63kf7FMbaWvMdz>Dm;mN7!BX`zP5BLuv z=Ao?$=iq%>1LCv!b4DAC2pFOlpZqinJf6NtCu5>go|Nrb6YaPz%d?FTX!xPp8YThA zQ}tcsi?|)GVqMjQs`*C-UFQ{japhvKbr5P4eP|Z*J{smgdGW0R7%;R{R*~x+!&o}f zDWW8a2xzKL^;Ko|$T^T`A#&1iOp52moMu99WcAj@JR)|O+?hC;Or7tv&HjP2z7sMQ zdkvcqAU=gvyqeOh=3M!)6eoPFmlyNdy%P;n|GwvlvnIn?oN2gT^K`LLY`0}J(ol^y zTM1EL@lTE`zy;<8TP=@W7KK=I33|I(Rpa{A6$7r$Fo~aQIR@$McjP2#H|2T9EQzxQ z76ef++7iJQX8=#vLu97Yt5LBud@@x&N(&?3>y7^G*-rQrRMlhe(u?@q9w2vUl48O5x{lzUl;aP$sz_012jpp0 zR5vwG*fhZI=h)nS@$QZek#>Z#EyKx%uW`^>^PI6_7R;TpR%@I+YZ>J2YuPI|UT<5Lb3O9hfFAC)r|Lzmqv7sX9A$=cp(^6Uq? z2P&%u7SV>(%)FF0G(ubTtE=IkhgG*rON;?mq^gC)h99lf$&@*X7|(*qUlp}AHGqb1 z3||Pvd`bDjXs=!My6NuZmQhWYk;-XhuD>1^s+oy$=+=(rQPK(*>dndWk_=JAp6ZLL zi5q;e_E)FDZ<>X-bYy#HS#mwJpTL`G5xF=CGVy)69-9n106J3}JFG;yQAugO@c4TTUkXhRP0Jmx!Z4*5!ZgDD(!uLW%0gm$(K*&d?{6RS9T?aSC1)cD|~E+ zrl_D=mh@4V1X6RT#5)%@Ho<>L*npnH#a8FfCALsA6M`icB)bKmyl*TGO zc&3PQPM@XX=I^Br%VWJp#zvYl@M^uGB!7DoHa=(~Je%Ps1SaAff$#xiz0%1L(R=w~OT z4WVG65yq@s@21e9+DMc1@j8Ly@NVhwcB)V7pl5E+-;LN&B+ztP2Ap`?)zkN3+ADV~+aFYSgi^Z|vr$q93$bvM;yC@paW8(dVyq(btaZG%hSL5{Jtv%Ip1n1!+6Z_)o!|mkA zIa99}Iiq>a()#Jo3HMbwmkm__0v}xI{lR+Av*;BGH(;3$(KQiT^Vi z+o|ig=8qA+Ic^xIY)|0O2ccVUYKO8V%NqpzB(gC^-W}Pdmuqc|soR&2{7_@M2xt)7 zjdhFJDP{aO$ZX*<%g~@H3$aIDew1I2B9HKv=M2KebnBqZWC;K9@X2K{*q=%HiKI+F z!8OMITK3Pl7y;@T&FA~&F-xA)pRR)raM=~tL0Lh5spmsT@0cGpG2o-7?1aA$m0^`8 zLi9LBOX8A4-Ns&!?WpEhd$#9t(y=)T*rybSQRm^%+vtyHznI!=ME@0{B2~TnCGlRf z2zOYX$KX{DnrimfIgZ}6-7$Oi@|rIOs}o;urA#|}o=U^2N}XtNTkmkdZ-7u88=6qS z(z9}0d>Pu_2vPm!YCMfagDVV`pOM6Y&*hcmyJ3s|cmKXhH9j5ZARby{CNwti&If|Q zIjC8Amb$b|HeC(d%YKUrQ$u+X>AN{4^H)=^A%yQ3u4xykRqDhNvx9|ppP0}xgrmD% zPJ?EG^$K%TpVf*L3zx=t27QppDra)VR01El| zyy}=j#Kcvf8xtS0GdIMfQ5CpM6uCO1m!i2pseh)eN zL;kFBW*0s-byUuq#~@MEp`AO~-AGU(8Pb$@yB5=YOY*^QF5XA+j!>@~#V03FW;F&K zQ-_W=iv)=H8I3o+JfC_$bs7;7QRL1g50T=Mj8?@2{p$4nkoAZ}8a90-YgY*K$&KMi zZeM#!>XG=Xh5O7GF(Iq>n`4f%!TwuF41m{%7BDZjQb}O6x*;LM~9iGTI z>MXFpR2F`Ry`UH7eI*XfOcciqW3RK2C&s4Q>0QADas~kIK?{0pC8FaISQt}Tf3J&l zv%ix4x(5>7Jrw)Y=eXu1Cz`%q>D0jDcUd>UhKvo7@rX{JdK?e^hMe7MU|NzqrO!|g z%oV=STrpJ*7;2)ZVdW+$%j;!0^%or6ZE`u--Oqn5%BwO(F&sTRHPKA&XO*+rOwQ3z zGg<1mxB8_rsvf@6rPhiTMMc`^y*M%jOfSGpUN$ww_s4Rjj6G1{zD=rYk!fRP%Iv%a zm-v}}xc(?G!&bet5B{myD$$8fStN{oD`>zy;I26Fc|Ao5I{s!pj#LaskAkU1W+}ww zS-P=4IdD&E%=tTNWPprHFHwuNI9*5X6PfyTU|2qA*9c_3 z=0zBIXNwXbf8Sh3MW|gB{&Hhpg`7RO*o5XfS60j7_h4OT%H2lir+*DCgr0M6`|7LO z9S*gfNsusDnJg1x=*9=4AL(93`oyPhNAkP&DjA1{nfQ7@hfo4twkCAE3nmls-UVKP zI*r%(ESMF7)p4H5Dy-)pnHQO&ui{4QHD8BMzc@aC>nq>J`?szY^NrGnMuF{snH;09 ztKE}R$?-}mhParOO!7|9hW;K|f($Dz6AJ#h9Wn!ZmwxfH^_c~&KEZppEX z(OCUV@evMbW|hCo1WO`W@@;C47GpJXtZrQ7zPlRranvP0DC^f_Rg)c_#*18yNc8A# z`z0>QNmB*3;{T-GjaN!rwYB8-)112}8wu|zi9NTqYkDT`QLar0hsm1@K_V_y5mJ@PU*fH%oBWiLQu#QX4@~ASwT=cgLi+3MsC$6e%AAEv(`jJ5j 
zA?MYwyKy^ymx!aQko#5~#=dc`=+Qs!<=-J);gCsx^0v$TQ2%>J3MP*2QcN1x1c*Zn5XPZ+b1zk&bD{wW2j;nUr`zbVU0utv&H!>u}Po!55A~8rM67i%;aT2FaaGvh2$Wmz0)Lk2eq&b@jv5G{{ep zatdPHS>;*??Hv%Ptw`jing_0UjNrN;`(pbfr&hIi@}x8= z5`DpbST1+&yd*y6&bMRyMrb#X-mv0fe_!?TMmpV)e;e={YWiezB%_*#8)sVVm8F#` zd3A#$Nur&DUkK|$Y2nIjElTVmIU=Re?KEMT0;JfHBLaz~tBAnSNxQ``arXYVQ}aTa4_xL|h)%&6;; zl@PgmG#flfe@mw3+PKYEW|!odJ6hJ>mFdh?~&g z`99;&qjGT%tEu<&DR-mKP^ggkVSfBAws;yO)-h*L+f{+#c>`J8H@GEHRl^ z#ssc&cg3>0q_Z(9qB*3;1NR63&RsbvR|&kmWg<7M7@2C4?CkHP{Uk&8zM|QXEr2XMNgPF!cNC<540u4MJFFIU&EAzA#bVEK)F*j9<@1>%bN{ib7B@(4%Vv+AXzTO z<2rm@IXi$`@;ruHI2wFbuya;l9VRU~`((&ZKhMxhZDUC}X7@^W32P>v<&skOh5Fdx z{zi1cm#of)^&R}@L!0OgdJ~$1sE)HxM+>ajquo|ddo)vaVWp+^1CwYm(uHY-aMQp9CD%`=1;YWlOy5_NQj)%N<#b*2odf?Ul5q%Sqj0;&lNL%#e4ZUcYVon4rLK*upn~YdMjND^KS%M?tn3Fl~=2 z^noAgJB7EVlrbQd<$wLbOpLZ20KjgE$Qn?EWqMM&U0WYPZ=tU1f+&`%NPyT+Ub5!6 zQiWbj?wVU@gcS7WAdnkY6>_MTeJIiT{RpN%JcH_)qrEQWWmE*Up#t{v zWmbTu@6UNW%g&YL)Dv9gXI#K1oDTh9y(97A|iy7hE|mc#C9*M;4Oo0F4bGsnYjZ)AD!VHE~{*^SXU8E6;8n-}p@B8u0jhZi&!Sx?k@1 zTp~)q4MDmYo+W)pJbm5%8vUPFOS!gewQTxZ8v%2eX1P6`JXHg95?zOGR*+F%)pZ&wB<0M*h zAfKOq;GF2>(y48-8>2oR@a){i=%GEtBpYV2{8hFGiMI-41bKFooi10nFJSmu@RkcA z+g}H24IQ7wEph&UBNLZgTup=}f{0^E)0Gtdr z%4ks{F=`-dOUsLI+4OsZRUxo}UW;Urn(1qxd8&(+A~Ii-r0IIMbC}JN;~P><4NFRz zrdb{S9yQB-=F-gK`y}13Mr@zP-c=BhHd9tZ)fe=J*7 zrx7CIQNIEimKz_huxxxg!P@Sg*6&ENp6WpjzYspgcW7@-$3Tws}X$8m_zzaZo+pnE@lAQd&i_j6kW2^?EnKrWb(T(J%<+{k1 z)X8+YJJBUS(dWh9&ie8YyjPSzyFCTbC=z>y;fPdh2-m!<`MfI0FzD6ceaibrWSsYP zqt+JQgw^z+X?f_WVwE&__Y29yIYhX%Y-=sS8?OZ{_#a>xr*6 zdi*7F&jM#0yW06?0?Av=qH4ljYKMNgji-S-Om0{Buesld*#LvhnL2Lm9`>Lk_wI)` zAG)^E*-^(Y58UdjJD5lQHsBzgQGIcjPaqmd*XWArBZ&88^#P(GP*CkCbiRpOtk+7( zZ^UHZbp!sP)*zWNN1!X2S-6 z5q77pi+ZSu#^=C4HM#Xup$J7<$-U>tNg`u5x0j2JVS1mJC0mKKoSk8X%6!~$^&k8? zi?FsM-Dwde*SxkJbHDH|^f)bxej795X3teI)ViVxAd%TctYviBJSer+6j34gg;MF_ zkk|;msFB$@5%lY(Iw=Dek@1I+6oxoDUQz^DFNaRrF0r9|)p$PAUe!&CBNEXu(YfWV znf9yLGXM}bw?e^>2Qmp0rP?4VdL>z)#Tp&o znZs=KQD5;o%PE<%rvP(2X0LmuR(KM zfB~1k{YTb}u5i|zFQA%TX9 zQ`KE_LYkdnzE9ZKg8BmM|2j|B3g{g}DoDTWugVyu*F<3E!hfWlUEjl+k@e^aX-BQd z0QcxTm#XS1$Ni=blx$;8+|6hhm!yTioHbM`C-!o$r;vVmd@R^%#F=B|b|JIxC~17P z)YK?H>|(!hrqw03;=BF9Vm5C7-H*2wVM$`~>!5mu?!2+2Z7Eq)LyWVA1+6;xFrXIx zBvQ;^y=Cz!A;2Axzw^ihaFW4$&+_DG$0z>Gf3y}1^A|=q#-#DPGD!83i4%2`1JkTI zjmlRbBfXPwCvk`3*(D`WFigeW;23Y1d{k!8(>NeyVZm9BD0kGgw4GFBuKjV-K4Y-J z!r_hgk6zK9V`W;B`R^Vg@`q!Z@*|r4LMbA=@&m2spxwFt^A`>ioCkw>qF3l6q4Cu8a}b2`i9H3X zxj7YaD|P+bA5B-faoxG|4+1a_;o|PYJ(HZ9DfB_^X-;}abn-K~U>a>$h@r{Mj3!4= z{HwAEI!_8f?*J=P(BBdvJ4Z|SgB*V+-cY;%NyD~^*8GH2U)l*a9)&8O=;mZ%spLDO z5Fuwm9D0OL-p4_%Cxy?UX{tY!pS&Z{DKTCSvn@v@t;%A7*2a*mBvr}NGHzM*0KwV0!QombgDg>(YvMj4X}Z$#m6b8p|lD2A%*>(n)$2NRxF34~oZv6T$4e z52EjY3%B&9;DVOuAwr4HLc5@auEC0v{6TSY0015RpJ|`KU)Dm6!oMeMe<6Y(NUOY*UJ z@2{9yrK@XfHHnuP^Y&aPZ~ARH06S^TAeAz0bBy5G6=L_8c`4KA`o4`Ww;YWlc$^xg zKN!Ql^+`gI?mvHMC_V5CL&pde;CLnmB%EV=hO7&6c0CT@w43MfF=GVu7j%I0| zf8yuMs1pZGokHs!tCTR0R6QKNq3cu@l}1$`VBgm;?kL6=EHcygx9jK64t;sq;i1c8 ze$#!{1Q(I(T#YXeb7L8F4GsJz%>9e#_g0K2`lr37RAW;xav}6Y@~LCch_$63`51@Hg=D{7D(OJSyRBr za@B+%53#{?6?P{w{O!D*KXJtPdSIYz$dM$X_3Y)>MB;I@ePn#MpeLV^w>|tg^~buB z<7A4zz#eJL*-gfqV2#CB_c17r`dH3K-akzhG@v0I-KC|C*M+ClmT%CHjnpKBAr3!8 zjD!W|%b>H+&|?lg=a06TP-v&M-gIrZ&bY2iw(pyJWhiQx~)t z44(qpKR5Sj0!MIBdF)GTVcaOU}$#26G( zIQ#pRl#Jqs==CFiY?-{t3XA=P`cC598b(Ay0eK9jqYurfCjIr^0O}TY?6##e`>8M%Ti8(oLRw$to2Y49@!Pt2fMtIJN;*TX{t{0yIsd6|7}f0X6rq* z>{7D(JG7*qrXuYo6I?nPSRc+}0RzS&W<*=a_a8Jnhs&Sd9|2n>Nj`o_M~?9yYMfn8K?d=&Qife^r9EPq zrg-Ca`E%=QVaR`CoD!-m1# zIH?Hpwn|=LhLLTK%4L4u^%Cpb>|FA+ZyUQ?9mA*ZLak?@8aw4Q zpD?2hTr89Dq@{!y_dAWM%9;ufDQFqb@e6`R*{pTWUV(txYo@kR235Fk0lO8K;}7k} 
z6u9?kWt%=vq&P7yznE^-wd8amS7SR^^?Xq-Dr?`XL35C$wt3ke@jUfOlW`v#pFd3g ztZl*GG*!uZxA(lmj{Dp58jH35u1)*0f+cPeC4$|zK}c7LdAOwK3pjw$c)o>@VTZUz z+gwhtC@(c33WBYs^3zzVTCVM3{`T9nN28sp!t<}F-p#X5=8{ap85QL>!apf*ysglX zN$l>e&_vk5sSq2Edpf|pyz}*o&(W5;@l(4&SG>5Gu(DQOr1%KBE}g_*D;;2Wl93&< z|G92d|G3wv@=&%u8D@BKeMPXMh&lhSh~qF{5I?I}pd$Q_jDPv{Rj|dpk$*9#s38&y z1--njW1cjUH-v{OW1l?$p|U=%iE&IJ>mvphX5<59E4wXj9ngC1Fai?lPdrQsKEM#- z2N9O~-+EG5aP0`&c<7a%2vJN~Z0Do2BgOjzFA?B;tUHqj+y{55+s9)1*JW_Qp>AAY zg{7(g$h@ZJO5i_m#k{CW!;ShzB@F2)=ylhYu+r$J01Pu3K>DzG`Tmt|yGUx{X+XvN z@t12ZDhh3zqqF`;rg)fFuZ-;;5DA;WsAqd*#ZWE=v!M?oXxOF&kF- ze0(vvfccLUYkNN)aF?lL!LD?RzMyc9!;wqE zBQ1Aig8UuZ7rp0Wx@=T*86tbDu4PV-aMsE6>+q_8`3epg)H^E7Ot%m2RUeP`SnZ3tq3V^hX2IJIa%8p?Wn9 zUvhs`HvWD-ORB0*cZK+hcH!g1PG;CAkh20~ifrkE_E)SALM8|DjkPI?q?`M5zmPy4 z-?_%DLJ4oW9BUGIKD1UZAna@}`F-3(!TH^1bm&rrs_K0{?n~=@{pdDrXV>gu-0m|& z%6Mq$BY6F%3$Ug@f;`W}QQbE^I5r#85;|nWRkGB5!b3NyzSe;Hv3FBMH-6g=f7$MV zR_kHaReGsp@1GmusTc65z6Q>h=KWm%xz$47wf$;5jL~X&oynt$*?*sif?slf-xDgc z4#%j5C-y1VKWKgpNe%pd%Hxn7{<=um+Py3Os=Xb)zZ<|6Y0+%xQm7Uj2lsRCNuR_DgS^9XE=OhO0gr2vU z5v!gUdetkchXh-BMg@F)YQaGSYGD8NmCQPaU(VaI6DeIU$4MsgZ}cXWM`mMwD74hE znU(f9ojth30ND&Pun5-wH3TUs_L>}AxbIBaUp^iT>_>lnTW#$uj~5R;XEp8(d=O92 zV7hmerW;$?kaEKb263qnk&Y7iN2ZYqnimV_Ak47v*?`fgU1&P&WXZ{V*1z-KXdJb6X%a`1R{GT4wUVD*Zx?vxZ(c@cbL@B> z_`v^>O_eG2s+Y)s&X1c?xMV!V7K6g4!DfL~E72?JFFVsi%yUz}PgVY5ytMvOFwgM= zTqKcR`#f5jHDg;QCfJk4j$_iN6~~&ZCpVvq$>;;yRv+$Zv3i_zk%Apzr6+jxh$Jhg zY$jT!FSHw9Pp>63z1Pw4S~Xfn6y^pi=(#cmKmFoB*u7I{%I(-X^xmpv{93vCm^|Cu zCcepto@hMzse;V&;VFXD7956t*g=Cu62kJGnkkF zREVt^^d`ugYb1SR5}5}6Gm6S5$oT(~WQwJxced{zoB8Qc7b!%a>;bKt3@~_yfWhNWK+8EQnR?6o06eui??@a0CI84=h$PXfNr*nb)_=kU<$d3)PBf5d%= z!NmUVJj_=W$01v+-c;hm{TUg_ZkK$2m2Q8 zWP{y-Kx2OBbosQ~z<^j9XMjH(9NHE*O;100_Ft!2&G*D-8_eTzFzLzQC7K1nEkSq_ z(Ddhx;h=q^rjnBFWFPsD$B0!@KT^hNsi}Nvx~^mfKSZ>}al_76Pd<_fVQ$~VJT9aT zX1rELSoEzp`f3PARSZ1sTzaCY=xE~iuI>ZsN7q_0AVu77pbOzVX!#*c)%IEYL3jD~ zjpAfI`nxl4T~$goyK8og<;vTKaJ0HdjuXBq?6WYBA?*s4m6|w5+1*v1&RLZ#uBPN(U8;ZO6qR?1&~TO+YWK2y3Sn5aF=GZLe^O@ujo1n z%sAh<;Cjb){S~fLYET&>_lu$0r{iyhl>xRNRKUcRAFV*cA|lczM$&PX;{7}KiRM?q#> zyGsL<2uIn84Urb=`6^mJoo>!)`V5Z}xgk4K^}5IluE@imAg@5iTKKX*Nb~)e0ix|z z*3X-~{&g=lPP4+QJ@g}(RNdX87{5MRa2k|ZE>bL3bbVeIrkOnPwj_M>G%(CZ4v{db zn1kuzU()&<`RPWnG`fT8?mUwL>!6G~JZbh9seE3kvSLUZEJryF%)(KG$!ady(`&C zY1rWkXSuWda~3k)M{#}fkR~lPrvlVagA$4-*Z-P|6(MF_x zs+UX)CL|Fb5i+`NVEKOeO+WT{Q0k<-(d-~A*j+s&UUS3^>yhz5(%{KWnk5B4iILx# zinU`Lnc|WBcDIVzEU>gEwE@l#gwJAf`wn{(9Q^2ujF52`aI6uu>Oe)o;Ic6qwFK+l zE8Xw^eCsC^z$3?Z!n&|T6XG7lrYWNuLKk^e%U4&4Dxb+}@ySSl0gF`8cR8Ql{Ny4- za#L0e%L(D$26)=bvy97XF5g29XdmM!@|Bn|ho5Jfd)2}Lt)x-_5W>FVosgnSnQi>Qu&h-uGR zpS`yc{Yj4RcZj=M$#8NiU#?%*foJO!;HjT*p+T27o`%NG#(zE^>fmwC?m;uhvBKgz zMNW7ZFuZdp4FiyQJ(AkJdYnVUV@+0inRY8nUTge6!vsNTn&M$nmS z)E+Tfn}}Gk$5$0KQxru}d&Z6stF>ZpL1OPMv3H+5e?am>UgzZ8=l;Ck*F{HuydQPZ zhHI1d8Qux5LTUwrUPY-tvhGb`YA&q~WD}%pC&~nv%Cqt|w_YY|Wr_8@!oFKBP}|=& z&U|y02wu3BF}&%CYgcMPjNx<~%fADtY8AiS5NHw>h`E$}pPNxA=YV^#&m2rC_PR*H zv6HSr!u+hN+J4p*mrE=~ z`Sx9dhI8L-?Pfg94j_Am{E|t>f7f!esxEjz3{Dku zkrl$xG|QlJTHT=pm0@8tEZmtWP+tmU8|(rktCxRFMmsf#3gH4|z~WSWCR>w0Vh29e z_r_oqQ*qhHUY)*bC`goW0b}KdjdA8mtU4`*O0_|9yt%_i(d|p`jcuY1H&o_F8(Hq1 z!LOL`dxHbcyEcuenESpTC0QaCP3X;=#Ob3SbYy7Ky_C5R;HDZ}eXZ91^y3JGq?_8; zWjT)dOeyoYxd^Mw9XiTj!tqiKcAr%m6BG{-HKI5{KL0QZ1tuTfGj46HZ?Y8dH-Wdf z62a8q-|-f8ER_At%l%330%g=_h~hu7MSZ=>j*qV-nGqwKSr4JgT-)zaV^Fa}B?d3l ztuM&ErR!6Jd?`|%NX{39g{=_ts;8DdO5_j|S7!3;2pshDEXiD9`#n zj-4v(Sy6H)G#%XjkGFQ@m@+DpG*c~kS#^SMJR@-c(&&D&HC@*Gk^uP&3WONiCSx$- zKR=h}{JP35bUK)42#{=wl3E!S5PPtf6o$;)>C{(oBWg}3^R6_nOAt~mOz6C=Akqf> 
z#F2Ygpz3Q5BlDmIZI!?Q&aBR6N;EU;$l}Oq91Gio?jFVfw=UoRe2~<_x7?sqRF<@( zMQTz2K~~D-p-NM^DZCTDawnC9Zs^V&3PKS8tgIqTU=oXG%#`zHOjTY!Eq~X@&3d@? zw&EjUKE#GV`PJ^GA3^5pQh2wwye#m~wxzV@qm9wN`xTG0`?$CrCP7^u2MbwB20UQZ zR4S&SnjmtYenm$aoY!3BhnQVC?q*r=ZbPQCmJQI6JDF%xW1wl3 z9*DnKfq&+)&|1)=`>ryRYk)slF=^n|n03TN+0As|@99XQ6p;^`oXJ2&Uvg%X%3sZ?81*pUxD^ zd&J7(S||w|1;S5kHpc?jAhrsx?*1|ij%s?%(rHPlUeGoFnIQh91zeV{g3Gf^FWwS< zyKezoV4-#RN5XhRC+hL8+IaivxggmS(>a|lMsnYlGm}0#o9(IvM%TaVD2`MZ`-(!e=3!M~g~vG^VB=F(GJ zTjDxMnk^eTnu}M6w3*QBGuE^;iBaRAr77LMmOQG+y^-)SZ}MKeggy812GGc}jxDPl zGd+FNfbKQ3U+wZ{zIt@lw?x@B|J-qoy9HT~H}8VU#k3gYAGvo`&YX7Z49das-Sn~O zW`IFI%sgEgG{Q6^p5;*U>PCBJFP1pQs}aAVCjTr;6Tct63)OPkV!Hp9$n6#~tGveF zk-GJdB;N;DHOcStl28E4@9}+Bn>=96`>yf7E)r*#^~7yMKB6=>UE!Jey!x`a zwf-d5wxgT#^9xl4Kh95HenuJm9G>s;k5o&yGQ~!&*!r#!ZXnPL4VQ)btv?;}!srrvayL$3HeQ02I1jEMPPBcNn z^@;7${2gd<(|6I4(t)RvhL!-08G=eI+$U72rjqC}l+VYnE&;ed*2I!yb{@C$6f?L7 znI-pBIGklHTyq)$!tv+H@|@L|DtEuAv%l-~YM@(>Iv~;n&u&A$&=}D(EfLu*oa33+ z?Tr2gp^UkY2}whiru)g%zkd6AZfiBtQqR|LM~I$%9zQadzu=`k2Oo3~!1lo$#}z90 z!HB}M!glYEO@_Akd+^%tIn|$@cy#$KA+8k2>%Q)uFeyUGdGv_87SbK!X{k6s@~SlC zERq2E)51W=b5$)DD=4hWH+rzkOhcy^@&$mYUyUeyUYe#-7rGc7{J^qcV^pSpG1o-A zT!FF+b$|FX=J>UI4k4o?gm1xglkOsED$t(TN+j z+?bS7(zt!6Atan)?R@Y_Sg$xuHO&n-UZM>g>nAy_1^m=cgD|MaL-Wa39y(`|e;wQ4&7Tw@9aZF;rcuG1Ul$f( zWxo>=I^aswnP%2W8)g@7TsY71G&TJgysORr{$BJGVtJef0_TC2Lui#9JpYl<1=|QoL9p;KU@V)xEX21X&7;|4B zkxARKJ&Rbbc;cgb(KaWJzZc_{O18zasVB9$TB@QV2YS|i;t4Y$K;iRoh@Fj{7t=k~ z3~fjXw}bd0z8RZ~NjZ(UXKSi*ALxS)L_QND!fx0U9cf^!nzTn=k5j6oK#hl|fV~6Z zdZ}(P%O&d81%+^CQAEmLRNkDx*jgPlVNm3zz&bWPs!ECx4=QW8PH$3Jte}|%8S;8m z0E-Wjc~cG*s-~DV!&!tEcH2(HRF`OWaM@cr^4W`GRbwuBY>uP7g0HkyZP6-nQsrCF zhUMZPjt7~+js!}JiG4UwtEve#e0zY#VxxV`8kImt;t0hLrfcn?+m(?=%)xwLoc@?Zj1G?~}dW z&79iTTn-<#|H6yw;dd5i)RbhUN^IHRn}qp{b!5tp*GBuxIRVZ21qw{qCw_nZmgH^U zXP|2W%xKt*PuqCD0PXehqci%5!jzfB9OjMENmMZ=56v-$VwxD0dADp$!Sec^^Uq{$ z7tO`x1TXzRrXsvPD9it9@l1J`wWQ2?YuK)S{4B*6x+IRo&;G)mMl91cI*zw`DwUYAl9|PfD6D?z8Dm|Rde-G_K2wm-0uhFcADd7S+!?IAOv^K2V-e19F z(!Gk%+kOx<4J}<5~#(~E8s{{)lKRh2&=KiYJ=lHm0VLzij zdCuz!_R(5%?zW(pm(2OrIU(ej0=7BMw+{|Y8n*Wbyjn?u7jiQ1MVLkWi4OEFcR&iM z4bO@s8*}V3*t)v|XgBWp$1YaAnBnrRk5l(YS8b-{%$txagTWl(%X?`4k(i2c{gZfL zB(2s9o$h&G>w1CQ4gblfkY)dH0)EsvfH#mcX_49dD4_@$U3?jlIS+?U(hsKQ%t~aC zftlWb4g%Vf@FGhtNB$DdCsk-7Ai5haHoVxrM6FpMW>sLav3p2vkomQH0$ zjCWTr!xsDjyn@VAcG9GaklDHNnF1|pjF;i^E4R-JG|4X4V`!Gbny7GFX#eao~c%B+~O4g zyx6J+b`LPRKN!44TDWO``%Kv$+{qaHr>Gf^S0m8$W_6Hyyk%@scxqD^#_0=WUJ%<@D~#@jUZZ--IY}rkoD;c69NEGF!5Qk?4=Lyd>7t9IuZ? 
z3uUhmH7t>}{-s$nvoaLZmN(4?H~m$hU|}tB@c@snFC5ZbQhkiI@&_Jy@x04gRn!rK zw`)RC)P^v}yf^CX5>(wI<49#A7f4$ zFu!`2x@5)|0lCy?iJ^J(U^%f57I#O*IIBay$DE4JWM-SSC0=y^qAO)3J5*gUFyL`w z`E+i@C1E!2ABp$s4KyFJEaQ*BvGqKP>r(8r-5HR}7SSFIUY=HMHmC>rxJUpR(x(ix zG|?9pDO~f?>U1cQS*P`gtIvhoy0y9-Uc3KDv}oK48PK18l=GD`^+94sj9AbyhTy&L zDO!CdtxLQvuh{(yuvKP)r|$^5&E=V-`}`R3liQ_OwZz2qKXnp5zBw>=e1V9T<%7GD z$kbe>(%fIB(P43Krd)gqwCFh+U&d4YBf-(Zt+}n1tG=M1$KPye80rWn%N^L{bPuEl zbVtNIsU_B}xj&sjZt}mNByF%bRw}(eGPThflO;nJm&w}nGm?I9(K~qTmL6Ayo6EcN z1hYLU>bH>gH$o?Ei`v>3VN~$6#1@qR)WTfEwxNDKp)I;9qHsq~35ssS?aECBiJERdf5r|X zj~;90IQ8o=B!*R<-{YBUP{8~~V8qCLiGoIC>nZUgN6B87-omxcvu|X^CB%2_!8cVU zj#qBdY-JZt$aIgd-a@+#`r>i$-Xo?EZWZ{PI}G> z0turEfn^9o2=#0)&EW99gv1@;VzZN_P=JFL(Z=m_HE;)f%$}C4rK(q(#O4Cuk*fF4UPWv1fs83YtsX3Oj+>p#kJ*hjB>cLr&+)PNrItX|R`|i@O!?NIND6xrtV9*G|o$Ec2 z#BYD4w8ReTjT@7=&&aqJONF(~h^!1G%V`hXer$KA)mv2FCN#G9?ZExn&t`$CUs@SL zormvQI;E^I>Yy8fk5ca#d+wqUyXdQ95f`1}{X{m$(I zjjg}CPtLWswUhsm#2jn$HkJbUc2&)1NTy#PFHBQ#oMrLF+u<)ZjvSxO@HvVoPrUtZ z7}PJ9!{obk*NVZ3B2-_$pyL78au;8% zIxeI)qI8JC#ab|_|0D4&m{DoCBnlf^3vj@%GX+~u_sC774iJKeeQ`PHh zFT>r8$9z=#lvdGHYYOY%v4C}by}!>ahUy)W-_Px3dxZ>+T~EGk)dmLPj8nllXV6J# zZPte<41ptSI#Xbrdd};TFHSa>{-GpRCeXu75+As_Z6|39>)|M8e5C15ZaOGte!#2r z$Idx{Clt$`pUOSo=OIQzZ+@ZYtTZLIdqu6QqQ!jE78DKF=U z59+h_71eBSau54k{G+T~--$OvJS(4;wcqQ;9#iulLq=GIO$&AMze35L2X}C?UK_Zhz6%Ibgfs!cM^5E&t}hYHU>k`;-%IW z;}Q5?u3P5&U!ljPNYm>h{7#yjT*^&qC-Dgc9SagLOG zM4ysLpebEJ8xBPYmm9I8vlI$mFqqikb4Nx0?2E!|VXXRd%l$C}Izz5oDWu z?flTc>!h;!m^vZWzwz4syRtev7VGcr_%VGy<$;pRL>gj2Zpcby#Hz!X8!q;;#CL~l zx{mtLsfk`YuM7vbHt5o?qX4WMU;cFn8`>YUPvIDtd_Z|rHc^)b47aSDd;cOBS_-&S z_*i3EOU6*W+~jf3vbRiW$sgxd-|kx0epl_X*y4R(5^t)XE}Av5Ut)x~Ec-O^qdLxp zPmH&!@q587^tLY6DTSr!oOEA$ub`!xjXs+Gota!F<;)6rfGj^VI9G+AeET1 ze)adV_GS8K)>o7ysvY>ocg${KTcdLlI^lMx`Z(b^XANV!SoTf{d8XPp%)F-;AI^?^ z(mQxkY}=5TZZv=&O1^)B+_Pp8b!+?k?X=Shd&Z((KfanwCxK>9c{D2}wE>bcfaysm znp4V+i|!*VXVk4-F*jBLG3z9Z<7u4b2%CArA%qxoxlMG~8vV8mkRO@IOK?mRzr%ia zmn94U#_ubKY9JG2in-#|`CcRqiRt{XR8nwoqn6Grgaoy1vGALHHkEe=DQT*&S4IYp z3J=QoPg>H1@F}NvXS#QfKBHUZdlAT+%R=EPQ>y84uYY8oXEnWF-sDPJ=d~hr#*oyA zr$7AF&7KtfnhC8)oFn9r6E2%5>50Fw@<&GpkRd7+;R|H$#KdWl3iq_g07)27J#N$t~E^ zDx$8387ooAJF~tR0nk*aRV-4oODuV>SenT)X-e&{wpX5=pfD$0$wcWl+k_{NOUdDE z@{-{gntOINpP`_727tT>KVQ4#8w>l;eR)9YtalRT@gk4ra8uZc8Wq+$ZS{}Dr1{TH z5_SCYYw8W(6HKGp<+SYOq^AEOqRPbC`ilb3(uecL@HU}qZ1-p_!}fTBcuiQ6`o}*M zUrICXeCO{^sKO-yMjn)_8Lnmgbh1{@o$Y&VP@4#d zU~vo4J6^{m%&TS%;uvFJ9+kITs-h+W&@JQHnznCWew26r<1ICmZ;Jon7l7rcn-r+r zQu?M2XtpS`%0Y7N9Qwmq42O?{YdPu$AaMrir&<4%8YW{4kTN}t-qFtk*W!g>`QDkF zBkWNRWQmT#fkPptttJt`XHQJ>))>5Pw`<3G!(YGGTXap>vvdf-J++KFShJwGdgRtv zGq&acK|IX2-QfEZpK`sg2&74)>WoV}2)660mLS*&U2Wg*{Dg`(bhuqzXEW7k5>Wu6 zb1c?LE`YB!7W`dttQsI+8GBqZQ`hx>KQrxMK6@Ac(Yg4ywvN?A_L!B6i44Pbt(ysC z0%p>h=V0`4t3}75zH^!Ac5-~0R0`<%q0n)TaD%r>QO$VXs2M|k)9mn_v~>o+1l;9= z8%Tn3{*%EnF5g+vc2(=7&p(n{@*hcrTjvK^8Ln)Hu^#&Nmr{2>+y`kE?S05oU7Tk& zbCAg%=S)q>_EPLL8$bQ)R;ByFNCcsUXP<;2 zpRzkIywS-TtV#S3XWiULu#I+lCMtfQOooqsg--U*>Z)BiOV8i5c;aAVyeBYqRo>d> zX5lXoTH<7cIXMLOHzibLHhCARgk5fy?Y&Gk$#r>b^Pd7wrtw{fe`Xj<2dJnlHx6Xb zka+8BJ;$dbmI#fWN!cLmk05=F#`1__nJCd=t1-LUDcb*pJTK$7^=J`xYhFbC1Q-g< zYD|oRM;6loiYJwphN`A{y6fs1Q4q-+sM!8De%mqkVVs!|qwp_`K)%~aapJD2hbT_b zV2DYM2NPM5U+=J2+YE<&e{6kjBwgxVNiY4uOFB7P6lULh^ry(Plw9Cu`X{c9S>&Ls z-^hC`tyu2TQjr5>FUk{4L_TYc?ep*crar{>=Br~@_SWj6oEfUDjmDg(RN1HAl{Hgy zTQ?pTb5j$3?lUt)i~rG~wYV>HxNUI6q_PZXNzHP`T!)y&6X9&ndzK#1cP1?(3^~+{ z__HcZi&Z+hj5xp%hxZ*AC-Bu6fEhf)w z+T;@Mb3-Uz7^+6zkZ<|rr)!CIHVwB;&A|VWV9I`NrFli4U{VA6jQB`T53z{d0Q+LI zjRBwjJL*p6*^I9Oj?kkyx^7-fV-txc0j%|-9boG~=hjzT{L9{}hWkfA7<->>`p}(3 
z`nBTL$dyae>J@diY!0vGv|;HB^u6mz92Kzd&)p-b;*%J;vD=V`PDVnB9KyTIzsGNw zM**0R{! z{4F%q2A-9aH5gzq8+Ybg=3H#B8%kXh7pH710xbFefh{|kh{&(2GMbM1wW#s zBraE+h_nKkhlp=EvEH-Q*KHzGui0auF_gA;9U?2S5UZ-(GG;p=P;0_vlT7y0z{)&2qhFO>*1O{KaFbK6 z(!tK|P5Wbo`fB6nJndV)+i9CumR6H3&l1%UnlP?q79o#=gNN(*a?8#+YFGzh^MW}- zXl#$2X!`o^)@nOma%gr7wDoFAwI8oqqyVM?}M(_~1Iz_|l*=Mt6O0O(#XZYa)8s0B5|H z3ey2k#*fLZ1`V1gp;B}L5CPcFt>2mKv%OH(<)U)To|=ozd*e{Gd7qOC?x(5RV*KQB zd@A5qfMo8jpjX#1Eo=em)gwo^bT~I)tad6X!*ro6l$H6bKC#e={{S>Zq6`UQ?dIBr zWF)F)HwGsy6P5m@;Ybtgdt;=~U3Z}|Y|+MriXeMD#}{hrRqMVXZ4k?Dz`N<6#FVKa zL?y_>p)jOLEU*+bvgx*2>R#C~=h#Ea`W$V+J0#B<)NPbW+CWrA_V_@_Jd^pP zSZ!P?Mg?)P*>%^M1vRrA--+!Mq3;Pkeb3~R4g`0N>^ax}W)K^zg<(0VU%~a)vn3=K zMkBU9kxMW80}X*udZyV5HZ1oABWk-LDIXCnD94FsGm@M>Cu>FXK8kjD25^031&dKj z!Xj1mCuT?9tO|uB4;Iq=`pQ~h7|Qs)2=AH*Qr6$dS)Mg8e54E`gak^`}i#n z0MiJ$fA#hTGwclKw?))^S?V_vLt(*2>rB!{WXoE&ngB~|^Y)dNKd#z1RU}Wg&5vFe z^8P}Um~D%;Qnb5Adsxl6BiWg@vq%3_jpu@P#cj3M-m@CX5!Hj@t)J>@QK*K<{-4Yy zpK;h`EA--A4Mjp7Dkz^(fI=0!t2M&D1)3?;60`9|a9XVvw*4b{eD%J{AZ>1f{qteK zl(o5FSg=hWOppjC!5}Ef%#`{y(!I2ZXnDJa2>g0fnH@3#EWoe<_w|L(f+ugz_RZHL zg5gwH`czlZ&?QDsE$|Q@LD) z1ck0fCg^hT3m=wFdVzdK3qZQ9-L4a`+*@eVVgNs6fG0{~DamoQX|*EeT8Bz4pQ+ng zIwlq28_2#FDav5MmM(dRM|@EW-e|T4+lG9(rw4H`Lh>Y#UeNvY%)}Uk%UCz0LYv zQqKLpk652i>j@iT@@NSqfGjsI_(vkqm;N!?W_CMfDQ~zZr3*Xxb$RT_mo4FBaIt0N zv91d3AF8TEbPGtZ^FlmgNh$_=0(0V+(KLV8&+&PR1r+3ze2DeEW))XJ4GA`8>pn# zJ?BR~>lM?;y1jfLBZjgQsAd&hP2`FVNf9*yw1(*6s3TFG@zn8Z`9QpDeR`)icCa|V z-M;wVblzYL?lVm4fyd?$JdhJ_-Ic;DaAx~Bh!SOfuh#d2-%52ICL<&ALF%BlfT|N= z)&e`PZ4j}`>pGju?xOJAP*x_`cQH_$u(J*{7T{1#m^Jb;3mjG3HWQOtI$r{5=h-ue z2JYf%md^0o*KZLJo>sH>(9*oPhpC$&|Uq;Ve|OW#hqpE_Dh9W`G*pwkE>0# zF`vvGC)?>VUOdyGWL>S4&uO7N11_akkUHp`CF+A5kA1=MeDLWBrR8zY!BM}-iQww> zuJzHv(X#!QkTcceutQIn@LQY8Pp)isvUO`cyXN(ejgZY!8y*_b?R{AuS@>2oa1(5e z3e|`cq;)LE7-14!bUdD2rDxU8NIZ=@?h6PyQ_c^t!ONm1e&}rO7KW|^+uYMUrv@?H z@zhd$zC9I2*JbiF40QZg;KiQ-4ZEwn9&85!58F`Hmo^KT$E)HjPnPhAer!QHh;ugV zbniG&SY^ic>%pJre_crX?}I|=v_|0FC-yj7LTrFSEx-bO3%_?Wb!JCN6e3SRkSTS| zkd`T_7(eUZ%o7tpve`T4;Zn(<2G(1)kx#^rj?1}EzTxj9e*1hO*VL?HX2UnHSOkW3 z2!C4%>F#S&Uw?&m7%HUdJOlon6FsX{97jwy$=JS7^t^PXE6>eRIKU{s%3)ZPUEmSxf*XIio!?Z?2~`e1pk8qG>d0#Bx&btb~w#%r!Co_HPnP;s^X!z9wB zx^7M=42+Xi;g{&Mj<)$Z^-i4+((`vI-6=@AfbmZzL;_M*)v|%uB>}E9FAI$ z&w?F+z>oN*7-rr--_xgZ-5(d%3e0^Cu^3r&vnGhB)B~f#s>YGNB5n5rTIBI}TAM*F zRRtT1qH-RmZDyxE-3bE1`TG&@5bW z-4Yn`eXrq<)zqL6kVgLgsAnsc1D5|JqX4%FW{hJLvQ;r;Fml_QUTfSn(!|Kp6{8^q zJ=AORRxe{9ulHuRDx#Y#7q1ohb3xIx`%q54m`)q&PI<$}3SSf4MWWVHWG~Xj3u(h+ z@$y3-#={Xz=_b=t4Ir;N^R|V|2kXO(<3T)U2DVGh(n~AnDUXp3R&hGWjFySs?O?=Q z;R2b?BCaU0W8_`OG;p)~>GxB@Q!4iah>~@b-w*ZYmKLLd4#Vu=Sj1C zv1pbyz9d_77tPp59`tWp(|vA?s8h*BO7xJXAy-s ztffPpymAxQ3q6;rbl)eRo(PS{|B6Fcw_OAdwHJha*oL?iI#p^dPFB8Bfa}7CM~yvG zUiZ&V`J{JBQ)IbWmz!+B15)cpz!^4mk142r)$=hhF3(`JvoEE!2)DQGJ77<@GC7{a z80Xw2FTC(o6yGEH6fnJDRHC(jGq+Yv{8$qK?ubR?M3?cRG>&P276SbkLySr95Z=ZYngvw;EHG07-50lia94D`FA3 zXV84hGdqTT3N)xNu+BbNOR{b{>F>;_O0)Koy3~SK#oOky^z3OLyRFNQnA*!&a>eU| z#Wc%H`(GC)73l$BLa;i%y!kr&_l%Q>xw-mH#Eb805*~vyaWwJqo67Vqq^YLq+MAgZ zg-&%XTw^Zqy=&-2dCj~0xha_MH!077g zIo7#S0pCZV-n}G}tWbD9Mc_R~)2LPZMjzyoeB$E#v#mKcJTmIYTvtY4c$TZsy>iy< z*FxuN1=HcS83pXpWaA0e+;aNN5;(SUs5CtlZG8evy&nYb*z4K{n6@{cIW9I^?wn{j zVNOR#lm~-M(kCKwVi*oHCt`SYlinqU zlBiGw4)}{QzpPCM*ZszLIG${;;_BJK=%)99TMJi81pD^W3zM-X#bTwx8QgybDz0M* zlAnO(b5om92bPh<7tj4KH?-K!Tuqu2L~{l8!CUd4-!?--qq_2?CJdFka#hDH@o@^_ z0@QSNmtG-7?14G6Z|!u0w*9Gws4R^L3<11}fOPfxnhDPh%UdoSSXMJ@=XAf|*Z@jm zr0d=*LKl3Q7N=NcF!t$-L6|MsOE31-LBE9yOhg-H6f-vgz59GoT?|(M*nRyTdE(^N zHqXmaFS%`uX+wFs3{`G2^Z0SOke-9(6R6-L>~)qRV))2ENP@#Oz=4)x=9VQlBbIS510gS8?vpQ| z9}9VS(=Y5uy&cn$c01Bk7HQ}mGq~p7_sUi# 
zbMm14K`2njzsId zSlLTmyP>^~ZX#Q-3^$Ues~fdgRw9l-CER)^5u|kQMA~ic#6%G+Ly3TqM_0Fqx7uSu zPl=_E3ynnGw_yQAU0U|#LRktwqj3q#+KBM|rgv5F{1uuH(=(lE67uz3JVs}!i(pHW za|A zW@c{Vo0F6ofg?>iq;H?~V<+J|@()W;DoKT+^1gB6I(tsEvd?`WL#duA4iswBqcvA< zi@WZcu~?g_T8nHjOk@2tpR4or`>pIt24HI0(OE&N_H5J9zP^j7noZN;%c=%@NK;j@ z3@GBsmW5@Xzbs#q_|!DdT9$W-W0fy9@MyqGx^!jz5N@qp?VAHC3=>VwYj6mzWlwiK zG<@GRw)>u$78xmTgt{up4bYm%t#ZI(=hQja2O+O`@90hkC|&Ytz^(aeBy#oKewdfp zzAjD7BA?Ot&vZLwSi+}Hfaa*k9>3NA`bWZi(WuUS=})ADhw7h&oH$a9`th5y_G zn|UF2?WyeAXRoo1-ufV@sIso)jX`v>#8z=hSfF%ecjo1U;le;SFKRm|Fq@q zX$3EBn?tl2oY&$27Qt<~KZ%?jj!S_QHW9I=Ib+s!u+o~3*U9zggwe%D*Q{&5K?LCu zVyRQoXP1x=cxiBX%>#JSMUMe_8f?a}vvk$Q_(nEJT(R9SA)EWXJ{vUf3h_2>4yczn zP$w#X6ZN;`Zgg?#X~tu+dDv7LM7Rxw!v~bgH?gZ334E^Fz0zR`#nTQng35W=wvza^ zqS6tTYtw6T)0Q{}f9LCHH=PX3Lgf`GGHt$t#1_T`M76<8jaz zCGce(oVOkA209eSoEEB=%(4X%&n_9SJ69&3*WeMs3igH^2J$!@KiK=S9|N(-9j!bjXD5=?R_a=jNZfSJthPA) zTGk!FWqRS~jIC^?Af^&u4QZS4N2&+7a#DmI2C81+dHjGq2I*$@3M2dORM(|0(dmni z6KO~KfINd&^1#XRub7eEr}2}HOF*h8&FIwDjE2%0wF1GkdVcODXkB(7`vAjhU6MPi zot}ckIMDA*GyXFKW4;FsoVaXWh0M|k`?4bpn_H^te>uwo{l_PRc)~>kiu= zJ1ZQvJ=Oqsf-J9{+SCu9sk#*hCUs%vNVoOm8`-;B8w}U$O9hKI)6zwhDW)z8C6}_F zlSNF`xs*tYUu?$0i(Zr?e6)u4;k!TVF-mwC-9+R9f=~)EP!TUHo%!8!1p)xZo!!FyzsV(A-J@sF z^A`DxuWP10=ig&L*A~COey$5N-olLSp27D7RK8DIcZcxv8N%G7I;vreZn zbADm;kTu-BH~n89iWaU5fOp%=x-DL+?UpIOg&{G@)l%WXU>OLzpB#Ek%4!-_%FP; zG4WL`MByvF&kYclC)Ev({?LGL$#)28traj91jGlo)x!;mDNd(O28)L;j_0QGV4WAu z;S>C#KW_*F4uB@_r(4ovoydOQB%gcZkAr^HU33mK{v*j#YQ&eB3obEdp9H7P%bu8y zj|Hewl(~5`#$QVodDQSGC6~+URtJ5J3tmQQ;>j?dj@$vRYsUm8{j$mduO!}MDLPbWExmz`Jt)@ zBNTNwF#lJL!GcVchzzGsJe{r7bze(lK`{~XOC@*TsWID7x zK5O5=q;`Yb?lAYyeoFD`=YH$IIXzOn@QCXhF?b=a2XvY|S$I|BK49W8?HDOOk*0hh zRpN1GF{=4}-UaoQc}WE>mufM*3C7xb@k%7i4G4qMizf1Dz^r;#l4T}MR2)UEwiCdc zyXrT!XuNpwqurZ{h_w}x)@whxZ~jW|QmwVQ*GmI0kI4YYYRkr!B?V{Ca^(QQCFAlc zNUTRXJz6_{ItRmWawHYquOqW+3b1gq9J8CC|VrkaKuD&X43N*Ru-JLLg*&NSc+gk!# z`W~Xo#5QN*M%59sCGtf}wOlmi!uvZ-A1|8GuJ*)XRQgwIj4W~5dJr4k6IiNr0-hIA zB_$oL;y!~FJAG6Y&$2qjx^44(2~GlUWl{29DY~>e)f^@Z#TM=%8>p-`M*fjh!8@C% zt5qKnsw~OQ4do}e`#uSIRz7awQYuJoMn}Z={Sf5$-t6|%UxaNB}#s}Oe__6X2(j) zW6!2Jh0F2zln|AM4ku#z*3TH|km;Jn8nL7A5)cfb#gP|T&qsv?*n`*t2s29Cs|EU# zuTFOE z#AgmJ#E4Dy&P{L@M7Y^ruAt(Zqv?Zbd2m-~0K#>RZpNGM8V$rq)GBf`= zqIVg_t>E7$%7OaYlO^-0n3 z0}zk7mx?wqlosPu3KD6w9{fq1aver?MhY=}vA#`~ML^h5v@ccg)ZKAS0*|ETgIyP{ zpM*MFp4@r18lnx)CUjrDb<0Cb`n>T|G^3Q>ucX^G7buaFo!<0imdKZr0r?;?#Lz{} z%T4owXo578?ihFsh|?%^d0I``Di+Dn0xIWs?(<7B8!H#>?lw%ruuW}!y#?6X{0frU zf}Ze#%EVWRhSH~o3e}IQ$V7z&%l8nSrW$qd16=hra{=HVt~|;7|oE;-7#`7;C=T8?7E)qxv$;# z{XOS=jxZISMRU+m&ZJIp1F2L39p!YI>C)mcUtm*k#B+GvWQTm^}itbj$)tA^)eopiHmoq-`pMKS=Xq3{GuuMk^F^PzO&k>qvg%+O877FfLC&fo>Xz^2cUn7g}2Y;PACUm)Gro zda2sM<=#ty67t@LjCC(mmCp#Dk5^{OO37x{y(chDy3NHnL#q^|g5nWx~X6p6#VWA=RVS#4c{2K>7~ zg~>{xlDjP+y9#P|Zpb+GsKNTR%bXlGz>qa16lR4&q*_;{<1f@WvuuggB?*|YePNy+(~R311C zE8^%O(4T~`C7UqlfS1w#$R^wF8V|ppt(6V?%3slWc)(UoZi#UPKA#W-PBE;f*RmPi z9Fk$t8Oy0BI~?b_T0)tb-`>nW+}kmp_?6wUM}r>T1>@AkFwfFwtvk5aF0leXo~0or zge2e%PktV)p4?7bLSz`D;&$Pz6}{&Vj9$8LE`-DNCjl)A6vrQ9vHjcr8D4baz+Eb; zEK-8t{VADlWVELm5CyJ}qo;6TiG_QoC_HSQ=00H}G1?Cg(SyIV^Li_4 z*Ee3j5lq=jB_5Sl7)a1d_?M`@@U}TmLS)auhZESOEF8W*o(k)dPkm0=dFyD-$fL#PzW^Uvlxm5YgAVFPm za&MS?z7UIm{@uL_vALCaP%9an^zm?(Z|nwu$kvvLHMQdXa%mImzsb~47+Q_3JmAg% zB{^kqwb<}b&az1Q*%O2(X3(Ew58!m}RvAha=HVkyrfK+@%brS0sas7CTgm+@3rx0F)dlA=9e2k{n`NQZMcx z`i8oxyK;!^!xgswN_zw#V8H6R)|NL`^64?sitBwJ7A3SKlcHBQ>PUO$w83N(kj5kD z;aRcDOJ|B+Y!?{*hA$I_RLDv7rhCKW%DyQOj%ck6e0>tZo0jWDsU zLVLS;QKIs`4BfvmEeJ;hM2{P zc-1u7I^V~knK>C%aF|0%f+`r^!hKZ-h$YwJnJ>_NYtb>p4yq+=7m^;#Xf{)yf()N|cFdD}%#-1p2Ybxl3W~V~%IFKWfH-r#UG05jNXp 
zAz_jsgEVo&C(Zi&kvV`+lZ0O5Wk})JK7-JH(Xr)6pP4VBCIPHkr-*!2-XH0&c8wwt zmQPar1XDgLxjG9q{lOXOROu-ym8L8W_-KReU?SXUs3OZ@&DeVu#y)f;I@|lkV-&hE zL5Wxm{LG)JyI?+-;TgN_EBP$|(@&#%j*;!3G8wP(>asoa8rDx4zW$FaL>X1rW>BlH zAghPq_T;Hml_24{X-~owxKD;xddv>ji{bWD=UT4NCT?B*l{S8Kj6{%f&&1zO|NL*! z(h57T#XN*;h-&fq5Kx5FWi9#qr9l!QWcfj1`t7DanfyKFl^|^NA~Ey+-yu2OCU3qPFHNmi8MgAFw5Q!nn6Q@;sl2vg*-wZFp+Ab%;}oUn`2<$P z#*)lHTn&j#2TUOPK-m7t3WMOaBFXjoWIiXie6ZUuEoc^v49USa#*I2oOHKYsRT*gC z#xlM!w)V5{c^|1YyBw!fCU{WXCzdZPwzmKf-(*b*XY*|fvsUanB59RK)VqS&%09%V zZT(o=>`!wxnT$10^SOkg3=4lAMeF9UIU7yixW+)Ng0lYPmF9vK#J5U}dGU2=CvoBu z7~L#X-8#cNCIvLI>|N4#(p+tKlyRyGw%&7_gsa&(|90n-2I0CX2q9ylYCZ_dCDA{^zvxl<)XOBaAR&b|AQ$10E z-wAx|IRmb6eI@TS4AT?=#T@U$#hvlKsAtw3Yu9a6 zpOS^vO~n4Aq&Vptev^JFuo%1-zQo14Y330mI36_G>FXpt6iDs+fZ*wp!didZ2*#va ztv%NW>JX+?(ytE=p@YmUY-z3bFF7?8lfM)i>-3cG?V)_m>|Xo52@za63z?JwHIbH*_)qBIUk0_QYx_5DM;)}zbvCtSMl}V>n!9|tIDXoxWE01zBq3hh4H(aR0N<@)L6DTQ{ zl9_t}Fz$oi@;s@VK#7;xi<^OK^9$Pw**leatiG@{mNn;uzL?^K#=O5zeCPV(uCDNe zxf?TYFv_PSX_fP#edU9jV}ImQ*6siWP?In?ZVO`JMm;5i{#u|s-Mq}?}bG~0$Q@5DO81L@C z9sFyU)sj`$Q5LH8x)2|?B=wz#sM$P4sw*yC5Mbzg3sXB~U#{70w{~LEML^0GA49P} zQ_%yuAKyuDN>|49UdDA-QS?$DM97Oog1AHKt-XZmY3Vs$yv-SgbgQvVI$I zg_b$6Itrz6H)xHVF2)rGrYx6Vi}!f#NoEDwV50Y!DBEu?hM;Jad;T^qz{wQ`qXQ`h zPN19!oB|_fuD>1>3+oLAFyS-0fV}w14Cpc+B21-Gw1>B&_g@FI$)%s+2zH(qMF?EE zX8Mz!TyYszYD@A}^udTBx0N1dD9+8wR6UwHQI~fuqBQkaHB};COX#1m`f_=*YtwQN zdh?%o_yW1-HRFzUbKBJFBz$cQR%vOSmHR+R_>6_v?Y(EF^y5c2 zr7UlZv*#@~Y0a|Aza7#>p0+A}9K~_m=c0u&TEIRCcOm|Ldf1Q8jLyD>?!qZZ($aOo z4VRXEAdl#9K(^_G;BPi2;9NB@3yLjO{=9kI>$=wT{V?e|^9-m>Af@=IrRRLSbqyLu zJeeS=9}k6KKzw{)+R(CnGdjt2shzdORM1W9=`m!E{A z$_qgx5UmrVx|d(a(^6|rZy&Xan;ERmm$RZqzY870e%Hn2RY;>MH5 z1_V}U`8tzIHV=QO=O^Ad(&~58RsyU%ox38(X zx#noC{qyv{?$uj|4972T%|u(k-h{~CKCT6eBhAP&~jMNM2Y%li|TzzR_I(c}>;4iWcfe5U9D0EQCXUy}9K9g=*L4i0G#Mmx6phx~v zE*rAs92fi5%mG)=tY8>0N>0)3_L% z^Ouuz5+C(hV}j#_t39FF-u3T4?mn5bA_Da6?2uzW3tCEY={~%dU6c2h=Vp*qV#Aj` zR0!G5q3X9CW7M@Qkk#W~vMWS4c8z1}opHN$Y@7HSw#cQ4KY?rAQAIURd89ov7G58J zE!pQN_4beLUGD=kwiWZ3cKR#(Wefy8<^#iADZpP>Lpm#e4t)OjD0cK#OJ+2wB72ws zS;}r&){rx&-_GW131)o##b&a<=acM`rCx&xC*i<&AlaWHR?IcI7kOnxXkT2QoD*6{ zlp9ZPLklxP9{XTm0RNdzjHi2m_rqI5E?m#l?j8!%I){OPj5SV%T|YbQp@I~!^e4BvY=4Rl z$r>sO6*mc<1TsuAVCKVOh)b*74WRqVgg_1Do%L(T zpf1D~k|@S{-%Mz_X$yVhdg2H#&qF;hf%S91PSX!Vv`KRz&d#3F!)xY}0VJ%+w-BEe z$mIE!q0Uw__}kMT5ywxwhkUPg93;lFi-puBE90v4(5^m`<{~`_f-ynzJ^R^-kRNTxTMdV`g0>k=%@@(o?zY3oj8EW^gXT%eqZdo zZ2I;RV-Yqd|7*$YlYH0d9LA5mTIQlCFbU6fLt*Ze>mbQ!#)6!v^f=*}oi(}D@Im%A zttLqo#|Ou99e%804KSD_pN1(`d+a=J=#=B*@1IkWbRU0F=!5c8K8WC*2CRN8JN_Dp zP3e+!QA!<<;@CE29>is)&-P5oJ6HJBfnLwZK_ffQpnMgX#-A?YrPH~q9 zmxhRdr+E2!R>JU7mK)xxEq(6S-}2$~M()dXRGivjbc!ShnW#!E|G`jm0@$5rKkP3U z*=B2qSh=leY2s7n;DJ+>9Z3$hrFqlX+nKC#)K zkrxKZ#db`D@%|$Wsxay#^`GC6H74)$#c7Q1E}Mlui5!@?VN@@fEp0x;9SVkpA33wzoDA(F-kkEbA3Ttg-v4xUzOwdw7#Y$ zdtYuR%2+D8pkk0mW?L*ciPbR1xiDe><@)inc^9uK16gaL5>B2Q@85N~Z6b_donBD; z)5HA7o^0FqN}zysBiy>dSn%|Lla={FHFH&VWqc(-hR2xuGkwS(fgX(l3};$l--|46 z6LI$YLjohwFOPN_uZDkn2KpePZHS(e<+O8ys8UXJla_fG&a3>#t@I`J3xii)=|}#- zG3is^L@Mts#%^O8W2BwFWfff6#rpRNn<;UnKhQtnt4Vrt#?(-=c@DyPrTrILSq#ZN zuU0V&KfL)`QO@HwYc!`;@FazDebq4C!CTBhHW(-3c~5^Q34MSsB6xpntVKY}6~J82 zfe$lowNv*>l%k*QQBU7czOj^#x1aK6^iIgEbdh4sl7G3E_Md(`ArUh%eGE7K;1b0FWKvv9GJH9IN%?uxc&WdKS zLH{8__Ne&^iEZj1CWEM<-K=d%&a99*^$quR>9q?qlxARMuMklnrCzQJ!4m8fku>GxfEcnR0IttTs!rwpxfz>=U>7#Lp|;3737RJuITCvqv* zo>jVAAtMunYLgG?M$#)$VcR|}hEifcE$K(|Jr@4&aDf6`Hk#@FER3vO>QEA3;q2J5 z7NKH^EY@i_&K{S}6s)nrh3I2qc^o@`E5*1m(M6Qx_Sd( zu69_6sur1cX0duv5Ps(r5Z#o~$f4ZU%DbMyt~ZP5di$y-cK`&n^JY!zOUL@cdCXC@ 
zE?(#5OlHjAplU0rIkJy`a54>PFX?Q$KJ&6HN!ro$}OfcwaN`OHFb3?mcOvpCtuT_STKBYi^g`%_0VO zHp`@$4Gz`R<0YKewV@LH3m%Q!09~RocHgrR<_xc-fRs`&_&jfJ5mK4ILC>m7wEC|j zc7xF4{bNJ(npVOO38k7-I{@UQ)AUmGDfP`r{&r2QY<}O=2X}Smgez6NvJlu zx(OS5p=Z7f*E|9SDD}lOC!uGINL~Bdrxy*SnQ@(UXBDJc1~)q-Cn;K9q)xLEEz_gA zOzK4F-)6?$YYh#;N5guY-U0sG#R4XhPn+q5et*BIuh#G_QYT_+xk`%nsMgrnh0r-g z8EM=ZYUv!^97_uPli}g@{yo>z>CY8hP4AZm`%S%7siCF~pcrZRwxEw-`}C-K2mRmK z_f#)dhSnt1H$G@=a8yaD1(Cn##rVKA2=PC%C90Zu`nj``@b9?*OYA4rHsXaQq<7PkIqm+Zbo( zz!z~-Yz{4(vI#gRq5*Q?IGEb4U)T97t2C^KQZ-RH@+Dp!6{y^S9UGJB?p%wQzRUjONch= zP+=`Aa%RT#&rs_doVf0m8O1~m@gF>xvM?XV!2AzhN_e4>J>n=8bxcFX!M_Tm96#CQ zJ`xeu&u;25YB`)+&tgig7H~(dWdxf$43Gj{p3g7BRP?TAM~8GK>~Gj&zWK^_3A|c` zaoBq*()2m3>=}-msFFa`L(u40xgAO4B&%sI@(>I-%A}>uX^`h%Yc5NN4Z5I)p9URnN?4)I&c8#F*MQ71u)giT4)kh^lE+ z(JT9O(nVq@aR1EvKPpE`*Nf7n9{(fbnEr+>Wkg#hXz7r|sIYFSG>;1Ilh#X0^18GQCzE+4R6N==t)_Sa#HuKsoOa@3BK)KLPcw4(Q; zT8{BnL*{7BaGX}iFACh_4PSW=p@6W|Z3R(FncIyWR4`iKaCFvJ>;=rlI5Y%Vt?(&j za`_3hvD+USsXoUzOT?TvEC%zZi%`y7!x>r9j1Ae`|CBuSxZ%i9pApxx^=$moN%sMl zt|J4Qfr`J}^g1nZyaEIQb6nR3!|neL(E8<{Dfnw6V^ZjD?epggu<5DqL#jt{cvcz5 z;k3-qnPW?A9m4qm5AuDRcC!1dLEn{enbva%B6j@_&;IoIeEGKQVbg87nCAwtjPImL zE}PpQr1N5U?GF%uy3y8-eG(e;s?_PsmU3cm>tla|AE2?>raAmNYxnene@|Ou%-%*~ ze{gH&D8`+|SR6dh<|CA>9Ks&B>sjWP+{*s=l|2JP7};sL&YUT~yl5X&B1`86$IB@u zF)f>tEvn4LCFQ%v+*u6`&@={|&5?gkwzA$7j}O8C;{QWv5$8T61J}~%3|n8ka8)b6 z8DPb<9&%K~g(YmWD7J<ERdY1 zlDUVSXwsXHBO-z?7bkP958a>{?ZUVV8!mtPuUv|U+#kf-!sUawOKR}fJX z8K!0Aq@>#xre5i|+6#IX1YSQt3Bc_^v31ikeeA&=J+d1o6*Ewbw(?lPufieT3shZG z1JQt$rHoFi6IN@Ap{Kj0${)De|3wcAYW;hsj zA^c#)aqLz3sQsnsfN#S&E!S@^MQnYX5aRY@cmMyK#g>;?T4@>LyzNQfA9v=`H1u!> zJQgEAw^W=sI4m?oZO0*rwcrG(#jCSWGv^-tqhc1c`I~Mq!{%W*GI8W^s4R;fd+Wax z1FQPB4|rH$2U}@G#{FKjsRd~9YxsL5I4L^hJJHa%n=G2pDS6W+CC>$g!KX=3@|l@5 zzQ(U#r02595w7Q8eeaqyo>AvyhSk3bibo8pADTx!qf%83{UG>j&M3w@ z+hVm^#!yQI=4@)-pQBw)1(gm5Uk zt=}Zk9-*bEH{;IH+A(ZWTJ|Gz&Bo0;NDhCm0sbT#+{Y%Z%iRaq^b?8vG->uEE%{}& zScPxN!AVmkw|!8Tv$)vL5}t=4J4TW*y!ExJuKCN0?B9?aWic-&pX(4`#{1pf&?e zqnOD+R~>EIMBuFC!;a+wQfnP46dUt~u0+YzTOf{9bHa|-xo%qFYC|SoFY}LWLb(QO zIVR22!Q{AwSHE-Y0H+UV2UsZl7AM7*i(<`r8^KLHprht~?C!sn5IyZBjEDhp4w0-G z>@&hSQqkOWC&D;oik&rVrCX<}f8d+FVV8!DJ9~HRpY$eY5KPF5z)8@-rs2pUo zkn||RhFjV#***EU)z&}wl@unbHp-)=^8{YXfV6s_FsgX$2_IG^db+TUMgoz%i5ysP0XU_Kzpkyk6%6DuBE*_HGxr6kK~?XH3@`{2ht)Kfc;LO?%n#N}ioG z&s#DEhldNDswIoF_UlhD1yLrg#j0G-C47;u_Ir%2an*K@9+|i@=c{bfcYQyq+MD_v z=1_L|%M0`p&@(w1#qGiu)Se_c)y1Syj>guC{5w9r?V#nZVQQ#vgkYG1<;Nt&ymwXF zSetEf!qW_QosF5xAV|^QU^jhsbl@>5_`F=}+Nt8@NU04AHnbfXxUo*B^m=zt@{r z%98u)g?w<)3D0|zLNmMg=0>5>FDzw@U}U%_x*egB;V=2P;`dK~yOYn86GW!>_JW3Xp6{jBaLdTEa`&ISJIY{sXj`-;>CJdf*HvOdrm5!QG<>lP z*MgfmfX-_AC{$V)Y>hM>Q{b|OtFv^B^74_?FVQ7sD9!2p|<}ZGkIcxet&TB{#&F>8of&AJ& zbw>tcWLjEeA}E!}KP$}e%xT8R&3*WG{V&~^5qDq#)BN+s30Oc3_&-Z)=@<6vRrX+p zEFuYEBdz22BjqVE+8~g=n>skJHA=&o=joRZ5_c0*aJ@KkBkErDIVcCCp zZ;evSg4>HXCEC45w!_g8#sev5j2lWr)rEu=v>jH3ABbSpVf)WufAixuv~lG1@f?)`_q-&#G}$bPftppjUvi5ehySf46ud88)eFRgPpD@KVvwuqEj9>kOi>EI;cXv zCocso*$sx@iX%*bO6|<#(vBrDShXlM#s|puE7-;> z?Wfu|y`vb(pLah z-bh#Mjvry&6#5<2BkMsr;%+=$H!@;1n^%B`z#h0J5{kwb%|7F9}o;l%9jQ@80ojPHEDx2>~JT|m!NR$jft zT#y1(FKqb|gs6#0veXRdJ~;sEXV4|82J#__qqyDDdAwX_icXm*nCr7q{swGYPB6>k zRgk7wOpWVQtL64QFOjjzlgF#hLDFT&@?rgvBE>%h{$A*0g}A!a*1FQOnUD#OiYFNY z85wTfFyI9CY!}|Z>jFu3JmXw0Uzq1#z=8k$n>U*kp_A8?%pDHEa)y`9I;8VsptjE! 
zRDGXEZ|swaLM(fNr*FXochhBH)ozLow2i%h4N3`zs^rJ>* z1^+9c6>8=J*A$w8Rh?X4T)q>B2rnO2n{ME$PnJ9@p&$ErqMV_K^V_b&4r)OfecZ-( z(6u?QLaro2QjwK}ux+1w#Q2`oaq8OiU%4LjqRzo+96ANiiuy#)XBg+#*NP8}3l~JC zNz4?XERPyho>y!&*QeL2nA)nq_KQDXYw5$z#n)}x3abbpe*-&BZ+g|y*B9+cO7_Bu zqOmSx716HM)fZ4M$fO17dIX69-1pO9{NT{`4Hq;JA6Puw%l_0?BxioZ(mpCOWBn1hD|OFWpqCJr=J78e%Mm)o?|GLuyY?c+6(Q#gz1Wt9%z(Y z?%fLVbTxkQ`P%IMdAlT$N)f(BG~1)etox8CA+Unv{H4*!!plNFAAOJBDr{YeN#?T} z?+=cF zODld7;y_GeQdj<*7P|uPT@AT$s-44Wi~jXxBvprL8+(z+!hXgM zjB>f@cGG_7Vg@SQm8)5DT@0A%;;z;VJ8Oc^1m6WW#0KV447XI#4SsAgI81CQ`_Z1Za>9a5YBUNBAdXLRD^7;v0I72+()kI}BkG7mv z)17S8WAp3$k~HDH!f8p1?3?O6S6$!wso}QppD%W4mGAK|+YSaqh8QC!6C8^=B^}uM!%EjA%djW09R%W-DFb zeCl}*sqN7VQ5Q-nieT{S_F-#Bvd$tJFcJ~s zPeNWO^e&kyhlhK#9Q?QpY&}+WW*>#Q=T;eFpASsKs% z#LY+gfP?^uTByzr$8)_ieQO6Cb+xzR{xVy6p-%oAh&a^_yP~#Ki~uk|ldt=e|7*SV zG<^sUisJ3r2nP)&h4O(iE!$|6Em-Uf>uq~Ed9G7XZw`s3H-m@Cx$rIIm@xoT^{?Ql z+cAQMcH+|DAdEEe0?HENJmN5H>AC`!9n+Kts z+;Qj;Q&YPj{@O{nlL}*KiR8CD|n7jA%I4&@qA`&Mf3 zNjf&aoY{=+yQNkOXYR^E7%O|6t8mu;$mq(k_wpe+wT73RNgbqw6Uu&RXVWnc&%4*{ z{w!Ya|wET&G(oP3tH^Y?LpVON=@%VbZ`1IoDF7(S} zu)TB4JwXu!H4S!qdC1c$ydUWHR7QrkCjGo6@VWmU4i-ef=BAYRoq9{b=QAv{*SEAd+%{cfaPHc9#~k6!x6r9 z7d)OZ;oBPFzQ^;(^;^?{xrz^T8RDbVgCSYJdiLvs9}PakJ`KMGH;IL0zj{JzhB%t~ ze)B~A@BmHC8B*C*EQtM`_)MGU{gX_XyxXjy_0j@Pa%L&KGim^%(~j^QP!`)^tf3x7 z{N?hLJY{nop~$>1@{d?O#YFk{7 z3kIc7YOvRP+Qcw5VuM60$n~o(n zMZBVXKz?`mH~xN8?p6>TBp)MGG)z@F~(Tj{qlO%Y6$%)fnHbL1t;2+1CE_p+DTS z{E!jeX(`x`cpbPONX`L`z@J||;ZvltTUQTa-$6rn zi&9+7=ovOQA(cLV<(C_T+il_=ww3etp7Elji0(gLIM-?2Y{}bh3ix-Kv5fPLu_>Vt zb?N=DdxVg%^wDrtGs5tThI<}*?iuwl+>wcK65jIP%%g+Zf!tvGOGFkrreF(uHS@2J zD%-m2(^d4F4r!Syfz;7@=s^tdr6+#9)tfKpB`GOA=44brwerQ(=+jEhP%C)<#yRY0 zc6$5!aQ%Z#vI$`(B23txshN!sN$jC)2c3$q$c(V4vB zbiU&I=ZVGW+ZaCbhhpX5(3*h~Q^>69x;oEi+&b-<;Toyi*JQtNF-rTE>8y!8Tq50) zuK&&TF6W<<9$yiXqxN?&^%d80&Tjck!-UWhZ(-Yat3kh&HR+wJot_)g1*8KE-$&$< zf42-9RtgWZztQc&4<8qPh!gJ3fG{w(HY6JXE3HKbNGNO~!Ux(iPNlqnsMk8nm^@m&A@3IY8#$UJTkYq*qN zZ4SbG*X~(6EMf5aKq2i-Wyz9n>$>lx=&@VVrENuU$Ge~>INqMsI^WM@l1fmvFU}8H zR-*mg&1Hs392+FQD8v>H@$s~Z?eeFV9J8G}bAgn0pe0F;PPAW22q1?}j5HHB2jlqN zB6wNOhgUw%2FQ;(HuSld3{DN;;4m%*6%5hs~3!7j~ zcmGBJn@_TT#55^;*za=-)SSiK>pmP?rNm7X08iRN!)#)$6<*6VHNV()tq%rU_xy_E|Jwp%{g5Xb9ppJja_~{_KL2=@Z#Y?CR>fNydymu^ z@e<8HSJkzJdgICC+F4z%n)Lm^{LgvW_r>#v7Tful)$u&_Zxzz1$I%{@Azky7?N}$b z{+7T{Kj-f^w;gbWXwor@2II}=GWMRs@VLisA@t_1vCroLuU7cxC4WhuT4jRDRR%+G zeY9b-tB((^y~ zADNRWkDJ3|4Ey`d8Pwdy-S}*QNz@xBzQQGiSIRF^D2-LTR`%CxMjhRk1*E6no#sqe zp;?EL4jZ-=$a@$ifm$av{_1xQTtmj$J(gRg-d&fWjGdMtiUjq7vE3^vSdTFa+fZUO zjGX7wA#~+}O`k-%k|9B+Vv>D9FU3~)jW``PexTl6&cRVrVzUln>UUk4r{q4CkQkMw zjybftAKbv{`W<6g!d?jA8NnaF(iO~(GA!Bh-c&Ks9)E`YcJmImn{QF0 zgt&t_Wob^2w6ExQdP!)aM!bIfbiqJP{xP}q1*k2#W#WJ;q|Y$KFQ0LPf);Sm=B<*= zS|!B2f{)kVK2o(Ts6D*NVHk4ziNxPhJFER{U61lI{qqQJoFWTviz7k>TImanO0fqcFvUz2a#%lYSH zzWWOsko1LUJwxnoh`;?xJlU*QHuUYWSpqeWqw}lPpsn!t+(l?$52t6D59_i5AYW}U zew}Rf*$VrVOo@*+NOkUsfCTlQA7MJcZ>?L_vWN;!57Dlpvx6RYMV!v8Lzgy@o_~Yw z3&3p5^iE2OWyfjzZ3o74NKrkh;kv#4=UdBc0ieJId=JI|&#l+(U5lq7t&%*h&}P%w zjC6H|2Kw=t9h1F9Cch@s z+GhAK--p5kB|B(i6bxesrFYqrNUxN!?6YBLbF6={Y6@^G+rGJ5Il ztIq}`VYIbnI@iR+SY?Mt*c}xpAfqhST0b-?e3GGb#@==dO=DJDfHB^DVRx(D}N#jhi zw5a%!reN&H%|WVquR~SEruWBC+lhQS`j*EVISy66?A}aJ>TyKNOZ3TRP?Fp5(3mjo z^pfm%$5se(4WcqwWls-;5_e>#(>vBlg9mr66$q>057re`AJ$Iu%#iw($hu-^DRtZQ zZVq10@Z>AytnUc=5UCkD*Kt;9rkk&G`VwAXn0&aKOlcFnT3*_BRfaC~T?5xYpEDo4 zK;D|-OtOR^kO}Xso118}M<_<<1RF$q&wktPT77IZJ5iC6<^HeG%bnGtvGJnfR2_bm zpAy6T?d9>d_hl${#=ku8&k^y)_6nMh8mYg2%X2f(7REAl1JrnQ>wjeO;{Yhs2mU;E zXqknD)V;|*>Hmyq$AT%__5#gV)f=~3HD0s}L&a9~-J7iwUCq2U(#Iwwh%$L`dpUMq 
zf7fpH9+w>wU}IYE)fu-yTyK@N>qOx?ir-6%2$#!*RHRBv@Yl=qC=>k8@qFTT#oX(P zYfqHoJ-j+k{OAYu2&}Wot^&rfSQ{l*#ZyuWDB_mpH z0hyg>dy1dyKIgAXI*QY6_c2tcAPg%Xo(Q=iwI!fOd*5DdoB-lzBTgU#{|aj-oOcVG z<XZ^X(VfeDM>ffVOXe;YW!T(GCBd@46_I@{cVm4j`Tj@t z2NyU5_XVsspPOkm&42Yma#Xv55P{E0NMj93G$q}syzT8WiRU3Rnz)c&Kc@k7%%cUC z^`}6~YXM!78QY9w(rd8K)lv|O9EcCc?2DRtmlCA9Klbus3hSNeqAxOh zI#z?ah<+W>L7sOUo4Vd`SwV4@r2ToU3Jzc{NYTa?ApcvFPw0KY73whihDzvJ^Aw{f z<=w!+pwBAMKjX^+P%kVRJNAEy&ibLr_j}_gHf7PJQqtWeA}yfO8zIt+F}mZebd3^_ z7Ku?K7cfe?K^P1e-8s7J^WFCk*bm#Y=f2N5*L7Z({cdgQ!hd5o5R;^}C2sa3xNZIG z*jU}vrh2aEp&}L@$w;0Q)((G{xC%i!lR=I#9uaH9<&%=QvD4AEk?E*>%e1f3tblz% z#1;$dL=g6-P%LrhVSt^774&=wQ~4yovl00u&FEi0dCS(Xr`}Y4tEX)R*O|HXJEL&GyIt z+2#`Z*`wdlKD}@uzFx?j<{tr&x$| zNvDmy&`QlSJ&>M~|4iSsCk*ax=(7P|Z`QbyjnTu2x zz974M!Ct>7zQ$-_&s3V%>?~857J^>-tLh4O=MvLf5H7QT()c~@Sq+>0P55=M^J^d> zf)uKT6~}H4z|L|lf96~>jK%09F_9OkweLp`5Zo7~9EJ|2#V{^w-@*8ZN&-iEiZxW? zsu*_9Anu-piKE4)a7~#DeLQDFz9np6&MwV`s))jN_FPpKmtkw%*I@f;AG~)eqU2Vgg|N7nsQ3wF$+m-@5v8lzf8?&lB{X?% zgU(+ZsE7HKE*u%-1dT_4XI`~(cFh!EdVA%j6R|x$hyY&!bVv-Uvg}TYq=_m3;XTop zDwX&U7V-eABk>4NxT9g}A#2S*;|a+a9umL%x|Zc@x-kps-8%|vWxv#v=*Ic2T~SN& z`RCK(gL6Ll`Z0!1&7CUJ->AgDu|}T7$R&NmxqK&{o2y5P*`_t;wcz6RmBNX1fqnJV z`yaZ4GZ6p4GlNa%KTv*JrXL3er0{nJjj%yHT__e8B(iR1a-hcqJIOFNCT9Z@-|A*F zrt4W>17u79Mpw{DiaV1K+5tKrqR-6=_Hr4NXpIyCF0Fhq2Wv@T?fdUaiB}h1>tP{N zRuWYMrtS=sM)=|xdlp8V_!_Xx7cSD$Mg_ycslrf3k8-E*h}5bsp@{x3uIIS4_otzf z+KaPBdc+6zeFrE_-_5lT`yWZ$?-4mQ*VC9l_Z_!jm*kuKX32}zxP$U}d|coU5pvM* zs>Z_~1<~7tCk?(sGdTBRTxVyI)m#8IqQ2>H{!6%*x$@Wb__m7P{U7Q(sGFMkz73Hg z3^&4{9yKduZxS5o`9~HAgv<6_eaQ0eR70i*6t66wKl(nNW)R^{9`-Rhqk92)l7ib5 zps^2>2f>j0KVtM&%tSRNlSgX>5&Sx%7W0neY{&NukC(X}nTm^ZsZd&Sg+|s~yH(rz zkVf6Y$@e4ug{^h2Nk9Kb^6WhNsGUe9DO2r#bp=PWZ5zyY3YotzlkaRm)3XYSV;KaC z>orBsX9=$?YA^t`nLE9(;I?68odFb>j#nsWKCC~j2%lFSIq9dgGGI64IBNrf7WyeC z{dYNQe|`~NP2{@fYtWXMwf=EdeWAOk$NFp|h+n+`NG30`Ec;;0A<`|1=V{Ou4<}NGfNuHk zS0};@GW+8V4uMtQd~bLX&)wnthrQH`=^AHnVhB}^|8J~nxom=;;j%@0L5;6)-~9K2 z(w&?LOPfnck2Pcj&uiYN%bctVa z62!r`=8k5@2lu(wf|_n$2!uQ-t2SjJqi;;mBXiT*5FPJGp6+$svJ5x_P@gnb_kJt$ z9OmZkwA0N8`Gu{v6J;>n+M7>uEDAqYUyhg#+JCOXplc0`o!@Yb3=b+K0;anAHSA8exy?Sl&d52%&H-434J9Zt`UKRI1x-h82PscxhsERW9scH} zOE}6}!vrIGUmdQP^>N3X`s@BuJ3L!L!#<gwcgI%565;<01UAlCl*tAVc&lAXUDYI1d@7U_;#$!PP+KbW2ppZ1N$5Qal~i_?fYC`D21*0qz0_l zWyjktL!dp87O@dDLOU>6-BRb+u=%Xt8_AiWSZ3(BCBpyHSyHvGl?5@a?Z^%u6%8`5 z?2C&;oSLLFx*AAPZ0j{A4dm8~>=K5lGb!tti8*}5$q?JtwTf;y{7(StFi>6Y6H{@e z%$A{=^U3AcvDBJbWsHWN3mXYGxzn%^HCUfHK`oZtad7d&=ItykTXeJj=giZKRI6sZ zS^d@{7<|0E-h#h&Y^@^c;Ci5EWK3j!s`}`ezMyh0;Oltbg30{f{xxmN`O{);iUBf| z_BK>{CE6o#TGOj&K65UcGMU9|H#uJO@rvqNTAmOhW9dQCM8D$U6xyqx`hZEsW-((k z=Te&1E;Uzx3huB0XpSlA^uUDYvt~3#N>ch==Pn)|ZS^FFC6@c?05Zy{ruO-o_-1{1%D|H7O z>rwX9F+)}dB^h(-d&=gBh}afOk4>W-6Q;1vs#mK+yGB}=Gw{===ujN z|GJ17n+{>zim~KMC)k&vZkiaUh%x9~_3LN5*IkDry@Gyy6?e+L(k(?E!d~DAc};t(b^q8XHPIdE74{ znrr?uREd8#)4!klBFsT?PJU|~ z*%xdpnSHN%MpnlC>G-du^QB#J@PnON z!t^2AIqKVnnQh>u%v#SvM_R*D1&2?y%q^N50#5DoYVMHNC+V#J$vcjxmVud1`lsez zPtzUcqhCkIp26td8oqbTZCsj%cw^Qs*Ot~BGY^8UTZB7ab^lSk(8 zv1?e|>OILtbxc^;YzpkODCgg-I#n-esMKDTKH7!oQigc?3`1PjSJk`cOM~9*tDTy) zCW#{ld?>u28fG`~P80Gn&ip6`ZPVHPl&1KIiV(g;a~0p$k?%G%q80L!y8^d^R0UlH@hDe3nSQzb2a& zuSRpj-A~J!9Dy-`XkDlE$LKxEkmOE~Bkli4@N*1df=NSNHeYRzEa$%u7rNilnE`ek z;7*<~FDM2atqp$#?b|7&G?i`BV8pXxB=R6~X=JMr*=_yw^7*8d%LWVq!(VJz4BUX) z;?5AO9D5q6oe_X!$YZ7urhw+U;zkt@%r1=SpPEZmZ%1MX0zF#2gbSJ|6!U^J$9`gh z_o4ULov&GBa1kN5ylO?;h8B26^0-o zY{}}9lVYK>A5=n<+b+#wVLGg&wV}MZ z{B!>@|B?*4lh5@9M9%)ZETXI1s*VO+Az;}^AzwM;Ov2{CvQ!xS)XLoGm$IGHJJLS0 zeEB0N^83=GHI_FLn81z+*9(;z68VTi{e43ATA`Jg(H 
z5-u=o@Eq97w8y3V(uQhDTcxPmQz7qFdZJ~b(i30H^D%&JJ!gzROXjKss{SH%1*~)q z2m9pUaHpPE{Mk84)3hFj6##{-!zpR`?bztSn&Bj*n>`B)5bHng7U(lcF|B?LBxXBKjTZGO96_uNTP!daY;Bv%NQ6f{XaAJ-7 z*amk&=?iK@XkD8dVWAXSIV>L!LR-#Ru6*G0rw1(jt z6T4B=LWWOyT%`Kwn?@G!ZOKRA(mo#OMtCrW{{Ra(II)ZxFLR0x1;~Iry-X&U+dVUFNnhiX?q1dxv!LB%cx#fYooF4K zNN>&N$nx@}+4zred>Zc^h5wO-zhxushyrYDUxRVA1raoz~lz zVX0h4g3c5ZX@?YL+h#vs{i(3w1}dkIHu~N)lxctEUmk;l^=tds4$p+?N!N}y&A;z2 zFNL8)Al4CAZ)6)7w_S2=fAM}u^5poruZDD&^t+8)LLqA#k`AR8(J4zwk6MX4(l*Mh zmDEPaa&NC_5BQ=o>;_FOply zKbY5S%jxTqt>Q}jxkH82A$d?RP7w2IjxnhYKa^ELdK|83m$IS{VLm}~^PgC33)lIQ z1l5gP4HUhl;X;QSc7Q)_)tR70j#|a(7urlX5z~8N5?KVcCXduxY3vDw`Dj<505O@z z%=%D`uQB%aP1W*Pxd$T%E@L9D@e0c(`TC>sdGL+rxA~5@8TZgJAoh{#y5|B6LbLn4 zj^QdX^Us=_%4StjdL{!CoFD3-cGYK$RRhB53g@adZ3-^($4 z_6~j~$}zIWRFISfSzz_sU3ic(uS~wrY}*i3_NE}QE1{;QG!~^Uzb(_=K~*$kq4sUm z3^>~v+0Xim>9}_f;IeUE17C^;E!*LQ#Yg||f? z^wXNPUqocf=%47t0+it=vX9d}n^@{yi5io}9_880d*qdNp6*~frYx}9&H4g`e*U+(X+?SqdrXXSabWJ?MrhG0YnZVa zh@k2qD1hJ0DpMnujuoKv$BAxiQ}oTM)|m0he+;?ina_n(_O&=2?v#&xe;s?>%?em^ z-(W)jYf+5j3s73p0N=LZM9nyRKX#}y1=m7TIAF2xYia&LOqAH9p-7SGy8E-RF8PfE zB5Oy`(j9CAVdTdM$ZZ;dTCf?NkSG@V{my<<(3cN*j=$XZUStU+|CSo|>h@E?{^>bp zHmF!rAH?N}!CyPTj&$07Et8_fONt#a1(N^ZD|4nG#t)@Ib{ClEDhB7?2ZCA$EZJ~C zH%B%?Vxd{`YvdtG+5Ge=t>k-V1v5_d$td$(-{@Gu&p9cgF+7Dd_R7vzufvD+@uMX4 z{}K|PO}L#Xq*#3VcVatM!O*3itIM|XwZUXa-uX@3H~Rf7Qr6M~3_$B3*dsPC!z|42Hq2XOAr^gw`G;2#q>G@R?mDoMw?qu5Xh zS}*1!+I4#9$Ot9~5bzrWwH)mYT`cvg_dH@Q&2ruGaWPq8e{rmkaFN>ss-|z1gwHO- zy@8GQH71jFz7p#RqGr!v-h0$L8UN0R^!>H1CA)ByBesi<>gK9_SdE#UJ^*p8zw9YgC>PbkacJvki#l>uG(UDIeN5+ zHN}if4=>&;aKcKy_4MOtMJ`LOlzMx>LV_-5^V4%Ub#scS!0&!n*pA+6@kPYg#DolM z*R%!Gh97bqKIU#JM#1<2o@Jv0TGfq7t(xT3Nq~1?H6kcRNTkJ2O(o#!XNT!oYTWz)z zp4r28Mqr!>{=`OKd*=gCS1&o^tco}am5;6&o2xT%Zsaa5Q(N=3|7w*4cXV#%|0R8<@IZr7?beS$=y()`?o^JAUMy#L&0vY+aqiWyz6 z2D`7`1q06ZLNkmeE%&cuxTa_>2X`OIS##g5uYO*|>`i2UK#y8%@=SDV4yiU^chrIskMyMkgu)CjBlo{Zx#+tbtoc9S=DxvvWFnTO=&D|ywUv9?|&o})u?#s-AAv++iHY8rrKwH zgA~rg?rstsQ4HFb$c{IP8LMhs57DOXre=5|#XaElcsT>k_tlkI|AYDN=faWnc70>;2&l|_fE|qYtw_1Fq+gW~A zqs?U_2ztZ?*vZ+7oGnyHvCx+p6~kzD9z7fE&3E>ex=G|wMYN|tdF-qj+rZMfYr#2h zYeIuSiDT7FPhz&hdd~u-=y#Lm`*q`gbP?0%KbHHU5~*YkD-P^mWFYd}V-?plI7KQ! 
zwxmsK5WQIsyZ@6z)<+xvS`V0a@sf$od9Ko=t0BK{O-FbHR`qhZ=Y7bQofIUYOYI$4 zAo1t1t$LeS`Ex>N-F|&?;mAON{n`>6L=`l8`{JMHpnOGu>`juhCn;&E`ox@a+0bd|!Q}oDx+8(ge{2aHweHS_;6=;A@?YZCIrf{kho% zr3e;A0cv2{)7o=tv06SanJ{>*mZM^njS7dF;@nr>Wk%^;58uV=F}~ZezBR_UzqgzJ zUCl@REq65rHso*BWI+(|yqx>G?N`G;ZpAVbk%75w0YeiPeKUU*4tUR^m_?!E^FATU zBSGvwfu8cT22ljUB(fpJ#tPaH)ulw(H23$r#32jK08_1;c}w!lWO#=Gszk5+T=r*2 z%pK5&;s=TZ3Zf~7>|p)0qU87V<`*9UEtfa-Xe31=CW&5O!Tkp%D1ItSDnsCT{U!e* zYgD|0t<^aHm+hT{U7+2o+B>trKNF%0tWlSeu{$k@HyQGZW>AN3={5;8(efQ6z`Xl` z6hZH|S<&4cafW@`90AIuQ=76B1yxGOoG-pB&u%~&!EuVuJmX}q6!3dS;a%hx51k+J zkLAdNG(DW!pxoWcC-JBM-Bikv?UyY=qQK()#e3q#Q!TZ4=?9&wE{|G!y8I`)|3^aC zvQU(IsT)y%+>fi6D*B8GC^1+*e|X<-q9=Y4^4tgNnQS^nNX*uZWFJZ@Lu$s4ZS$R!OyYz0~I>Vr%kcppLI!om?;ZFCRqm09CNL;hP zf%w3FuV;xl|IuD{Xs9m?HkgAXlO(n$@d6XNY5Iw}pE6q#%QTjOn-SMqPPDq_>8~n} zO5=DlG!f-U`la9pTXQ^(KeVFqs>8)exagD)PL_j(wWD_G)#p9uVJ z9pA8Uz>TQFEW;a6om`aCs=uK=+Nu?u1lM_zDLg$HO)tqXC+|~VACUC zG1<(NBY`6Sq}jCa>#eoyMO0mklUK9FmEPi@Us3*klZU^4B9a%Rf4eM2uK2_$=oxfv zHXz32#BhULqb}%Yb5)G7-r&lU_Jm(J1=5QCmiY z>hF=+dWgdPZ3UjO&XyF)+&yl*+8XaD0`jAoYX4sJ9OncyPlW3Hh&+t-u(iVPR|WXl zl?fIt0EYNGEU~T`%dW{O zLqqoOCB>=Q;{@ALF+>2E0Mh13@!GVG^4*(9^bbmUiId3(=jMW3??;0X?B;h!1lJP> zt*F8!`?>H)tw2Q$^TIj9=ja8WBF9^i{o&9*?IuLZ_p_Vv!$G1q=T;h%o(RKC*F@CKmCIm1yF@2YB z)%#Yq2}D#@Ljl8Wx6F+V*j@J2L(%%Fs{~%eh`20VtrcV4>~=8${EqiqApJzV{SQ}J zZmu5B&8jWM43mj@y{9lCuc>K|5^l?qbi8y7zNhBkBfwUNN!K`x7As66>(;!WIu#z{ z91RjI%45s^x2dDZywqHTmGi;tQ-FCPbC>EM2mH|@zMgPgED4H%$&WQN-4?`$rh#2xG=$bUE}9#=ifl}9a_g5pYHAk}Qd z5#Y<9jSr~(mb8>b|JZ0qTCp-kNrSy@0739Mn28dZTdc%cUD++)2!U7$;U6)v{6NVm zRv^P;0>Mm*4$ztZk(9H+UVYw~N$GoZLe>xJ70A<1AOG)-%1pHXQ+b}8#HF827>rCP z#p*08-=FK!a;i`AkBiinX|d}Vl<}=ev(iq1TksQ*Pw5aZE9aKD2D_|o4K(h2Laeyq zJO4z?>wnbc>(wTiwOI^hFf)hCQ4PlT2NK?Uvxg+oU2kK)jCwM5{jf>NQ#||!*S50B zf2`0ABf5fO=`)#0R&dQj9}fTg8}|Ta%cELBSIuN%0U1zv5#EMk%^8!z#>CqD#=7ht zgOVL6zku3qh#e?0cs1{C$z1nMjI9-RrO|9hDvI>Z$)8bZUSFIdWh><`0rnhHPtj_9 zp7+J9%6hF-}7x0IZr82rRPD1%!KI5%HmQ^)r5RJT>^-q(h07!AXamGd@q(j`vpsdxw4pp!|>D<*?L z%Kwq*V|SXz*JvEd0Gk-Blf2AM4R%duV^%Qav0SW#+X@w;IgQ7Nb-P1x4bmI50`~ zD+B&V;?L}Rn-W`e5i&VbQUS_!u8Lm5N!i2&8N~Nr$bbsD@=99R3l|kIaT&olVj|Odl$}Z@65uygG)<^Vr1+k53Oeh^Bq8A251>VLCY1dw}2nW&Y`Pr5H0YI{$~yzs`>0}?A&dg5^s>z~n3HDW9i+@UY-&9UUpEn9Q; z%Y<^T+V{mKKUGOtyI?WP)J?Oqjfriz)Y5YH)|DzIVjoFsv4661dU}>xLrgE&d0qe2 zhpY0i82d2^tCRYeomRM(dro>fS&irI2|w$y`0CFfpA0*+qM!W#NVuH;j8AO<_*G1dY}saWCo%r+PwfFZPdx1*jZYlpo3%d@Dv#AqBPBVmNDh$EDHN*# zZW?_WfDO;SzM1xOB^V-^N!$Hw^KIPl{7r2$f^YZ-WC8=X^QT|z?3J_;eInjg_nr{R zM@d#|`Yz=WGiPNqb3EX$WyW)2+VsO|++5WEd~*L{s3JM_p>$3u&0D%x)l@Y{*ITkJ zKLF;r;1?3bp7(p+NTbRfj@dGptyu=ji0^n7Dxv)LYpQBsO_R=O=z2BNyB znv7l@->KxH+oaj@2$nOt#H+Y7e;lQux0b_`($WrG8>@I8sHcmT=tYxBO0z-#)D$w- zvRDIE5m!N4=O783o^7d=__7l z*STU3%lmUfkIYQls@0e~vCPm?HxSA=$o}rn=AktLj6mg>TBFS$JMl#}@o7}sefnj# zCO!*Rwz|aSRGgGC#bCWMac|JWs>yAR|J+RS`!2mD79ybV?$9X`H6&_mPMPTJypqJd zoau7Gzar)DzNO~4dKqQWBL8K0DK&S=jQbmAJSEhP;`^OX#Pj)|0Fqtk!e!P(+j&-$ z1$b0>FVh4GowH8Uh4$>Vg(M=@lY4IkkRYvt(0It|yNaUuWVLakYU6r744Qw0G1jq4 zy!|?Y!q?#CikDO~H16CU**0n_Mx{*<*4__{s(M+8Hx&g~U`9u*#^}?$NyP$9i#zTQ(9nOrF-Oj}_tlB^ za2oF7WY3q0=5Hwh(h|g)gaxU(*DaH8_BfM3v(37jkoez#4=bE$fC4xlfwDKTMn*sa zqfk|lYufzR5=)Q4XH0u?#JCA0Rb1oeB3;btH_Lgx;I>M83tDF+1sJ?mjm%wTFX2R- z{^#xzAc9@1`yYvEjo>HY5JRi;K;b4mOR?01_V!o7ClNRdvr)&T@wh<+9}0bDb&t5| zKUy<(EggJx1KpLL2xGAzSClCp8<@{KHrDa_U^an8YOhI zrn|bx{S`hp;!}7gk;mVi?)0x_{UJ3{CR*kW#QZ=fx0+#TT6e5Sur>T?Gtu&j%qc3~ z64`uvP>y%=9SBX0y-&7@uMd9ibN5VVqyoNUJ5~<(X5|oS1(Gy>T#kY~e_saS%P0fZ ztO}4TuTM`)b$WKyRLin8nHec#En{v(Kwh^(t(g~3=2^I~oXF1*pBi9Q>o-bwbFiIa 
z6!`c={mj?hHN6YRT4*sU$LA42QL63gGDipp1&nfyr5%q9xpeJI%B4EcxYzVPI*nu<3nRN4;22KtnjX@k&jFBBwH*k`n>ueajjDXCyqWo8Qfl57d_9h7@b~_waRcN?dkl3!Zof8V1X$ zEdGz=#WaEjoyHJwBNz2fErEEHQ0a{##I`MVzBBmxg({xl^$YM?3F{exQ7_-M)@hM&)TV@d zI`8R!J_y3l>076=N`N@2;$=ElcB^z9r|V`;S0vX!RX9O}B26!gC3a_*O#yIyo)G_u zLTPC))ZI}cp_^jjR}JO8NrUT7yTLurNSFzL2&kDQF7unf#*%zZ|R8B(jdp=M4yq(o}#HI zh%pNPPNoCc%kE9}M#w7yhTyz+H2tZn!k-arx(~>4o`0dZ*@E=^!!x)40=f?>N{>Dg z{dnMM??QJ>VLHM#d`~N9(b)L~&+`&O#wx5}hug`}%F3&3MQODxaGC8vsnHzml<&G4 zM@w2(te|68+Q;yvHPw07Rl)r`qqzmszQb?I2datImYlYRM`DmXWb_iv%se+^k9@=E z=6*$mb3yn5-{)ksxBONqpwmiW3OF#^MdcSd$WG(l4zcempIl%CkkqaBs|*~}LRCi$ zJjtXPIMZCd&ARKU{N@36mBX0|C7(NX&Iq@|?$5q^NerlqlMzp(^%9bZb^SbjZpclz zHJ(sds#GC&V1P2Ri8E{yM+%PP-CuDStW2F(a}Iz0FH6n9@H2Si(osk_NPMn$BkHb+OWVE<8e{iWd|!uG0HJMgJlit(aq zz!{w6PbIlmJ+Ny=_fCz9TDi}+t^bi|%+lmJ!^iHw7X=JjqXt(7nRq6bGX}0cctJ;I zemjUBQXl^&CrGyWg_S-Q5Ma=5l3>zrSI6a8$joi{`u2W#%yt4FlvpN^Atc7nN`C*) z=25lLdsE+g_WQ{#1(qE`-WpgmNbW_xAZn_#ra2$A1{)U_+yQ@)?F2?~th)XyK-u#P)j4$sr$=!Bo0YBxTqsjhwEo+J`ia0}+mx!vcRRkRj&$6#?nj0

xvV7aM>){Tqb|=22S6*87mwKYCnjU5A-FoR!Iy@NiEjVQZ19q_;`V z)|xpk{g%(*#Wm8XC6LLndeRQWmuYr%v*7OC(EctywqO`sdgNUGP3WcJq}hLl>+^6v zMREbeg`0;1Vp?BU#6%$Z&IDR>l`r5SXN$LQAVo)N^KWg{0(U8dX(ILas%om zG}GH-RMC!A=B5Dg3kXW*{JBb(l7(BK<7x7}Ia1=#=?Al*;h1;~W5P(T^mhkZ(!FK$ zD3MeQ-gs!j70W(ex!m;qXRnZFqU#}Z7K@CL!@~bas5x$71!fN0lw)Mn_?rLn>E*mC zsM=68czC&lP#btn{~IH#Pu0p3ak7*BpZ_+u$#Jglh26Qd_hy5dwo9eFzseJ<4--)Q z%i8(P`uN^NHlZqmZ))_NmZlCEh3_`vRK}yt0}5(A`by;V)Swv74}9JQ-FfB zfm(ra!~PFhE4hWT8spbxB|Cc+NxPS>t7$)1pI)66$|>^HF=skRcoQKF!_4JCBB|!V zxQXI|_|SF+U#d|s9_)ynuI~7(>>(-4JS58^RZ=O|Z}G%O33o-vh^?)fdMPFRc71m} zBU3j}w!LPm+X~_}Vf2Ll`3^;&`Z^s!MTl+m+Ls+4YJtbg0&9Xx^xn=Iy}}V*t6S+f z?XrXOx|S^Dk2V~M8?$i|6SvA4n!Ubsy}6KnR$OhlVl6C*cLF`_*w-c-5ZFG|!h+b= z{~RUaG&z<(;=%Z~=*JRn`%9|s@h@{>&d%=rkiAWn^)>=+2cBba#00;%s+(fk(M08( z(!Xg=f)%W+5LdUU3qQmu^Y;Ae7yL*ygyCl~f!iJlh&$vAnS3hMnfG~eRCU6TJyjrJ zki0mM!#ujP7H5e(++})rG+renbA;VCosTi+KH0Pt1zaQyHJC;J8Yca{X&zDvVqqQs zQ-@V(cN%p0D=T$%eGuPOR~a);ZnZz~XZX{kil-7?56XX zuXN0@qLSpJ-ZEOHeZ6pOS1)uX8772#d4LQp@PBc<|d!*15|8~rH!=~QHPP6fu z9nzFxxVs-ZS{%e^gT^{rVZs;xz71Uo(K7*|%vqAOPYI7OpP6c<+R~VfwkX`j?v62l zM+sMaiREdZM1RbxNtI%PkTP4fYJPLp(*{9G-@8ZQnqaIJ^l)i;=jxlV>f@^cop;oR zgC4W2dMcPPxMfP(fPH~tBS~z?IZm$pvA88oUg;Ei$~N&Eo6n1{EKMrf-X-n7f!fae zQ+U-cM8TgS{_IA+l>Oi=oG^DbK-@-B85(A<96T=DTp~RD>;u@koTEtaIH8 z?zH1^>ie@}wGAlG2n?%Cdzo4EGoe)CSGa7>@ixaq0VoR4xrnd~O_0is_4?dXmcxI5 z%XAeF&twD+`a0)j&<*baV~JC(8^i_C2X)R&dVWHoj%X1(*aMJdm=(o`C!t=&v)U zg&w9qfi^_oYV)I9y;!S7uN8~u9S+rMwZ99H_oVTP@`SQqH+XxTbpvKH4bkDZS69kP zK7_n)&v!>RjYRFT2wQbKJ{7v1xikcbC$LFiTN_D~B5@{ykUOt?keH zQfXwWPQ1pTCpeF6vHY25hQ&rM&&mB_T>&cf^6L3@SHuKo6+4ixSa_VkYgd*NPhvisP-{0YeH%oQL zT(I>IXn3&O1P!XHn`4*F-&Lljn2B`LH14FRa?zj85<03Cj|eV*t643lAKNGp* zaRm~7YLt5LGEyHcms5XIp=-lYujf5E5#Q#O#*lVu)4>hlTTUm>U%hq|B=PZA#(o_q zK$ge&QlRIRp~EuQzYh=|y~Pj+oSar`8=QNpNpzFF%|S(*-AjV2k?8*`+hM2LDv4* z)W_>du4fv3H`*qnCycC=7CM|rMBVL3s4}TpNd2SrGoQV9^YLCYpQ83YbZusj{oJRL zvcX%-gW)GI*d`wHb?1fAWze}vkmh#!xbs=v(lA)t5l#->w5xfUfmrxnQ@@F~MkWMQp^_$%N&6hx}*hvePH ztK&T&YMzc5K<9s>DJvB;;(Xc*g7KxU;nZxL8z%4S$evZwV9u479wVX}4dB`iWu~SY zE4xiI6RAT%#s^U0ziiXgEd}SSoeZ~mu=XX5<~|G$$2g+KPB{F+U>FJ3Bjw6NSE&-_ zLp%wz7Rc&mWX|xX1bQ~LF%n?Puxl3AC+P}EXAXOFEh+;)+u4=JRH#y*!_?(I;X-T( zsWfxDFPp}P>X&5Y-2oOaLH(pu1V+Porq5vdds)o0iaa|+!$@SK;O_t#q777}TlIi+ zb8Qob`AIWQD6r`g5y~<5VLr&fL5+A#V(d&!ZP!%$1DA=F8X_9hgrmEFx7A~@Q8WWWtL%D z$(*A(8@lcvVHeRLFEIG6In|de=Hkw3ijD}3ZOjl_7)YexrQ{TrD!Vh658ds2@$;_S zV$@wsCCNE&2a6bu5ek(&Z?gG_=52~QgR0b>n=XHYS>5&tBF0365<2&~s=Arx5S_5= zM(vtH%DmmM<*KEx|MsY_Yxsx2tI>5Vd_4C{G9W{j{qboe+jPA*O9(bge~#zevZ#Zt zu@(~-@Q%;~Cz2+0(^x&-e0mp-@D%}4H^jx(JmVppA)2=2$xYr=+N-=x^k(#i0#ksT z*d}hC!_%f3t8;FhGbF-!74JStUmtfM*N@l#jp7#J>6Gg$_Hg#0C00&AlU93 zo1#5X|LOATLPB#ymUl%%r^OxqTe@@G;dU9vil#-BCEt-qV9wV5p~R1Qxv%l&yk6bQ zxgpowuwKge{LN8Etjw$~d&mu!vv3e4t zcAe&A%?A>hkWzq2MIMnOnS_7%OzSyR$!em-)x7nlY#-g7q0-s5A*x)`^V~QHSB84f zWtzpbkOR^*V^rA>4s-IPqSylPO#oW2#86AYw67@g=_P6jjW?iwjXFeVelVpdzbUkJ{Rv!81??!?+U~Zx81O|DbW6q z7g-x7oM8*xgtiKxDT*P@tmZxsJA|gUKJ|i7>MIWKx>ns2bQjHGJoTIUvbtm&WRSe` zXqq_Ohs4|)yH^YDW;(F_O@u`RN(Lp#KH(ynN@?O%H~95@h8M_Y^C(vLigf)eTn&XB-QX_1SK91D z(M-3D|8@)P0lL+4jxn^e^xf3$j+nU0=%8ozX&FBqOE}Y>8Qq@IS2QdjQZW%kTO*~T z#l|1i@DI-V6CBO_RfK@&q4e58}t-}fZ{oY3*tu=&niwPAAdZ(D1+vy4vv>g z7^NApn8vZ03Xys( zqjIc5yKdc*Bq`4Ex;H7YdFvqA9FBAk7`ngs8dT&}rIX8#Ge1~9zFV$jcXu zW+h>o__@(4CaNgN;yzKxn6ySXP?Sp^F#pbxSwlAp5>MQn8{;jt8{3k5U`8ugP_Rtr z0lP%CkByaayP8<$+%)EEV#byjE1jByx9eBbVBn+bkUo!|N4$x8D`rymnP0=aO+(;+|bMU z{@U=v@z0SL%7o=3@mU@FkVqWDw^B~9te*uiUdhhJtlm?}*dXEq`80;Ju`xs>CmvT8 zBC=_(WQI*6b0vmetWn=C@ZI-~@2Y%^kCW7IY$IGT{c`dori|J@%z>{K-p2S;Ut`JW 
zIOWe;wl!k^7hh_}-25ksC!C!a3oat3-eI-AAwHBUVqc0xqoB%Kl0~Y*6^QRk)+Fv3 zt!O5A(cx}enz_(b%sVPw||X@6%%g+xqTXTE<+xOZ6=G~ zT*^@zpv%k7wf&(TDLg;s=zp-f?%46b4nC}RARfH;$c4K69Z1@`D}zoCH|qK>&BLIQ zg41#1q?FChOjz9Dm1+$SPr{yPGa8=nhuW0XC3mY_Wd9C9@u4CboN~Yi_S*1Dhn@bO z2qT*l81JS<-W@D30UeauzU-)*Z1QR`2x-_xo*V2S%0ked{Xr(a=LMPdrt)CPM4)f+#QSuHB$s(kek@ zDLt=G-N2*GEn(LB4^g+~hN7I&H8py5pPBF8*@TP{VZb*e@c4GEcd5O$r^u5lBJ~DU zfPLuURdN!1x(!@_qaGbC)`CyV5Ddu|JA`$}+T>7PSdZ4=APgXyO8?k# z|4qy)p*(BSVF=m+$;9+NumNemzV7>Em>ICL*jvi@L~(x-jr>9ERp&kzx^I%NpSIUL z#?hqdxS`(Unip8(wv{+m_#>*0A`~q<+L3KeD_5)G1Lc(tOrZacM5>rCF^+p!jEbzc z%Kd2lU1uHndZ}`$8U*NnO{!H#mv!V1$sH!9MFl;gU+1xUx>{B^w=tDFxqU4fW0J=u zYgXn03CMhgqw=|NTC;R(%HbgJP&=?|6|?Ff`hk`}u*r#+kTtNkPJQ4J5{(S*W3pDz})*L>W`PP2>{kX#AtI+j7k|ud3aRZ_r8{T5E;1bwipOB}zQ~I~u zuh=*SR{YNjkW%nOp~t?Ub;1%rDg4P5cI*8$vg-Pm(#{We$)P8$asTng1KMqe!J6+# zdq*}CtA96=Xv_|OFD`tB(8(0UkkQP}Joh@2Hl3%dYuWQ6wwHD$Wu8T}E!yzAc8f2Z z;wIqmLh9FsK2f2+6fmg(7n zOcS65ndf0#sOId{{NbN%&O32x9(i=rBr2~5l$v8mr`qAUslVZ8Ol>n$Y8Ot`MKt`_ z_Bk&VS_rTC7bm3to5A?!iR_}GY<-xk8*8gaNa{v|V1C&=KIrs+6!^4WKxFFX^-)EX z@WjNieyZWdq|wEP9Qm|Tec6f2zn^B@ykfwTrs4YK?(BlO!Tb{5zP{3#x0}_$QAz`( zEUcAP zW9V*Sqpb^4Rk1&$K7f?!=8LoctfW=%d2>oWTdo4_XX*8kQI~ouO}fwX4UG`JYi#-k zw?w=3G86tUW>HsaKqiG!ybnJbC5)Mp<*=@mZ8@CZ#WZ=eIeX^|O?JDl!NS?dm#faQ z@x^U1$=S?1rfKgNM^z(gjZk(>`Mp7mKo)tE7r@eDai+yjev8k~PsngP^EYN5iCKW= z;U(!wSh#U8edfckIzozG zvWDPr`Hzmc^`E>oN&YOp6b214IJl^Ib(zK>ME0b!9Qse-RYGucd`-+w(#bPo-KmzO zaTs^RhO$uvqvVBadXF$_HONmpanYaIgLSlUh|lKJvHOOAQe{ zT4>wNKF21SB!vDj$1HGXbz&EpIUw}fmE`rpU`tR?X2Vi%v#sHcvzkzvvMM?^3KC*V*3(pv3%bK+;gmsS?FXep7AXFqF;ICG!v~J{S4?&-jRAIg{$1EQp5NTBSE|ULh->Ey5mxo6d8*=Io{L%+Y>r4hx|qwgrnRg$Swe0DhpWwn z>4x&nn=FK1mSDROh-v6F>Ry zu*@`SrWN#;a*4%lzdfHwatEcw$E`(-(@&Rk*@fov)pHdzBD#FKl*m5hOADAg=pS(# z9LkAqM_!*n{e4N)z3ele*N8HqswaKZG0Ye59$EkEB&nQ1$LI_7hI>LoIv58+RQy7S z4eOT0Iot(iZ%&i+F||Pw*oE!}Sl*Gz6oD{req^I{>o)h!{t(`#K4r}jJ4nE#!z3IU zGq6RbSUy_+p=$}lllpr2X1t1H>5>7}3^TKF;!CGE{F;;S&tfk+1JUJ!G=r-w{=9%- zmghs_vx5#S>e6_Ey;&@Dm;p3u5ytKBh7&}~dW!QjicYxSczhHTNnm=un3?hcf^JWh z?>~=s1ajwBzcMay?VjRI z{WToO(Pow}@o=DgJDA<8x?1*^?R_T4+muiFw$0C;`R$V=-RM9nVQd^zBn{I zlAg|9^i6^@lzo^@>$MG90VgC>;vPjeN3y;W()AX^nLs>#5)+MXbU%Iriuq~Lc9sf# zY@p7O3ea!SAI=tK$ba;FCF-Z8g+%psrY*>QaUH3%iS*_DqqaAmJ_+>zS)DIy!+pK| zCPT&>oSacQzGFgUGmbCIY^`i-UATP&v7&!g8B#|+3$i>m5|jktRLHv)^7ElzaGFwX#1xxD9JtGS}H{VRIHz~vVD zCCE2lSy7x(PP##hKW13Io*vg|SI*RbU z+2ecju1oe9Ru_8eZ}^emeux0Iu}7Vm-fu?T6Y0%^g=hFCH%-l{-!B~yEEq z4>w)cfl855!<2K_@T@~Xv&|-91g_(BZ4+v~9~~}T4cgeCdxH}5ty&yrR2S0-UvmE+ zMV*iW7g-er86X1N0Fb@By=elRjDH1Fpl%c^-c+UbhWcW+1rkl2jJ3Q?b6fasSov(m z_xQ`V7e>i&uN%{Sexmm%0k?Oi5et zy4!K@GE#Pql>E~epC{JAUN(+#_l2}9KH_Xcj7silzP3*os=fm2ibbv3hyGV;bR=WU z-Fto|ELkvJqh!V-pNldSzrWu3r;ryL(0$9s?NIv^sxg-2)TTRjE;w1V=)bI%Rb77l z_W{XYv%&Saz4l^0|DWf?A;=0N=+kjYLeU1upvj=DE9gjnPHWv6Rh}u8f3)447BXa;rN6_+r7GeGnwro)E z&Mh@nFYefIi^g71gY(|HpWM?S)|#&SMAd7A*i@gjiM39tqB;dKrGVn+iLhe5NelDI z!;HU)lR>KQxx5S>pboV+7YzcTa>H&fx!?DeIY#ckS3*Z62JS$$t)f>shV{d&Eq`Wx z{#AhtyGWna{Z#r5?`GSdL90Vd`$feF!A+AS; zJSw<|{g2|tnAIsHp|}~|4UXhSeH-x&_%TRz4gA%Ro5Pn$ni*;fZ?Yr7j7U25o-^jL z8uLHzcl#`TJ5>4*2utp1E5IBr*=*0$I<{avb_p8-U4JC(xUNZr@gQtnbg&H-<*scafw%#0!Hm2y@$ z%B!{+H7enwLmlv*E}EdI-)+mcFLfJ`#MI*W&FrV5$Hx!s`WZ&R+Cr7(5s}PrrKvC9 z_8(-68=qOpsj-8=+h?m}a`j(r`9$#b#W5JtGQAbal8zBgBg61gsL z%rd&qIYRyXO)y9(O_>nHiV5Rdyb^i)4zH?i=Gh7)v*T|$qOTu9r~9V*yaX-Dp`qk= z@BN3)MncZOkq0}L{3zkDOW9uI|Z#4LZRRoVTuOgu&64V4;&t*5D*D-GQ z3rpS`mp!)PZDGz#YQkxesSdhu?QaSRo z&j1VgZ%8PCol9KDGayXnkT5JEsXsfYZ4A+qC1?676TAUfCS*tE6&L#2?MwQ`KbJo{ z_<>L&k)*0fm}b|KPtENo`3!~B4_CW$+t#|Xy`i9J=7a48jAp>Dp(7iyOGBg``LzY-Su1imbY%}Ps z^t`FmO0Ctl^H+HvDNl 
z2G0_W#@U<4>Go?3#&hEiTdNQYlfk!G^(au1b(+GjlRjUiv4mZ+i%VpxdGO?^6|n#4GR-K5G4rgz zG0hh&GVy!e*r8PP%~@NnW2=Dt?kR^q)YjGRDKG(}yv*qSWWa^n3IoYD^1yk!FU@pl zuHJq>DKo+?z``CcNxk(=9M=y17%<`lyygT09t_p@@nSIeDlyCSBj!LVTJQju@F^7c zv^naY<)V!ac^i3GC%*HQz?vfQgR#rUM|0atJcnMM{9<&wCPR&y&I+{D{J2cB4%J$`om!`@ zzd;7ESig98^$Cw)QHqA|m!4 zMNun?qIT6DK@c&5S_!o`L2PO#_I}^|gnUTyoOAB`x|H60J7qk`a<{4qPU&Vx@` zG47cB&U`y=P2);!aLxzZnW@}jti0g?5>pX&n=)oyEa~16)>Fmi6A3hD>?4fP!)pl-c!W#?H%N_6V84|B^`s`zNnT~4!)^OQ~3eR633x!bC4n{gX4 zn<%}-%!H_5s)!xKN$i3;*A86bvUPr;%~F%qq0pQVwiV1jq0szKXFNlz6#jFiGagos%_ zI58>KeA8TiyE_Ij&1x9+)o{{+di1ZIXU4k4$#C`g2`!r36pNY^@>=+IVHLRxz8S=; z2<|+wmTq!U@VBqE>O5su?$+Vx@}D@aP_~YD7PR>!z@l~kv&KFeG#wMgCm}5!FI$rk zzSdON&@hQ=<7Ko-w=r6(xy44)P|+tsKX=xOyS5dmm!WrlJclqN2qVgTq4?MJVk(r8 z?{+!$(SCmhJf!?B)x&kPp9ME+%$ZZ0Nv}Gt5XLd7sqC{v?#Aw3vsl! zSZryYS*C*~*MU#D=643VVIbC`GPhvMs7Wc;0}|xA%Q~fGMheJj&DL=vBAlsVO9KXo zdov-mN0;8wJjsa)yNA4kz8J2YV~<6v=t_j5<+chj%{X3Q96vGkR*5sgm(QNYRZYOu zSN1jQp`qHe0k0?hs5#LhR4bp~JOKk7QvQjQW~`L{g9ck(fI@h3w9hRysbxY#xr6QN zBv^VSK{G{W+5=*WO7t;v78){(Sr(O)?2I0x^L_ynH7zW#xs#gY}OhymTOk!$1!?zjPyQBOGYys#g=2m3t zA@Ly5oTP0(Tj$Br67t|5ku}2-ub*C-o^ffIE%*n}_<@1(GdaNQ!-zz;ny7)K;$eFm zH3AYHUvM`1cUvckMdFdU@qJnb&S2-z3pbNC*5Id|1U|H(kv}-TK9e-&+=w=nA~9=k z)hO%g8CXk4xy$fGE@C~Gav0m55QXdRrcJG(;RR4UR zl;{2gx$0rUL08qb#5C@CE#0pVU>jumMDqizxb4xYBp7#=Wm=Hz#QgThtWc$owaJ~& zi8;QvTVn_z9Sae1Q=!Ur2=9Ys`sD*nRjU5t_&>{4-()T%M4xP_#k!W-1u$A4lo-Tr z44)Mdz&pGsc+@*6T~lMEwsy3GvXoq6Ziv=)9Y@^)&@?BTdw0X)4IEAA;=wM}_+9bq zMdy&g@m5d<%~5+L-qD>;O$)Mf)#BEPB+l#}7Y0KV5?HU}WVPXPT+c{QHW_zx*ed)) z^29e#0W@X+s3k)D8I`S-9oV4uE2_!w;800q(1*1jGii3%St6B3&d}Z0Un4{tovP=UIwhEAWT@}S3`>aC#;!BbZG%qUmNtqTg`oXr zpM|+9Qy+imK-1vt#lmAyQ0aFg2nf|Sc0A38f(@Fg)zP)u_q?4PGCXCll>20};{=CZ z7l0`_n%FbG9vMht%vJg5y97q5!WNwLRU}dTHy^c?2Y_B8R$>yYp-|b4m{GiRoW#JR z^+TPB3hVK8GY1d(=RCo$KNa}{i1m%xs#=ta60Qd=ieEsXzTz(-WCFXXL6Q|Zj%gN)$Sm`=P}cK+6KKyL=9&sAZB7LpQGuTF-zG9oe6}mJl z;YP%F8o9)`jHB%*?0v`k}*{W0`gDz$}ZO zu&F8Z@qpC@ol1?VSGVb4p-K)o);NbFqRH>ceCpj+%j?S-o@fg@TG||%ypQH|#9;0!ijdQCox^getV-n~Y`j+V z~;-&^Ru zV7XMH{LXmD>k1NRzu`jYF{)mdicMEMpW@1~vp&3q@fahf?#`kKsW=Yy_Gfc_jIBTV zC*xBCzRaAfu+mrt@t1u}nt0Q#iR(F{p#jooaD+=_zY>gdGyw0WigN|8dkO2#c?@c>UKrhn z^#-^t%kHN>pQI?fRN$sP=t&p!znsu7I?aiceDVV!f6dj64T+YJ70!z>KBkx<#oqsf z)*V3>yQE-1Q{~G#s(s@;AeNHZYg@=fh}QZq0&A*&d?7ktVq~V>hLHK~fX-IvdvU9q z1v&9qhcDV#F-kbft+&vGP{SnPXnUToAdc75JDr7Ua%G)kUWGN|Tb(0b%=&Wj5GG=I z@I=scH)G;IlHp3a)>FpEGMmu+oEHk}Aq8{32qDhPx8rio+Uob#U(_49nuXiMQ@RhqMUs_(Efwj65iE4w#ayg=zxKqb9F_xA<5A|ySCQxqia4x zIj^m~I?%^^i5`OeuMGYp@m+~Ixha+Z;08>NgGx>;Gnl#oR@w2IcR+iD-J(R_7pnu`OdX~M- z0l(4=Om}YSr`HBlppkH%(4i!RtgX&5N;*+oH%pyw6pw>mmt+6z%*E(Fkg{)!Bw~kh z4mF$X?8bS;Fi%(GD@;_b1uBM*OeB@5x5e_geN0~fT?!jQYV%+I5n%UK3^2;rLNeU2 zjOPc?Wh^gCT1jz3(hV>;_AC!86udRLA?V9$Rv2mGMCw$qwAS#$5V8$3EXpTCjA(m7f8K4F}2cASdkDhg4oML(9dcwEAZeQ<=LlwmhJQLzPhE_Oer;WEv ziAmjAtpP~kfYQIUK7fp+EaO#afHe8OGjM0Va_Iepf_1@}vTCFSJIJSXoSPyf$UC;X zU77Q|7AkK@6XPim&+rOC@JxV|XqIaW#m^%-8jaC+fBDP0$h(X0Z+^Uy@7c8f7ID|VktnNhnGlZ>i7k@j%^lveg)47lT;|#Hb;JQ1{_$Uax}35 zehNkE&K)(6|IA+R97#?YZo4;{e0(!ejK7IpCL6N`Dav)cE^UNzpMP0o-c|Uxlx@~w zMY=L2T){qd>GbBIsLcq9kMn18$70NyXSR@JfQ3I6Kc{L3a!&hD>KxwqXN6Gprk=oC zRqE((nHy5VTe<|MB}FC9+2#92&uNesiGiXR_K{Jg`K6sUEev zN167$?P4-9^XaYLr>s+9G-Nq>nDx%G zc#c>RgLs;VfTsD{GCQ-UBidKvkQX7#@n}=(z-g#-8u_z+b4_jF)Td$hNb=Zd$9nNH z=<(8SYe9|bN#BeGV>vY|iD|3-@D&5hm!Y#3s3bJ%QdgTVn5?K7m&7s7&$0wluMq{h7N-E6mrAA}@B$L9V2@*~ZADH-{xHY5 zmNdBJmkbqp=hmt3z9^+4>&_25hlxbZZ1@&FsJbuXkzfV$Gm1h`Pz0$Igy9%r!e5Nn z1TH)DF0JANi@#oR{ao8R&tBQ`bSnafcS%*l#Jx|uPnn}4x#5wMgR(`+z{DKaimK5D z<;Q0bgGtGGDGD6K$G 
zQB&wzjX&7D2pU-1o#=v_zxkr+0@u)GJe`xXX>)+R;73(Y@@>@pL%&BA0tNzK#_|9kvxo`>;`Y~mT6_=Q+#!eWc=Qc=9HqyQ#f znJ?w?05{oadOu8E-eCa(`$Lh(Dg89uW#au zzLH-SVB^JIEDsHVr9SK_^x8gUr?am42dg<}c`~6wj!uv)Qn+1YEG_$?i znEL_w%BQQh6*Kd|)G5&g^axJNA@QZMx5i~|g!EEw82fCyk7-cZHOzpt^HwKm+@nt7 z?bDrIs1_OfDpdn)I$&#s1Se;aEwalKtI|U9;zRM8jh2|IE9tWqP%&aIARqeT)g#^a zHZ$IfaVO-Jph!Yn*Yo4Yf^uAJdMb5UDEU@HX=?8tmDOs6&!?3EU2?pzC96nwj!8z7 zPVn-9ZPMSj{Yh$20G}*+&WrtcIFJe7k`Sgqr!ESt#5Q#xR zK9b+>!WEi4&q-MVD+W{ktM@-1Nk?f^L*SzYfQKHrZJb+iG%OmctYgPCSRae2W0|_l z()IY9`wsRI()Zjmf6B5w@w__~3*w+fh_DB~GTTIuO%yajYHMv%Q!?}O6iM!|PFiNK zXz#PKscp^M!wr8-TRs6-e2{vsqP$Bvm5||@V*>qZVakx@zgapKv8-VDi1mkI+#wU65CcIOvH5YAoBG~H6<5w1DgF~h9oFG+syFJyFFco)$zDvnjkXd`Tj4<6dy;n z$A2U??H@iVF~Xn*F$u~@2+qM8f7-=m*gT%G8JLpVuYNcE-0Rgd!-aHAOjPWNOc)ACqzb&e_sRWi)w8m%>xN@8kSZh(<5yvf z8*qE9)pysqE`PmMIE?Hs_xmegm3b>5N+NkL-FLB3+gS1 z2-zDsiy2an8M^ehcDiuNy7zz$I!j1w5n7LFLLy;252nd0HdljF@GWI;4a;tE4S#loFhng>Rw#810o}KOvK%`)PC93Q4 zf7?@Kd)K&z=9e1)Y5XrO(dOu5`1iG1(=4|2az$h;+2$kJqQ}u(+#HIZ66*pIF|3C= zcueHyF_fm;GI_1{D45eIb-rEP6fPrk!!>S*3Z}0G`&_<$Jw*i&7!Z4$TF-;z-xxoE z&6`rhG8mdmpLE>+gL$J6k$j16iSdmdOc0!?@H<)?oK1}s>1eyZ75wxpq6*aKjFi(h z=YAoY41|)YplG)-{^cQr9mgF0@q9#WqVo#PoxW#DaBe@WS`ZN1omzRZ)HZ@ZkJL%m zC00pH2$FV%e^HYm;wFgYnUs}A{2gC=dAm$Q0_Ro-l{>oZkXj<^{klZGQGhN)JluCCPmG8GP-oe)Uz~* z#SXSJC%kK;ZOP=1VqrTWqqM|?N!h}kNi;t9!x%C5&-6&U3gPOlVVh{T&rzl8VNv{N z=)s(CEPBOqoOCjG2}Y&a@QBk0_@^H@#OHR`KL5;MX`O@CGvJyXw{1FE$z+qPCoHAM z$Bp{D_(D-kPsK9o(jqVf9)sKN^rw_EByM!0-(phk&vLK1O9RTwZSP=fV9&>^j{QIF zvRCDcZ0#>^z%fp3tdatHtn)hgi0%sMH0M8l@dt4mM7;CrLVaZ3nv)!LUNNRjX!P79 z#a$@c+&^Hw)nmjy>RtWVUS(xv;_@>yICnaB^>c2XVrlSmBGB}PcAxp2|L=F4MwP@D z4~Z<&2LZPJ;~`y2ee2h?rst0S5{K9$Kre^yC_yK?DhhE~I0qhBhL9@#8`R}*i<0yn zscV+^MnVrX;U~tiyN6b_k1sy><{FN9m6NqLWE(O*yRBgyOrRFcI;xabov6EZ*|=0S zY-p_;KDrbWva3(-OP}>t74Y82wl`~57B?F^rj(L$ijms@-)&2)iOJWvUY{BEC z*Z&XoD9w~C)cH4!uy=H5Uoal7Kt&Wd5b?1asZWYVFtqdT;8~KeqlmB z{?l_@_M#YgYRN@-;)9J}4SqT{*8lab)YJH|J9PEM$fTz1~| z7vYCXxR>jSz$BEd420t%kGfvlc7&c95mj6w(8{{Zb$HN6^`?XFwOUwr-PaI?pGxNA z>?{)!0gUU3soLF1;9Bt)6pc_aW#*%OX5X~V`UN=OYz6Van@#u|`Gk;7sL)0R&LO}fX@5Ba3mMAp3A z1=L70ASw#qOi{RXkq{?jI&V*p^e-GACBkt$;=4m`83(gWNK$-bmXAk1l>7s#<5RMf z;W}8avuT1dI!7aOMnRg{zic{B-4i4ErelUil6Q=lze>SQ%427`yGMz~)ej`?*8&y* zcyV3yh|`TgP4%P%Q~u#HOA+mo?gT%|NTlS-IMb4#lvnBk>f3g;7Oj$1V4WO@`nGfB z^}9ZkQvdq*M7m8f)+N7p*?`C zLkwM?6)lAsEgmX*(z@d%Q|fsJTX^4U*OavVe!4QIVCOFFwNpaq5sQWChG8{)9$aE( z78RxqH6+52Mw+4IlEuq<-+Xc?rzB;-ySw}Q1#T3tOZ%^3T>U3tMT0v+4$?@ z@xQ`%O0K69YiQVi?wabu9we?6v@D2mnpU!#axTyW>XiqYPwfeM#cRz<70lTvmgzsc z>=E7mxDY5$h)i3SaR}lt&ebz%9V^CB0BK(VlB91PS_f@s4WsC!(w8+A3;;m@fCB{> z^`(n=#_j#A{`uB`zYh^VwD?O&GJJ6`?-zy#*8kE%OX9R4u2G}!*1;j>uS}H)OL*7o zIzw7D&HWNex6CCxUX;i{w=9=?AsU|D6RNy}!n%FdU7nYUdmK7M zWNC_YTwtP(LTE#PzzcT7l7&!B8r}@tK-_?l^!xski@E5h*3&I`jMl^q|6Bz2;yx%? 
zy&v6^`1uq%VYqie*2T}VuZ%FCMCnjNtrPSj^~45L&6|>Mst;(c?oK&`EbUWoS+dj- zNTG#%QIMtNU8#CxoGnmI#=NB4kNlft!{?ZbAKrxkh7H1in>~{df7LYF zC`#=j>t<%jNm|E(ZK|6Cn`wB4`E>@-n5d}0ZL9w96T>_|EYfLK6}_WWOjfO+|BL7_ z31bozTA6_D@w{zlL}t|r`(Kr3C7u&ZEJ^TJh8gda9YGo{D8y~xN6&l(FOE*DPl30t zL+kz{IaAA@l(1rP{L6<7i01NL=tE*YsGmjwtVCjA;XSK`FO0{qhW!%7EV*)5?+B%c z(IR{3g$$Ml5&gCi_(X&x=W0FgOuGc@Yh7V`upVU!68BD$Sq^eS7$E} z@F7*pc;`-v_|9vmnvtERAPf(ykSU#)QurCuO2{MM{N>2Ba;JI*qhKaudNy6{a=@CF zRzz%{py*U3_OuNPS~7~fGQ;Pb$aoGYVF}?BTrP{66o9ZUkMpUnU_~poK%o)NF^B@h zgEzXC*2L`H*5pCxmHFbA>CH`Zy$%{I$1+2gqI5TouC=Y@(k?r*)1f5#LAiEf{M>4m zeH+Js44-(nJuTWgqnNtabi48Nr@{iw1IxkE$v}p*c9F8PX3euNNme=_Fc&b{^ijtP zI%@H{gk_BBQl;<_n|M1*MM3iWBu(Qe4v|#11&+xENMsXrX};+eQrp#~l1mtBD|>0^ z(GYiHd0yOIpfU1KibBCZz-pma@bcFE>SCrlb4FX^?xy3_!!f0P9CVp7ljZ(q9NNAj zx>KB^W}lJ{WDzuQZK`&Arms1j3A#?mE1r+OHuCZC-!0uuFO(Mm<2RD68Br&v(d>(} z#PqN2d@t9)~rqKb7Z1dBcB^lW9f=vg-PNbj3{bP7vXI9 z@igbHiWtu~)J0Ii(F2 zo=n{WNR*?!>0N>dmO%=i#%koAd0yniG<_!Wa&@q&C1t9)i}C7Q1hPD^l3Am~WbFZ0 zQ`}uQ0kB#^mhCmLNvGKt{x;S;#NqsVv}*66_!IL*+A&4`hUCd88;} z4PBX1c7=Un*Buvbi}7$+)@`dGVt-Cv&a1M)T4P3jtK~b3bG8GTnG}^xlhF_7d?yDr zd~_fnm%qV-WY%}MNR1CPON5SjCa*Y4ogS-tP$ts?i_J7{Y?H`IwKt|$Hs9wmYUZn0 z7gxk?s6Kku(dpq=ej3ZZ0?jV(Y#$3Ancj-6kqG8>D(=+BVylf|feCdAfE$w@lwioW zUSc?vimk%k!n|Da>M~vL^!@e7*@HCmlwjej*E0cJCcCd)qLsN;I6S>&N)c91Hr=PSm$F)(@(UK?Icd5mR|q-vB<2DUb_1)Vmx+;HQ004V$%{h0% zl@*NmsErp)b(@(Fihv1^)C}?OX&SL%rY7s`Y7yfoagLM1E>{aRt$}Bi3*y`>kYc)c zIqzK2Jka|#A`=I()$v>VgH2qOM;!5o@a=NjZDa?8%(2GlqtFDd{8qE2yY;JXjcajB z%j#`QX2Ms0_S_OI&Sd^A+l*0^pnkLrrQTie>eSnnD#jtggA2Q`?cEO=&Cb6S&f%ZQ zW^`wgH)p*H_Ilrwz-ee{7&$o^E3OzU&lmxbN$nmR4Lk6jn zi{qnKofy&ET#4U}xMoM|5UUd?uEkmMus1c0+*hNRbuKXaQ&phnfU7bsN)=LK4)l*$ zLrH(M=#p9F{_w~zo@VY!Kv408%0D0*&AT%{0=)>kN@1?LVO%T4T4pPAT?R&gPLFvvc`TBF~o zOI*TF|3mY==%NL+X4uYZDsIw5$mUhZ%9g>>(D6DPqo4G96)G$6&pU^1+&Z=+L9%-a zyJ{FQ;mJKXwa17S9>cQ~J&Sk^C-*daC5tqxh<{&vEcQ3R7wZ*r;x4zd!L(m)3QLJ7YN-f)ummSdeiG4Lb zbu0dvwJcK{to0dG(~r9hdV7V{kUf6|zVf1isGd*4bWOUvo07k=dSRwWUY1~bTyiOS zzx1n}Hzs(v#?;q)MmP@%=Edk`!?3tls`J00Qjx10RqQFY@_xtNi-d8G&y*)!M>Q!?A5V@pcwqmF@fA=qTG{Oh>jkR_QRqBUa?2EGzl2Ewk=Z{udu z|MvJKb*?cgWqDTdiS^9|O$$+Ob`1SI77D@8D?N$E0rMN1oP`%vSdUo!jriOuw=6{n zI$74QXI*IHE{w!0H{B7rvtAlk4ST%T9TlPOs{X+S3tLk4{(N5ry?C39_X=LwAPwZP zR-Na>TAqMzOyg|dU&XGV z^xG?a(VD%6ZjqD$9%dH@#>5KNYk3bzM4fFP82oVlN!Es=yU(MSV)8~J9%aOsdQFaY zm59pNdVF4^Zx)9VpbPSKBVxI%DwcEP$V^&ADvv#jJM3WRUYKQ4M7N@mkXIfnitpv@+UT|zO^beoF@%x zsFI_Sf0{~?JHEYn96H%tI+pN4f@q!zrY8tuS+PHmi4{%p+4bPxTZKrPo0iXK0q==W z%iOM02IkdgD<7lZMbi$YJ^5^#{Dhsi7fnQnlGFwF~x4)>>^z7y#Z3>gbecv z;Vf!Ql4S?0dA+&27Iq|`;I_MI>HSPM!-dy;%8ocJT7WVp#;o#H9}(=l^G~7qziG!D zS;HrrA&i0QO*PlKb&XzBT9O*?=gsHdd`d{nBstkMerfV#g4Y)-3p=U@x_mEE`FIdv z^^QYp?yPXJSy6qlD$My;tsQ)V>qTJPpM5F;nl%f*;1`3*ago0Jk)F9ve482c`n}AJH@p3%>P+>>fc_t~bn~7vOz|o=zhE+_8LbmSJ#F&2(p*C6D0Ktk zKSOz*-?JQmq8@nd?j;m&A*a&Jy5s_RpQosNz=S7WOmSDk-Pc*OSN_^M=2B0YH`F$v z#tKHKbZkzRPW?tH*ZKs!Wmd2VuX!E2nhD`bSnve#X>AUaI!F*=I=hpyQ4|F9yezF! 
zxKrvRm-=hCH-OtUs78~TGZZ?PbZ@D^(?b!VF44KuIvKx|<;cWd@Pg!q5L7b~?0&={g3S9CJ`^4N7p zZq;*mk}AC1AMnF`4So(h4!bmacGc%ogkxso(7_ z+9Y{3MP`-0W|dOok968Ve@cmsP%WvMDj0F7Pr?qasAD7OoB`rYp~_t>#Y+ZnOoJ1i zq`3q2=bS8oDXQ|e^s-dXHYm178>m##M)dUlC7`~DhyA}NPP0m8vmCZ{;Rxc6%Ik^i zkn1kDN>QSDxdzFgxTQ;N!V6TPZ}x6l^q#N?H!y=GwzTL{H(wDx2S{LiE*iRN!3$sN zV{^ZJjXW4wxsyM~ z8J$RIA%Uy~e{w)DaJkbblDV45QsyGlzKvuHptX2X9a#SJC)-p!%43DfMQ!woZ<&c3 zuQkU=oiw6WMxa(*eG%-tkFBdh-D1C#-IHwCXC@@6Y0T)QAj4#;ScerfgNQ8%Mp!p& z`4@a_QscLA@4F3-m;Z=WN~|xRUfYO7Y>3E)#jENJP3Sv8Oqlpka9DLY)2Afuyh06J zHzbzqRIRBYtWytcxZEENGdLv;dAYkqJU9Ht5UN{z;0XDVd@Oy{~rW1}cyt}27NdUPuzJI)*F zx^2YW?Lj|Wa&IfYhA?bI>Lm1O&5Tq_Zq6ki%!7=wA3r)TXax&Hq+EkHx%Zx~Op((K zJ90eXBBXT^cqXGD*9A4114=STziM$3>4q&bG1-C7^Jg*W&UR$2$zZ*!@eYI3P6U?F ze00=~_K2J4%YA@zx}Xj(RROr=&w}t}g&lp78r83tYcvzj9jDx%-s{zEOI!(PtVLD2(?tc=V63pPZ%rdP!i52vG7Gw)tHulA z}ru((EkpbqkhDqasx}2|pFn#`Jf_KBGRsx)6Z zqw426Ks9{b4@kM6^)Q6_U?JL4pm9RtviYkfJw0jOM_N0m$kGG(m&bH~#i7`H_`r{r zmS&yyzfg(2F1C%>`+Wjl@8uH;-Ir_1ewMth;IpI8v>4!*dHX`+G-SpIgSAY#)F3`T zlxv--g9q~&56N~naxDc~t{YngeH}hb)X~>wdkMLl8~3r2NdI7pp7YL`^Qua`QuZm( zt=*27?DEeV(_<@0oC9}#4vepJzfYdAqx9Ixu>4ZqYMZ%B*>R!*^7i*2Ba`j-q9xuN z-C>#Tz#PAJuS3d%REGzZ5AFj4%uh?~$1Umpj1ufCk!s3FE6MKVe9VJK(htdQ=N4=E ze1Wyj7MaDHO2!LzQ4Duo=9LwONEgTXO^ppN9Tp@s2R$1<16ofBpQeLW0lX63`nkE*?+?4{6i;yCA?2C zs81F{C6t;=X#Le9#VGq!DwdVn%+l!~d-jRr;{5S?o%N7MFN*$xBfRxV z2*~M$k5j*tspL_xu|s?N>ZC(w6{(Yyn1#6}qCc`I&+cyy2NtzhS>kz$ z6L28yA>pkt;;gTgBmGXbsH;UkbSX1y)qjd^(JHj{dH6C}sTXxVrIJB8W1hkMllMPl zY;Lum`(aG?igm>kMB}wwrucp3(TOvgQ}y*ovaJ=JgugXH6RfmDYZ=8WFg-*3P$+NH z=>D^@y*+LdBC=r}?Q`pUaF&6heEWf8D{-6uQ^lP^Nzy{cG!zis+FROcBiWPBsWs)hsC@NQ z(RKM3XmotqH=#fw{IwU%#AT@g)H{kBc{XSJ*-s?LsM5Ded`uP`g+7gv?47lfdqJmR>G^R>CyntD63t07XNm3bP$on62hDT5B$mhZYK z?p(NkruQBW)zSO(5Q!B`)y%H{{*vO6lJ!)~S>QHb-}=MqeIgc}w8w#{cOly&jT zTO*y4Yv{sw#oe7l@w^18B=L+biG?^K?^&caO&z~~^5gW48BU|y`*qb9O-e;sSag+h zAlQ!F{ex%^+5oS)#QDrjf_AC%&*S7aM29jK388%5m&8bGn!YTM(_eVmzqYo=YEos)j-3 zZy$FQ)SbX98ImcjLwuQi4leRE5U2Gc8fKP+hYOl+z>`b^JKr~55&+ivMPG>1xw)FP ztg=0DXqb2On1*&v2SAhNuo#4vHZ!3N^9#u1%L0&xo193eS<=@uj9Dk_X6k*f?n1v# zl*VmT6?$hFB8tw+%@ZZkQ%&*{c16Gs7{Mz5wAS_L0?cr{vgix~EXIjU?;~`q|MmdB zilj<|H;*dICU|y6c#WdLf?k>N6nYbtJY&vBUYBa?du&oA8^bX}_?r5PG_1wYg~Rkh z;Nv#4Z-0gqn^*9#*psn3!n)JT)?cO$N!Fb6bzTp|r`ab|JD7D&s>-%(qw^rj8ANX$aY8`>X3JBRQcPQ?BDd_xM*6Au{(?Kp&%ZP=F5NKa#hat;TsBpBFE_ z%g3xL(O$h7yHa1))&ZE03oF+np3-k|>)*s-k_vtQoOXLzF<$vBreBF+b)Zo}AW!g; zwppn3KD+0gA;Ib1yWwB_jZ)4YtB#9Y0USaciZKtH+(6E3D!}OjZor)PdimDF=lcqJ z5Z;uf-u!G>*OXDTpqp9iaRl9ALk;JJXgiLcKs-#-JHt5Ikw?;yUugj;YKUGS4XKIQY(ed{(A1i|sb32In*UG>W5_y4)<{r1M zZ5tO6il}3tdDI^oD$6g6b@iXJd!XF>$`IdLxMXHU?<7YSIrUf(Z4D}UJn)H|dNt8d zDDWi~8M$9DWouF;YPrVuQJ1^1-~E7nZB2AA*bzQj9fYW)_2)HhOg)vH$3dj z5R6|lm&$c2y~E1i>v{kDZ%%pL3Rw&J2f{+=@|g)RL^#b$Dv!na!PKo(srg8Gw^NTn zMFD7$>(+$6T2e>1Fo2kCUt$K2;2fKTCVRO>XXnuIYaOo!H8)4g`ijg|i|5~+YV*Ld zz?WBXoywo|XUU1QBe#_*c5N^A#HD~-czcuabf>dn-x{km#Hf zyS79$tI#9aEjzw^@Q^NIRxLKOS^UIJ=uEo{L=6+ud=A{>)YEg)Ys z;Q-9|*a}n&R*eH+HCCe3Ym1-ctV@<+JoS_ME(P>6yXu!^CGPHse z|2#cVhcUpGWN!reJQn->c0F69W*kyk3i$gq1)!Qr*;{R~f-3|w9jP?e$&=q28TSBW zinB^5{&$k($LvRh5h7yP$)TO!=eOtP+`h{lv&I}3?yUc?*$32onvlW%9*uFF;;A5PW0y0KhyezH=j?t!aM zh0FH;N5TS<@^=<^5)d-Fc3{KRux)Uq&w9gEousI=vJ*)bZ5K#9C45c5sSlk-v3eM{ zKF8M=F2ewtwWIU;H#3=Ji9TS9VwScRT+@rX0K-Y^(fvy1d9VdLVK`;3lU#O3j)1Gl zS3Qlc$pdY9G=+ptwPORr7B+H#8DNQHDJTEu$17ax1KN}s=Khi`Yc0Q!<3^`Q6-^+w ztAB(NE@k?xW>ycso(~XC;7)s4u`B4_YG+{2$P;y{XN_&NP+h`YakuhJauPM505d;f z%J)jWpGa8+s$PRePH1125*$66lDFtXsBH~EFl@y3%NVp5$*?b*o zMuvSSC97XMT z0V`E8YnggULmd}vsTrN`L+ih-q88fU-X%R3Mf1}$%ziQs{xptkOPtb`G(FWiRiZ%= 
zFQT&&^SnTMZPJ)Ep^2ZSVwec7>%C&hA>WUeHZ0f5@6s$=&2(7rggA&j|HDMCNEmI> z@i1t`UzfpP6KlUW54e*R5hGT0_XdMM7pJV`?KmxjDrgcH9871BPlVTK7q02bDy$v>j-m0`Ax^EYCo9N+bEHT4e`e*CWe|&qF6GM&j8>yJCGAh|09;-hSDzm&&m^>Ef%^1?Y{P2zslB%hs+D>P}qLW17_cX$`dUWN32PUik zrH3)Q^wcrrj_xCpfVY0j+O6Qfq8(KV;adTQ%F#rptd2uPX1f>2jWrSk^4;@xcBXW};OP4P-LQ`z zSIHe<7j{J@bl48H+LNDJzalRJX?{w9ejFBp?X2O)r;>MXdQd-7jde{IX(xi3JS)8g zv|B_vYF`g`K5@6|fXlN7*611s$y7R^7aj=)k{4pz-RyiPC%s>AB4bv5=F0S7AWoYK z_t&=M%rmZs8@$H3W=N8PhKcrs;HCIzzi9jdp5vos{7QHAm?ste0T2eL z?kjAMVJnTh#e`LkRn+qsu$do#1o`|bw-g7`&huNU#RWDOwfD!#(P0=RH^)|o6^l_L zf6nYN>W$+?W4vQ~k~)U=homrHmyQ?PmL=yOSRgU55$VelE&jXb-bQvMf;Acpo?xNq9h|6DI=jBj$Ct7E;`v$Rw zEwaoI6-bMBx$HFklKWw@Qsm=~Pt7Z(H(0g%tPV;OfTj4pz?Z~8prKFN0DIwDHR5Gl zn5<0;VGu-);kdJRED|dxW^Q3NJB^*=%ItO(I#V>cwLvIFd97?GL_REP!1}oXy-wu4 z{OC{vI}hb9UM#MR)}i%{V_1`B_WX^_MuGWQC4oR|f&&x#=0xZ?D?LjGVR}7&I=`hxCeQruDSjP65K$YiB5PQ1 z9v$AR56$3>k5u-opq4t!q82y4d5Nr_s;o1U|GD!&Md#tj*88?`U38)JYmcO=wiZ>unyFK%QlvRE4=npN|!N>p>f_L z!X2K|AWp?U8UuASZA98gD|rI)$tm?zQvZRacvVM`0KMco@D(hcyS7#*x*-K zy}Fb;Ogx0ltUnj`hnB`2()Fs$8Y~V52J+(P{g?q(t3_0y;<$jNRKw4To1`y1Url+w z2663`L5ew7-Hd;{w!gL7kd{ae-K16AxN5shII+QoIX>54Oddk#&0gE_j>~Be*{tl!4aRo8T{-+z9BUlby&^CM3e)&`)eQ9X;mgIuquah8CV8nA5xhtD z9SY-6}{HrcWji2rF!{+g;JI?ncg_(Slk6p`; z7Ob&ZdP94PuJA!!pmXS!!Z%|@UKsnlG;lWFgjPMpi%EQGW*sdY7Xw{P7WX|HWN|dr zc^&cWw|C`s@n{~qLOB>HSTSn-O!F_ql-Et5DIkZbCcX1yd@j0D_>`?ce$(gF-t}4J zfKC_y;lHzyrhccTd8lIZQ2#%NOj0_mdve0Od0`q3gf|(Ob|-j>KJBmlz_W3>CI^fY z7skH|`TF%{f=F3Z3h_ciB2~9grw-}YEnA&8ikN)@_ zrn<1rZ1`#~Tb^?}3;=O(9Jv)(8yFP#S?qO*F6aJW$Vz_SzLLk5`-aut&=LCdTv^Um zNxhW~E65V~XC)-)-bvD{O@Q$bL-bZ{`6gf5`U~?~A?rg4ZNF(s5bv(N5>%$+(Cisb z7j3U%)L3hZ&U9E-MnaWTx8`^8O}ky%`qphBX{d02ZrkhRJJ(0OFAH;iQT_q1UE&Wn zX#bJhfo`Tlk9mF7DY3V))H$eCBLw>5>MFA460F`uX2?#($J=@bpgs$kP2I%&^^e*Z zl#8U~lV&Ta1Trn=M}fDh?pS(I`-4RLcV4zAaH{Xefn;Zeq=@URI(e<$D~IdS+)tr& zlF!0{R&wKPWy@<$kHg>Q#T9klQJJt3$X+BplzznMA>;2<9d*2<@f+xX^Z*paFBe*yjj(pw;+z zC+arRb^o}9h2_N^KH1TbTV!e>l;`&?D^<9}2T0*j#(2>0UcjOhC zaC2TKX6oAbq#+BiA|A!Px2#tWs%3a;+VA2>#3ro{!1cLS^DnkFR@1uWm-{`- zc)A7WuS}`YA|KsHE%S}yTBNULk|Gm}s2>K@hoUWF*Li7*TfEs<-@7Zt{BJk&I_7(q%)=Tptp^!)L zU?Ct!%}9FphwJZHusQE~c@GyOY71jc#s(~07>&{u;+(LQtxBMvIM~Vms;~6I7t?CP z1zU#Jb2lBzN+*D6)>crVA&lTV2*_LvmSAGg8jZr|QB~Rfk8Dvl#QTwJ+_NtncF@xS z{Pl9ZP=&Xck;?zR!O2RFK+7uW5m7fA1l)3vULeWimW6KRS zCdJSCn~7$(oz>)vBG!0$3Gr6>3AOV1JXH-G?Wm*iN_VtEI^hmFSGfC5c=tQWv@7dR zJm)$YjY!n3TvC&mU=9~$Y(%$FW1|rANMwfrA@c9MLTkPRg)lzO>aW7&4C-0(_+z|0>)ROviAxq? 
zVpsRUP{d%!2zftHb(`*vhKXe+HwvG%IAe2*6D;w%@k`RI3=J=~Zn$Bs+|(EKvM%Q3 zVg~AA+_(93N4KJzCuHmijT+r~5myi&IO7B<{j{YzVD%&gIa6>x7Y3=+ni51NDIk)( zsN$XDg+Jthmyzj3?eR<7f^3MK;7cXk@5VRoMOzl|k-VU3&7Oa>w+pA?U zTt7ZBgDj^Snv7=xB({!fefM!!lg)&%V}%s8j)ofVae>77k9P;~gN7sZ_Dp9lqHtFI zg*)91w(ztIP@i{wDE#_Fg);9KT7&;t-wxS*F(|B)!wU9E&Z%VfhV!@pe9rTWxtA7~Ml^tFUr(iy z{dz!QUr6}C-k1N;@kS+_OBsx8dZ_~PJ@N7~S_s{6QJEQRuzaCK`cvy%(}j^L?A`y; zA|Aw$LDTegS*(cjLC&@B5lP zz)MJosgFO=PeZ$x=S(l05bl;(C$Ve*CVl!wh`5Iyg4&Nqco&)Nz3}#NXAZuA_^mSy zH-&7^-@iEDUhMjKo5Rqe++V#=3d_|BSlz-IZNi%#k+jrDaG6}PL#Ga<*APx<_o3z6 zY(zbWr?Eat0>6^aMFF@FR99utc^ZL{uj9V+>C>wv?-V&Jl+FM!Ul*rva49{iCL=4w z$w}1bEVX-&uN7d`dmb^0*-<88Wd7l?k>_+{QM zw*9L6+|C3&k@H-6w73>K7N8~%5;EPAMO#}(-oI3ww9Rj5N1M<_k%}Y#ISmzzLBKQa zC%3!E$9DRIzM^Qy=W6fYUAm&ghLkSwq3G}5crlg9ldaPVqod_9?~*bgp+)}4je}=A z9{EePN4s^eRp`3~lHtIw4M%H5M*{PRi&*}2hx5|lklR5Dx5GA)GBCV=O3xw6Tt~PT zZClS$xg|c@GqQhm2l2h`CBkJOEV22$KaOqLo`W_)Y0~HfaPHaqKgAd62K*fIv;wtb z%c{Ho8S8-=D$ z=jceD*uet@9KK7Qg*E$JUf7DVmBNi8Vh`sYEp@4;RISZkIs#MX7GC02MS=y~Y3ak` zw3^>U5!D~8{qQ*i6wuKULzv~j#y%eJZLudiZ-Ha|NDEt~?hH=N{Z8XR zEUnlRWpE`%Bm6owvF*5tM>4HoM8($DmTa~VVlNbm?1{~}s^YSr-#u8?P(q)N&u}}WmscA6qyQw@ z$8xJxfVgdd$&c-AN{>^f6C(dd7kqm7w)U1pUq3iQ)Du%#cLZQHb9|Nd?Izk_wU zk_ZuxJ2pV}6TZ*wzW#2`7-{WwERjnk}CAfe&g*I?QrMWY#)d}0RvxcAS}*0XQ)IJ0&B;VBk$ z@offL+-7#tnU*V`fp+(=o7y^GD5qmk&l%69zSpA=+V8B&m*37z)mct^!SL_1%n$g{ z0JJn=%jI9+x%|awo7I2>{AuMcYYRlBC? zkY-bV%$o00c8D{tI}7RU(en1r*-vqHhMb$`wJ^r!3D%flC*w z3x$s-S_)yGC~g3*=njO6$(awFn75! zrMND5C4B9C=fR&&(22ihTUGDMZD$Vm$UIU>7Zh0({WYp;TQ=PU&p}sg+zLl7MI|-- zsoX6~_^`Sy7Xt4C=p>p^9`?t5?+v!7-y!Sw@4Rxg3>xgp6qpRwQ=8L%m)62Z)NKT zuXH9l4%$*}$JCwutS1|Ez@E8}v_)>iG;J)rR#?o61rJn2K7Dlzj!jZEQTTKQU_|Dq z5ArqV9)!BjEu~!g@j<5&U*Ezxfy^hJDw++v1WjtjT-*z4*1%txN=>#6>i9(zfwOmY z^}E*XXoT|9P1LLD(%h^7t6^2m&;y@A$b(56X)EM&pYAeRliY3Fe0;bRT5!VU;I?UP zGO}wzxE&YQYK$&=&!|c_tsPe7P>SxBZWF$_sU~qKsLg zmh0m~*P_XsTwhxsq}IwSCwZ>)D;nfL3{zBc;O52w>pLUs(DWo+Y#1Y%`HoKBwB5Q{ zntAoqAHtiDw@}?e*EJOQQQebAQMB&%>UIE_#ZG%lK{4Rv;@iQ8^h#`7bG0r1SRj<` z_MjZG;WJJC%0gnlP_N}{TjwSIj{ z(S_pKQ(|p0VoZ^Jt3)2XyAv~56pSmhq7sN$4gM+tt=s*){obAbJ)ZafQ|b5QqsK$> z;T1DPFe`MhHHcd{RUSL63o4xX-t(5>Fw9S5-Ujboh_GBu%M9Ods;`ToK10s?U+)4| z3}c^%nNYgg@M{HMY(%?<>Sz7}(m$`9xd4Up3W+*SZcF8tnEoZ^;f&BVG6%80?V%ZA zN|YKSTTj;+pM_K9o)-N5R=VH+6wH}DXkk_S-uTF(`(7N2>S}s%ZqjT7DuXP^;|bta z)r6Gwa8I?RKAu9)lq~SY8lZ+(4{MZlocJoq+P61g(6uy zQ@Rqajp~i8HhzPr(#oe+$_L{ZR!2a}6!d-64 zOff1L5sqJWLs@aDJ_4VY3JRP)n?`&q{DI|40b|!y7Lo?A%L{D;?!-QVqmp<;FHB%Y zCPDSy59d?Y;v*oLw2;KNiU?OhG-YE)R0~q_zJ8$DYL?F|$s3Qny%)?|FmVv2qBx%= z??nx$OyCh4Y_G8dklvN4X_X8eoUVI;;C^>DIC~=(!W$<>XyvU+WO>c&Ea(pmeJ? 
zLzEKA*|_@oX1l9YFVlCm6GEv~f-7oh@!KSOKFQ-g{24*rS%E{vN_BUMjFLoF%En;E zWKW@BiDrwDsoZbc^e_ovI>jZX6in{kuG9!FUIU$e*hW>pT)D<9ptQYam3fdapX63z z|2nLz-r>MKPeayu1+HZ1w5qdGD)4_H@3cELg{Ahq=gRWJ~1r>6dD7 zZP8&Grc{@D7!$z5W37_N(bf=}B;@Qk;>mHpP9)E$(Psr;rl^AGEjG!R7^@lCglt|a zI+dpFQ>a_IP5{-6`H~YzMN+gfQ+$Cwq{PO)%wnyh;3P-8*X6Y9n#Qbmo!qGXBu%_C zsaw?lxWmbDq#E#06F^Bw!a^U^7gNfPnm7Ihjp!5|(e!M@0AMu8Y|$^rP3wambC$s8 zg;Fk~Qs4}Kw#vha*v?&Oyp0h881!XLUOHX>F)@fJ5*@Ml-l@E3^oWqNs=@fLSzu$w zGlm8cz5YK=A-0a6F>k(DrbM#U8{#6 zDL8sBW?i&UK8<=?0;d{-D-7j)HJn+a?N{tqh>NTh&DOr_2A0n*>Sbzu_(pyBq0N&s zYXK_Kx@-2o(Tm{%k2Jp?I#i=uON}=>^dKky{9lCWTACeWN|(ka-!mmabTt2jwE%B< z?@r8>iQOOO%)z8`06kDfd@O}4A^4*TJ4A3HCHQPL|Jly;V9HWBpi_E1WW5`WuC~JY z|0nhQ#%UAtPNnv^7X+lyp%)51&|p4t)7SK=*%EDVBWn%>t1@@}I&vb>bXPL12WW*f zOfGRpbS}g4duf2<+i)c|$wWzWvK%6@ZQYk<dZuFWf#%4_@~9R?zP?E)ITPRCO?96V>GOYRCv{|BW1jPwRqoSXH49$rRT*< zzWemp!}eSjha+%>!Kk6E?{w4>=Ze_%wJv2-SX_wEL6uXXuIJq38(biUCJIun3uGKR zxv0-)Hcm;Zc{wwSR|N2`rakdgR%_SQAl4phP4hB69qhMMmT!SL6iwTsvfDf}`w}Zjrs?eHXILJM_wRfKEbLviOkbf_M?)gZZoO=3Bgriv&jU`78ZfZ-%yN zXS8{GP}g&m@}DGFg4*6QoNA?LOWDZ&2rgzAYs*$(pHgPLYcHSHY{O3JL|RpzEM#{p zGjF^_pu$g>n!*=XEPxX=-ms;bO+^}}BwMHRw%4t`d#75qSw>j`4HmGqY_RUm(3xLAu@EPsWhp>ZDViy>jX6t>C=%0iU z52*_A0E+k3)G8Lu(+Nju9VDZi5;`g%0n{`$oZ)@smH^ zja17g%Jn#k3G2QVnu=SljY zV5q^%i-$eLI)K%#?Ui}+1gvs7e+^O(T=F=|Z&*><>9qJk6NfP&A7o&$<;>!-qkO$! zfRFQpKR>fH80I4;;Z1a{vselR`e*mwH_8dk5pC--H3j{3_=H?4cx~(Gz9-H&^#Z<$ z6o2P0H!y2o@3dEnl_X{RTaJSEA9CCh^cAh zax~s;leD!^vI^uNGu66Pi?m;#>eNJ#4f*t|#%?&$ZD0>C@`JKeb!w|A|lxzf0GW+jJ_szf#qZ5=t~5cDM8!{2sFQw&sG%Wbz)HOtbuwk0!%N8ugF zYHEn(arTD+mdWgVGaB(Hv~E?7d`cIitc`xQ`V01h;q<|jw{F4v-3ytcW7y}jJgAhl z1V7zypOmtMus^rWZ@cV=7M7|H+u<}9#3TB*odzcEV{8Xyd|TieR)xXk`V0Efe-O1A z?#b`t=wTt)-D3hD0*K$8o}8L3_)IXC!HCd7^3AEYEOiA#oc-knC;QC)-sp~3(`klr zXUa{_wlCKv(#oPI5lv)-z{^y_ng-N}*2`i4(}Q_5>7wVq%}_Grpb`ctT04v{!tq8x zu<{I2#181~sfG}9CG}F8g#?(kfzXG@E$tK}@`hrbE?jkn`{*tPAx^^|3`laL>mlG3 zKHbZIlGQr}l&(g0&3S*DaYnhCHl4#^q!L^Ct;JcOxmEOftcmLPHukk{wp^qKZH4*G zPM>c-J0~~a!)@^MilExrO3W2i{;q~5Yi&ldN_qU1+kQ1-H}1YKIqka9vyf!fm-K=K zO7a31?QD9DJAY#dTMAr#`k-z@FDEQbCZFK}JZ}DgM)iT0Go*u6wFCS#mZ~Z7M+taz z9QkE{nmTdL@WG9Ify1Z|)^%+DI2dci{y6@A6(&?oK_~lW_eS4OVJHowXJz+Oj?q{z zTfMi(=yDS_rKMNp$?=hzlm)(M6;;?@81V0}{eSP0aKz@yBAw#%O0%5t%rll9F(tYT zLvP;^l&aKEd#H!(jE*5*4!ZXp7JXqAw8j3Scwy-$`ya0$ftyXu>c zd%nz{Tj2ao2=KqdRL$T=ZyO=J#C?;aWZScJ4!Z|KMJ*Uil35+51wD`}9M&(T4GZ}b zAixt#yGAp&GR3n~AFrn?jONx97;Vl{g628==-;Z`lwBqVuPS-w zqHYqL&|Ri!tmB{jIoOqn^<%l+p|Vo=mif?Pn5jRQeD2iIcCxdj2Ncrv-Z#H8AOP#V zc|fdC+}<^TZ1dP@QQBNF*1fR1o`gp#g0ED&H@^i+onNY`50gK^y$@K!E8TCXPZX%r z_Cws`6nJ!|qXrz32L)+@2Yb?cNjtg!G*?yPpSMEN7k-m(asS`h%g+yd8P7OYIUrh%M!wU@ml02gA&)z?sF zRpTKZ1TM@M)08+%7YmUeKwjdK(`siV8?w8n16- z(cJ0CGu|NH&qK7!zcY2Pv(dVdr?(^^%iTG}-Su|l)_;kKgAP%+kcr_7wsrpd1X?17 zmcXxxS=NioJ_sIZ_UnML3@8U8^0@;%c9d-XwQK)fF`BpMu%2~~m+N?i>3wLb0Fzji z+LaDxut(n_bOQtlmBOhe84>KB7)Ri%#&@u*r`{&8{H8Dk)H5^}@T(aa~B)Wf)w z+t|HkgU=(0PywGLk+5hPNtq4#n7HFom_EF3B`NmKn zpE}`&KVR2O^}>*X-FP9T@fn8ju5OGr0FpJ^<$&u!7w;!e-D8OwRUQ7WGx>s-8Yr)7 zOO+m&YXucsATwRj{Qlk|L8`Q}f6|c-*q>C%BcB&upmktbvHTgB!kn2`wG$H7hEfe8 zWz#@Y%kd=Gzxplb78(Z1zXB&-Z=k@xaHWN7-gciJXI(*j4C@)%wBv(fe7 zNUtSXL~PAnNo>BTM`Cd1>E?5{E6nb7M8r=A+EI$>#5CKZT-Htgl3i&hl#{H zk!N34Jj66?a=!IJ%WXy>YPw!{5@JhL{xdPk;52O=+|X)s-kReh8itmi{%c%QWw+4Q zEKFr@Jm}4e#U}IkUsIxkbOy4YjH7csC5kP~M5W?VoT7W?-R9&B4#_;WN1mr|!X^JH z2j(T+!}&K2aDX{iN-B8?7Y`Ht?PG{*$b}eH8h0Y&{rA0Ig)qo+6Zdx|(;%yxQdR3M zx6dF58$0Nr{`jGt)m-c;uYc~$^yQz|6!#C1?el0%sLQVxU|tzZT&U>t(O2?~(TbCR zGI*?7xwOWIRGs;L`cr-CwI7?MkK4#b}7HT;GnAL{_+e8V&Rj(cUsL7 z8IE&({-!fj3cAVpA|8vT`djMFRQVZR(kX|gcBWn2!&+-FO_j?8$1+RyX#{bu8isKX 
zrnL27Sr6^Ka>%n?n*T}72d6w*OWypPU!pJHI1h9I_K)gb_u=yE3pb?2WvcdGde=^6 z@$s)KNeI_{){BpO1CV$SyU4&BAeW%qMdm1A_Hh zBmA&N0-L|)QFI|7?haHzD+PDTW|#xkOTOD(;GlWfgJdR&#O>MhK+Cpun{5eSx=KNh z>}R$aa=pK$QVDNt6&BQU<#U0|2l}6O_@RY&V!nLQdKbt`2xKbslx@P*kMbqbU_pHU z(%FsO*=5$mVGq-SG1tDv_Ovo!tUiD*yEjV_5Z>b8G~Bb%LY8h2b!l3}7E9pqr?wF~ z*fC#bv;xq+e;uv$6PiP}T75+v=4Mfd3UFAwWOWZ8_Q{vuA`F}2W0za-HKqFIfn`W> z!BKHFU2*slw+STSlUqcHsdL~SuUSHPl%keP1sk~CTPpfdyS3*eT zS4iE)L@wkzR$bq^WgS(tdR>L&2ejRjqufeh`mEsaruC`th;H*Z-(RGSw$=T}JuU>8 zd+j8mMlJDtd}XpIXFv$z)ZzrUYI=CF$?|E-mv+p^+TVqytmut;{Ugf3A38G)JbBwzj^rzbQXRvY(WlLx-1}HG!pf{6@Ii6jh~8^QqgGgk(rXAcovyiDZgXbUiHqA;v1Kf97r?k?1{h-J`KGE?RmK`@;vfDV1r5n& zetqU~Q=Wz1x1CAjaaCcMTzb1QVz~k{tW`AkFfr4W-|3yhhsDJRKH##a%VDn6kDw=5 zOEQ4f!|J0i)OC48PsKXbSK3-h=9&OMka>$gW53+oP9yWw>f@ID?qqe`SZ;#>qnG=v z0A(-=gt|4Osw)=HrQ_C1&$Tw_xdS+QU0GN^E~hW|9CPcT<@01J*S*d8^DWVl9FELP z*=03yuWjA9(72ZfW@nRo_EF~)lXPF`1pwha1uYB!GkF>oTcQhnMi`>SyGWwZe~k7k zuTut-ylY!x%sDvHS?6A37)6z}HH|y9`}2RFmDf?gaS}9-mp}J9%S6_tgCIx(=;IT{ z#w4tE$k=GhpW5G{dfc&K4z3KV6a=+D>GT! zB!?@L>EUMQLpw{-{PZ5?h=8BNm5;ZA2l5tH@K@GO#P^7>Y#(%mKN<+-O@5F(a@|s&xGddLi>jH~&G8*M7MC>FG6VEnwiTr4q;Fde2gjMWY0L(Fs)#k*3N{Dl>`k9FEG#OzZWn7gce3zp0H z67W5<{l*c&pHkMPAKIB#lAfBVzG1b~R$CzG1b!nEd1T%}E#lx46NT#T^j7WLsC;VI zVJ$V)X^a?}t3l?fCo|>!(ghZKcscTySv^PV~#q-bo zs=1#8hwbJr3n3{h!FK?or z0>GC;%gM-Gk6@4D8M|i<(IWzpjk#Hw<5d*r)U|Z9f=@*d9&>}+@GyBeM)yiFSEuss ze0EF~3_#3OS$TeK#@y)z=G)4;=zfXABJB^p7%|wlzXrN zDb}^1mZEl^J&;w{H@qH)kVS|+5=nW(ejuD`Yd0zE_ODGqxRlC1cnY|n4P$mq z(~r!rn$hP~B$_R}!PcSb#%dem>t;5|wzGa+J}c`=2mtR0HYY1;+#X1)fxX zDeHfbq|K)f;kH9~wpt1$XmUhjL0J8@BA%^0jVtUX%W4@KshkHY1<1wfzDwmWP*=`|joVd5QFy;hhG`bzn{~<_Z6bC;w#UFJOWSjk^>euZ|DDy4hiHS;o9S^5svzLM&s#*w(1J?aHagY zmEI41bRu~UxFVor=j=U=yJiC?FYWMLNIkVL>0lxc?*}!tDk4%=xa`zLq_p28w|)ew zK{l-gun~B-9aPLGnN73%R5_9v3W`*O9fUj-R}U zn@jwTtihzGNRjpsd>TUuNDR=W&uzf}ApxAD3cUEA`($iTM zFD0ks1AbQ6^BtMD`|1|S{gAe>x5%xvNy&@Q#-(Teg$htLV$*gsOFRy&WP3h}ZMR%e zg&*&4njEpa67~?<jEH0k$`n@oh6a7s3yO+ZDH9O!~%+6j9AmVw}LO!cJyk z_I7?)vr^d{lj=1f$&m9r&dep|7`~XMB@5>{&mRmS`sgwpDs9?7tr`n+DD#kS;M?=> zasD-OK^89!=cgv{?&j!2PFmW57J~-{sY0FE6|O3u@_qi}i;~1l`64Sj?9}WpLDv_w zQA6tEKPT5pyd`FK?}Z;pm%f=mE}$gQeSp2;Z^d;Lzg#U=Vr9 z4()Jry@ruHZHK=*;vWufjj==?jhr!4iqc6_)<895FiNMmOR&*wr8~jrEkUoy9j$DZ zA)9{qW!kQ9Tj1Tbbw6`j0klf)mgK))I@(XiOl!GTlj?A~*MZUs>aCNUY%HHpUw>{w zh}`ImuJ146V9~?|zl|B%2g&6lpPF>Ao7;uB82ld{v-m$+x1ztpOt~xGhd}cnt2k%6 z@^Ja?txzg*IiUG(YG|fO^pF&s+o;f?YT3Z0mD99TDh%uwgl?-lm4CcZT!{Gh6M&Z@a8wA4x)&F^>YFFb?ntaqriJ*( z9d7F-HyCY&PWp-CrM8DVV+^@rxQVY-_L5-zIG(In9DeR&&Ku6V=rXv zdtS)LMaEz7CbflRca;`f9|w*kP?WtFPVnIO6|{8TH59dTY19>e)mI`!z{}*Uyt=B^ zR+MyidSFzqRQH}M8v574Ppaq4lc$o;-7Mz;=EJUD#*4jj-zQwq*-JZUXzOB7rKeOv zI6g_(TnTU!D$RH04QkZO_4IY>bI7g2Jxh+h_~UVeo6ZNrYwK41Sg(eJjG7FoKM^-V z*5&v-!FQ(CGf_XxD3-ITfd^A>L@Rvxisy=7HT`UNBPTKI@&cE%oC2)Hq?YG{_K{)Q z{EvN?)*_)Sr`pin-6+gJ+_HF;)f1Ju5)s zZnpY$&7R1y7{!ZiE?zTJ-&d)Nu55+9qB$Vn2#CJ<+j00${DDvJihg)*nvQ04y!uDx z5foOYvK@)z_$0n*=Hb9pHO11q=oCy#mi>sbvT}>uYq=VxF&e;ySn&|}oo-Uk$~x`Y6Dr$WkAMEnPRE^G|=S;>Xr^%s&ODy8={hh7%krW0g z$RI)O1fMbLJa(oBcI{u*_b48``!KxOv2O9oMp9S(+FPUPk-N)J)bHBOh26t@OV+Br zvIsa;)E&S5+?S=B76|{7s7FaBMW-jqvctQ&$QNuedZpXmcMk)NKIpx^ejzlV4Xx1N z3sW20Kh0KXSC$D5GLK|+f1PERIwqR4_-?<_glgV(4mhx`!XAN(+VFuV=nP3BgywW$ z4Mj8>u?)UY`TbN;(45}kQ|V3bw?WEk@$SP-l%P*2TAaXNOTNb9$a_5~b+bfc@!^9@ zf4nE=`PFEvPjHOv|D%-daD5y;&)GiQA?A@MLmV;KOAF^FZ;oJc*_{)qZ72G*{#Bx< ztIMgE&^dW}mqrubreW|6Gcj2Xk0)_geque+Ug%2jvNqm=ywP#$^mj&i`tLe6Ekevv zzbdhGtlROU#?MUYn|uXq)GEZ?0I)}Ffo_y58qubdEew4*S+w$w4&>@V1Z$KFq78G` zp!jNperBpQg&$F9YdO^xfeV(N=~u`QUNyU)g3Xb^6eEmUhP?J9FS|GB$>@M9S`NNO 
z-USomkqT}{Zx@OUMDZO)QTUYV-Lb_38PERj5!`r)xNTS#;sHRSUl7N3}#)LdNoF;44?TxpJKAHfxS(o>hS1;1b$S>Es)=_)Gm zfBV;RTb+Mujh_v~bZUr7@yJ-QR39o7xa!7$rfrcw|JW#bC|DmB{oh(+&Lbe%gw!9n z_H&G17xjVzB)h_6`}7U{ zZ?csQXNUd=Qcc%0ip)qumEAx=c$W*K0pL#%CNmm|DqcH*O}f48owA$0j1ZcA>X3Wr z=ZT9E05LyL?P+i!1oT|pvAcVU{^U}?b=GM9)8m3aiL_iPoL=0X)o+>DKQ`4oeBi-6 zdq0#(3P-DU9ew~c%Y{9soTIX_K1Z&aLdz9BiXL1fpUd(}PuqhS5Wz7kHZ%txH$2uJ8xlI{Exy_+Deq?q*txtbCWndw`hnwQJ5AuL)r4 zf)z)3sb=0hg)dyy+=snmPni*96 zq@IHwV=+oDT#qdCzfuNc<+g8yy>6tc7y5>~k!+0zW@j&+F=Pa=_>|@685^j_H1Iv! zQd7XOY(xMWV_%GkgQN;Q)~7%}YiGUyN*Mw}OkoM!2c_hGCJTX-*#*NY_Dx_>`Sq3b zt1h2T|M)OTnX!(Z8Z7Txj3t{92lb?9ehK5}TgFvMFsV zR;yt34b?9DoQA_!N)CUP`hyTm(IdrvRDH3~JEPD^++903DNK1$^s4izLSyyjVA;*Z z3l^S1wlM2@e5C_iC8_4y$mgFouD+`CStzmqZP>=d&Z_}&5$d*%qHLKU`M}U%BVWeG z_nOmmIIUT1*>KG`-**eh<1m9Grvl=7CMdcIv2viE#rp-B$9kEo*O^}JNiZ^$ywV0> zac^MDO~Cf@gbR~kH47hFdvzthwggA=9bW0wBA*++LDyyKy=}l|+51a=;X@~LO?2vL zMy;9Ht`RsQuA;_jkG8Y);r`%Y-ox9yx1%ooW;W3G{VNm7V^(9PO1saP`Fn)ivgh%T z6vWh7HfuIYMbaQhutG4DfyV#)SZN0mORCi^6HBqQ(At&ya(SA$l&y&z@0RAzrq;USleJ(k=yZ}^WQkF=OzxfjNWBLVqa%IK8#gw(G#q$h{ zj!OdfsWg}w2iXQ!XdtzyG#5L23qz0;4>*n1-D@;uX=U9N8kjrwNb)@^M>^#Ja}*RJ z*iBPZ-aeW6bH)+zWRdCbrT%wE9|Eg~7D0>_Hu_#){?4fWzOH04HTqdqAz$~`%(VSP zgO=P*u!5(7x;7^$|8!yv4$Oa2lx+abu~kx8_wul#8{Mo`DJ{Y~{cZ>?6?6}xLtg5> zfs^Bg{#xi#sJ7L{y#_jqd;A{&GeOM0b<%5+KjoS2$@Hq4Hj@X9Y;P^YOAMM(+>=R= z$_c>A9#2Z>?QPA4j-R8-BGk2O>wi8(fdZ*u#dl zLK6|*vF*qN@P76ys#102qZOxet5o-tQGdYKSAI5qURz768(4Smj^^pgu2pe>M|0Ft z>Kd({y?1A(-)_G)EM#|+{a*I;BivTLqv6dhei_>7wzrn?!y*~-k`Fs@Cj@|Vj(Mtg zUk1EE@h3>M@dt%qOB;U;h}P|HNFQn^2v}~$?4EJXE6uGa(5=ZmyPY3aZa0fMZ9iJL z@jr;{^i^LbYfEEmEz@wCaL!D`_6wf1*!WAp*Z%+O;`V z^KPXiQ2}!ldttT=K1jlj8IE?haC+C7o>P`C(8If>*SXb3N`zFZEBO=(Xxd)2u4p$p zqR)4zRKB`nKFHNeJ^?3>=UnfJwTX0{KQ|Dpk*g@&M`Q0@cv^Fw9Z`B%tad5# zsnb>*`t9e4z9Yq@-z=Jjn`(j!V0rsoiVosEayhP_M}`e6!|}ZK@%^G{gqBbd&AekE z#S&_Irk`fF6G@5Ti$CiRZ_2pu4E#jY2Zycnog~E}nf%r>L^7UcIul)fh2UFj z8FeUcuh#Qiu|n}9A1zC9$6PV@uO3`6fTLI2Q%?R&^eN*x=<>%#cOSE#!`}sbXZSpH z9}#HxS|+`x+|18;XAm=p-N{e9arbM<{xEzu(ey79P2qc4FYmN#2QfXuBAe&O8=Zdc zewFKA0j*oY_R-!us@m!jmXxHvY@Z?I40G#Tuf}VbF1{k`nvRbPU5Rrx-@3vw++=ml zd>$7ET84E}oM$dcY_GBC%h}YTxt6DG@I%M{01ba<&x%vscz#b1Ygduz*B0}~Br*M+ zOT+;TLC$OEd9{nbh<-HIwWf_MG`stmAlirSDztZrWo zJcWlp?HqGlMg7dS8a=LuYOQNz(zWDsn|;RMxasOTSKDQH8kjWUDLFgt^*qX$t4GN5)S9X*v(YOB<~NT1Ji=I1+V68!lN_k?+k- z@b|;_7B^SYY7j~DVF~LR`EnW8C@>K$!~r!{*(g@ zZRBS^Q_`F9{{Z0R)?N>jTED%ATDb8A%u-t_I0o4vf0^Afe(>j-`TSl!PP?ZF=zFrj z)AkCj&k2ITr_wF1t?r9nTcC#csrfGucXRG>(y!`v{uP%>j(Bv-sWmH79c{#8$(2Ve z0nT}+&EfmaV#;W3ZmsR)xV~+d7y>vK)rU-W=hn79D0o&s0&DR90A|doEGmNUc-|y( z0aieJb>LU6gO%fEeC20nq0H!d8tR@fwoN`eiyQml=4{43$b=H3`HGHj2VGovp889c zlHXLcR)XefxLJY0B%h`$AH_PxrDZk8*=Csk0BLb8^j75koUhG}I&;Tf_0eeh-S>;U zA*9XXm{)4YwPi^Kyq61;^A#MQPHJSTQkK^U({7(R(q4EwOYq0S&kovH+pe9cLwRvE z#pS@+6D))gljwU>WVEsHy{@FOm6B_eCI^$F-0_0HhHJF=kv*S`eiBJ-Yk6;PeW)xD zBJC1vjv_jH?fh%XHSZSbTD{e;hV-kcnS3{R7WV3Bijb-ZByRi1kb2_+h+9;RupRt!?ivgs$g3)1`9yH-ROvn&Ir=ytur)h+9o6s4I|n5)C)ScbbRx z4vDE-x0bi^%N@jFvA83JLJ#odb{%WB`&5@+7#dI71UHuuiQ3^)Xt#*;Qh&S%KK$1W zYI0{}WfdDFd9I71TxojTm$vs`@hfKXq*8iFcpr~i>n;30XQ}BoGfQzc-9!Bv4xfDv zU-Ycmbq^*Wx;!#2o~mK}>_wVc4@Vgpz&YlwLvwv2Sf-xV_jb7{J7h1HB9ET~vCU^r zkM7*0O*gKm*1jl~&*87^yYTl^wS})Cu+U#(5x_okobg`?_>;jpvwTg{{6TFbizcf) zlQbQIgS-0I+W!Ew4~V3-{f2%V$i;rgd1@{75UI>inIzmn{0B<;uUZp$dq`V$XyLkL z5?eHw%upUwoc{oMF;j=b(wgRqv9=;9qtCo^tv0i+>bgvG?7r~?rG>&D-bP!EgSTwg zOW~`nBSi2$o{yku@!INFJA>S?#M0p5`{UNA_zhQ3@yvH$WwpGs6Yld`Wk!MheMraQ zP|K=DvT4w1>dmb*@~K1~V8N4x81BQ;y$MT}os47d!<8Jr#Jjug0$n#r(@R4f_pQIm z>ol8wWpB$oSJ__zd~>JiUjl8k%?8faJws4slJVmwj)-ozUDUg|526U6tjc%s%# 
z7T-tz08A+)MdrF3ex&TjA%a;UkFUn9pu2~|q0d9^;4y7>J52k?HOXRAk{&8h@sv9t47`Pty{>^ZN6 z^*a{7*0t#G8qU^DNn9dC$0$xcYuCJ8;=MV1ZFRn8x{}rsw)5jYUB(D?$9!-r$)vxt zhf~q)^<-;3YgA7%Z1NgS`hC8vmlM5> z#G539k&(NMs?2TG(AH_@XfxVbsX07SzJ0@M~{@xw+#OPi8bdZ!&sy{13iS-muo1LHrB$w13riKuR`$mjc+_%p#iN%js2+R-Ussr)p5yo9^=-usTier zyOkE+d@$VJ-w~GhWk*yQFA&e;uxC@p#27F z9|(AA8MQ4+ONNijX#&HNKh>4_zgpeXz8qV4uI5XoxL9Pj+95!|8-R!I{s4BW!O1GV z@Eu=(JY}Tl9u~WjOIYpnok6_sHX(q^j34D#cYX<90Be_;#rB~dt!H%)nWvsekd-F` z-7+)uruELCcL(?du`=bzbLU3k`8d(B&S*lpi;mVf26nCO~>++Hk&A7|eSlapS7ZLCY---nm!rwGK7J6T+_ou9ozHx)g_V`?58 zveE2pmKh zc#p%HrH-2NSjwVi)FD~pT<(bZgUez5$?Np5Liow8YS&%|{>z3H{@9X372Vk!TpV+b z`+l^|H(YDhxYw>BjiR%PIIVymEyIPuC*LFWr}$d?T$4qaxz;tyhI>0Od4>Wq zr16k)2;^5kw*C5~&RjiBL%=Xud@b-D)Y`-?GhS_q!VWjxGXe~Y?5)5zL@xrnVt2IAyzQg>%%KAEHFo*DaGYp7}(n%lPFaLUVS zeHYMls(uKtxbUr=*NQb|hg`mB_Sr+I4e+<3;B$q@#dG(!X4>}DOw(?Y_tS82$~u$i z-izLiLtBnG9`St1r;zPPi8~bsL-oaYILuS4E_!YBZ_n{Om?{i17l zk%TZe_YfXItv>+`KI_Cc*SB*YS=~VtAUH<@T>Q*+;Tti_5q#5l{0!s{Xd*iR?T2#CEOk*Bd z7PWsI_=CqDC)0Hav|FtkLAZ%xiYamanY0pQlLX`(f<47Ytm~5N7jgO1No%IsxQgB@ zgZtU!>K$>$(a80tTX^cqe&}FxkU8mE-YT~6j*B(E zpW{73;K!*rYZtYTbLXHA6pf=f=DD67$=#lmHadF^BHrgrZ5DftKH?RPZf@2ve9xF} zLC+m)uJ~DN;oUw9Yt6FS-B^(=%VY)EI*+Y*Y`#60P4U0>E|(i$S=q&y-cV!nRhNOg zzS^#yDDfwYHu!Gv{{Uxces<=Njf&$K+vain>n8n)N3m+(5HG$U>Q^I0A7_RsL%qX) ztN9K<lj|tZi$kWIt$b%!8pHiLOuMH^d!F;ccRLcSUPOyt;*FQe1q> zi<8g2RMj;ZHJt~-_gaH0--uG~;siWQtO&vUf0cAfuOqpQGiSwL6m>02#j#vWv0K>3 zZq|0OobI>}j5zvZ6(!!CACjx7-`QE&-b3ZUv*&S?47SXF7fRypZSFNMj5gY~pxfp#@`DIv5Q!TvBb#mHmyB0GLM&NP){Oi;-Zxd-h7xjrRZ=||S9jzm@hQP4eWQBH_ z0RI4Fjn(Hq3ewbgGhWrNB$C@swak{Q99!OBmx1f+Ti*#!h&(Cq?(@VNv~p?fdL)9< zG=t2M(ekQx5s{EbBD~BV9;BORT{@AGcfr~Qi{ft;O`+S~OeVOSa%yl88&;6<u%s9y1_3d5X?JKT-!o~Qbf2GNHbz;+7Ln7QkZ|8YnL=JP?*0^x;lyMVM zFA)@yvE=%1hI~KaZF<(mJs(B5y3{UC+h&ZJbf?CC(EYa@Z6s#2r_ki_$BOm6GWzoO zRle3V`Jq|agIbT2C{`VPG4!KE_NLopm1sb$fpi zc$V`;TRAN?2;~#*kxH|ojJ7lDp4IBZ7~*oCL*fsC+MmOJ2Izhk&~!-Sgk2e@)R8#e zc9uiVa0ug$_1pN%!1^nFqTNA|J_Qh@WwN^HL1C{hE{4UU} zd^c%o*HgoBqe8#Bkv`+fzi|^k-E0bNo~Li|PsO@~+QyeE>e@BiT{YVQ#Dk58zkJtq zulyvjw=wB(TkpKn);pzVkfd-H!7(AsO@8#A3su;Xjv{vJnc^T zsHl z$iW|4*YO3ei0-mm>Gu~BESpPRGs2(~)qf94rQ`d=x6t(l(_#JGcF3^Xs`;K|`-bh! 
z)C_m6BZH`(rg73*o_p~>z>DM8v$0R@hm%l{OpqfkV1OJr{BxSe;| zxwbgc7_LZU)5;D>_dPiDuD8NI2>WiKrr%oXQ0e-G&=6gD?4e_gkIlDtd{&e`1%tsl zwbI$&Hm#=E$nre$VM%u+9ANS@(!Hfd)EVwmsRu=+xl6=eG!w((R>|#C9%yfudr8{_l6bF3)4Wq;d=q)$+pAct z?~K21Tdlb*5{;*m?_Nou>(`q0zkP3e6{N84*=un%-|DKeH^M2h2yq1`a<_Tu;HP{{Zc&pH{tuS?Kn$$c$KRW4o3hA3_aH zN{$x4Qwuiii8b3;@3q@~Cc;M4BQPX$O$Yj$cO0-8=OY}{@2$SE@gc2z!n$sX0Wu%E zu|OpL9MzwM{{RpF0O2I@CB@~P$JumStEm|$l3mDm6?@>6q0MEI37tD#`e_pNc=6Vd|UCQ<>V`GZxWT7_WDqEK!=n} zj_f!U&}zOjk57`$>RaZqvC!oE2+MISN-{C|*PeI+<5Jh)hr@b?r1qK~mlDq*h<^5W zVYx$eI0R#@dR~j+=-iN?)y>!vIsflPq*t`Kf}!*PrTRu-E|Z# zs!eM$Tcp?#iE)GjJaTba4|8+i%SkV;v|D@qLQ9s0?$^#RtiXQ=br~2u@mt~W@s&i) zsmGO^M~%FE`lZgGu$bi2>?RghGp2l*ZWv`#*Cc21u6yF&#c4c0bEW8UTuo;jkt4?7 z;07QTRrl>)o#HJjZ;5)}_Jys#_7dPfnw*D?D9F6tPKF+f&s3&vl{h?s1rT2|zi5Kw=1ZPWBX1v_eaZ00_G$1(#$VaW_QuZgJC77eJ4thW{{Ss5nl>dyanrH( z74uia--Rup_c6O^@N$t9=W9XX*0k7NXPpUKK0nPOXB> zdhHhfPM*3cND+P^=T$K~U%QD146+LZ{!K8W(G)K0XTEP1*%r||+k zE=c57x3^V;P<(N80rM0dxdR`aL*X4lPYvAaQb%=gWVg5i_69#SfNz!I9+DOfMyU!uy!@BYP(^(PP>AJ?BXML$On#K2$B!wfv2t9eM zU0=tVey63!ChMzQve=Ta88E6s2EpmO*0!}-Z8R?pC6%(rB-Ur_@iA?`DCgxr$Mmi_ zuBSapp7n}e2k^v)Q`NN#i-ozk*}BQ3D8?zIhL#8&oup-UU6#Id^r z=H4;S%*Un(AEkN@m#4xtTYEdpT{g};%{qIC*k14?jy$g8{3M>ot$Fu`J}YVV_flJd zbl1}$wYf3dZ}Qk2l0Ukhj5w|um735wE^7FD!4lbCO(d67vRk-z=jGfvAMhVqOMP** z-xw-rI!regmRqDQi`B>>zYj{R{wYroi-{KUHCL5xk&%b)5;2C1{{X$$o@%d-z9HNA zN5sAz)%0Ddquets+ArTSl;ryI#Bozk4yUVTlxi1jd;xdk?+|EuhPS9o;p=@Cb&;&) zY%SV?PR0X0b5y)rp)RqiGTbcD&*n?^DI@C==lH+bBN)Y4(>zymsd$Ro3y35a5yozA z5rO$o9)Fmq^+}`Cb=Y+KYlAiB<8YEM&4quMsrJVi?ORXT(vFOZlDvrY--W5H_-@L^ zX1aUm*fEw8K7KLYwj%IEaoe_`Lk+J$7YkyMJS4W!8-pW5V{`$y9 zi3#%-*DQWTBkwmI`R1{_Q>j?^WZmia5X*e(sI#;#%o0Z*FWu-tHOCmdUWi0CCfC4v zzJoTieJ;JLTq5aKam3kcS4AzmpKOjRGEH3SJ}|t7Nv6}IQ@&eyPtCyo9C43Yx8Yw2 z>9R@T_WN0n`#SyZE+m-ppvFP!Y9Ae5KZb3u?=)81rp*)E#V?j7cQ5>0=b_DVN;Pib z(js!Pz~B62Z395JwYAZ0ZZ9U)E+Yz4JR5!PlLQgrudJ;USvqT0! 
ziR1@pC;gL-zLn0~c;53+@gv#YO>uc0x_OsZkptxi&In+?Dg0~ZRtBq;& zSGM>kTaGUSYnt?SqS9|Dq>+KSt}vtxs(NSJ*1VN9En~#de{5Lkv8JDG7&69OnJn1w z&SS@X1=fkJ}Wyt zO@7sVc|V_Q_*z~VLtfNc`d{=Q5wDQ==9M>Dad0=b^R)S87 z4)5tu>K-qaT|I6uW?PlhUNn#~&zrb`gYG-l4y~qXzA*UPW1#99rNy*z{`KdHcu$)h zbBgaJ_%GrA00ww&O)lS7lKRh5XAdI)TRGs4PfX^x=HVrBWw7m6Rfa8i+$zHiEvGE0 z5&r;|V7FS-(>^CHx5Irx#@|m{i>R(*TdhD2Ha2(%uigGtSHfQaZoF{To*vQ`PZED; z*(%2SYPOs3yIDa>V!C!8FDlrj{DhL;T3EOG+ zU$n=C^q6eSdR5c3=3)o>OBmdhA53Pmej{H+b#bgsZ>QT&rbiB>lj5F%Va3GSN|ZCqe2}gYf##3+SnGAB-dfy!jgmNJjwT4dJM)ffvQfPyW1=!q zTAp$6zu|?S#p}O2QohtJA+e9m7WT{*GyrF71L=-&UC)FyKMhA`eXdD2onztUHxbIm zFuSj1@4K3(;lCHls9!WcV~)z>$SD|JJmv~Vr+Vl<7I+&=_;2uwR`C~$V=HkS-)tIi z=gpKZ0%NWS_O4nuh{9Tx?V@x34bpWzR$Ga$wM|<4NY#AFtyV_`m3b@&ZKFSxYxuk1 z_M@R_o*a_TRxM^6Uqxg|&CF#;9CZHx^;Z7?i~L2cd_K~3LE^i8J#}qP+8OL%wsk3O zZ0VH2P%zsF$m@}c^8Ww=>c8-l>P`l zPQS&#uByUT@IQ^USZuV|ZsyjCOEtuk6;uki`gAp4#2*$TI?cw7;jyO0d2ep7aVt!_ zL!J*OjU{0vz^I~2JW0zY-u!JCA0AS>aoXlEX-Ag zKX$yY&GfEXc*xDzT!%sM{{Z2mT6{Rs;x0jb+wXnqvBhHW})f3(fz zMvMk!JRA!2p9=gx(`-H?S;uz-cA89<;{H`53n!Koo&Nxtsk|+vS?D@;p`~4FpKkk8 zmV$pRUG{N?$86%Wd_m%^Kg4lb##il-Mh&{bEQZ~NP7CC6K*vs%jY`pugdHA$d?BXK zs%lGbX3<{VO19Que(qpTP(5&S_*W%i;7i{R>atqe!x*#GXAKaPeAql?@z{HIsXS+( zTR|IHn-;vawX{i*H?(OXIr&45;{fqhqks5Gybq$;{{Uv+Tk01Jg_TT)VBAMq$}mn> zMMgI|{eEpoc&^_m_rL!Dgnvk}@a!@OdQ}x$IFqNY11HGyJPF- zbo?`3HZq^@&^O9IQJG}dp|D$ahSp^h+^E1Z?ZMCU{OX^BJZ}$-ylZuDVzXOmntaU@ zjE~*{SpNVJygwe99p;;*BM(DZAoO&?FQvARoUv_Tm*dp^T%!9&bn)_ z2l$g!@Xeo!BWUCB{4hr*pv(^6v|xFI{`w-T{5gu#h^tb;F3pQ zm}i>kekPqm#@bb#!|IINV`EFhARi+Zh;?y zt|HW=yIZYF#@yOl>A(yf$9C)vc=}aO2l$Inu<<;)Ua@9xW50$Ol2>zx&Nl?~7|0do z(U-MnJy=QTiVmH?6zjz&ovt}(_C zq3Lo4T0i%I70OMZTx!?aq|-9rURlZILmYp3az5$LbAemm30~J(GJj|3&1bAf77=C2 ztZV)5e~Yer)mwiI=)Vv2eS228y1tTTlua6<76pQze_CH$b-ApzGy7bokHWtN9Tob!iovubm8X6%I=d;PdHS{{ZcIrs!Iq!+VLEJu(^fS-ipJYz+Jn+v#7CW?mp; zFj<^%G^2GVuaWfFJhqgmN~KPR!#*+bRko939rfm$bqp5UV*6B>^5rjpHuvY>*1ZqF zT1xnq}%IOGB(-TA1DIK%1#xC_8ixv zd~(%1GvHsecrU{0_8wU?-d-^IZe;tWES@&>HTTsjLaZY>?)n}rJS0_5YX`B;__N|S zh_%~AvyNsq_A0Z>ai5l5qj3IJrE&X8cyCkC?6f<*K1(f9_C4uyJ~qtcWbMa2v(0dR zD7(2>?PJqr+vN{3-b`|Zaik%HP)veljWP> zBS(vYl{x)t{*B=MYs4~nnr)m=zMesO3jXd#>&8YuD%0_&!wp|R*QC+)bhov-)NLZV zig4hMoNoG8sfx!?mMS<(6%vx^bB?|b6YLftEzWksOa9Q%W?e#PZ5HYiE$TP?y1z^} zzB#Vb!B?7X?aJybk@@Wbg4|=4R9&QxsIGg${v3XtvA_LVoNAi7~h_i zoq2b8Z*izw+}!=0Y<#7F!hx~RPsbJA>|}ZT<%#b!+dmOasZC z_L*yS;X9AsO{LymN``xsJ7x*a6dyu5(^E^-JYA_H&8>T$0Wh56c%;2>fH@u>$;?#CKuLl+q7`l zxPl4UHkKr3Kb?7aiAq%oF!dh_9Y+35En$YnZAk4!gkZ=NNlUC^~G5DU8rAZ9wofKyo>FgS_^11$px~EN8oFzJ_=nbX_oTub&Mzb zIUvYqIr+Y|>QIA8T-Kbdk37?SYh|U}TiEN`d^)}9LT#NFD}Xs1_Z)sTldX6;n(i%n zGj|=0mC?VEazD|57~A+}x{Xe2T|-%#+Sc7{^tjN*o0l^**uvvH;J2l6{t~^^Ch@}P zHy4T{v2FUKEC)`7e>_)ZO1!Z%sZiOq(e$}v)O8I`8Le-%8@qT_qQJO7!)WKG4+gFL zL+}N+f;C+=u9-C}i&C?+ZOFETh89Eq(dbQXct#CB#JXEFR~ziC+~3J7D9Z>=><`No zm*V^DKN;!w+P0s0B$ro(5mlFg7;r^#)TtJeWy299tZ!X-g2Pm~Zw~1?g@xtS)QVmv z$#EVz{{VpV&1rbI!~X!;z6RDU*5ms=;YGYrrJ>qS&4af+$m?EL@oFZz_?0Yr9-7U2 zb*4$C+_Y@NGRd@wMmk_|#}(**4E!x);5c->KUUPF)UF|iCSrF4k(cfsdBNkF_Iac1 zQGVJjYDuM8O?9Jc9v-xg7Huuh$OXx#kN1$HnT^m!wRnZj2)mT z^Ba%u@@u2H_=|Pm4+&49t*xtFYieR^!;dm2J@b!h=b_bR)^+218pg5R+d57--4~Xa zB!9iuxvNyr=Y>s3Wzs(1W2kCZm)8c)(sq?fx9;zx;1WHEIj&Ds*R6CPAI%J`o^`}u zE`IcR0~xNWPZX{9g>JMtqw;jyyHL^GV;fr|ztC2v!C#7=AMlo#G~4|SJ!{BGYdj?IN|AXS}|TYQ!_U3cI&{HRZn!v}yFui!0(y zO3vf^J~$l7GwYC=gywW)9d__rT}m5S7eltXc?pjj&&|j^xUIu| z{{RUW#62aR&O@X}a3PGK`4u@itDY0^^FuAimp79HrQq@b;BCm=kAAgB#s2^kb?raF z3E{gKY}@-XPxZ(LnD`j<_pC9vi7IY%x*Ji9B<(gHB(v9aPlz%@c()d^UO^*5y(DPW ze(L`Kya=p|-Bv4oFHgFfJBcP+lW!iKE)4sEwfMi`UkUhkOV%}&n&4_mr%$_v4>QkW{7$2U<2=-0ahQqGbsE_8 
zC6vNZx@B!o;Qo{2omcG55?kC&Zr6d7e6ef;lkO|I(4@DD!p`4SvWroUI7}t(M&@|c zK2o3kdUwTbA4~WptXW-nZaK9bHu(@;-DJqBBmj`7ka#CJJP}@H@pIyjhFw!V zHK&Fxby0mCoZmDmZ(_PFg^{{#$4`39J{;Cw?&8l)@-cl9X zhqiK7Mn{C8T}xKfudL>=ywszE%QG-1<_D0!?R{%o;NORhxp?)vfEH#Z8KvoL{i7c2 zdHgGY(6w2-A@Nf38;cgPxVVLHB8-!@i#NZpHSHf2^^G^gQhYVmbo-b*?Mm89d2J&j z{c}ncNdvgY>qZxqOG0Sj=PBxQ9wWc|ZnJp7E;RtbZ4+?@Bq~Tg{p+3aXN2y&FX4;j z)MT~uAkDR$il+|=8;D={5jDMSs7q_{0^$XSK5UNzCO+(moyYO0{wH5uTlfb0=1A6f zF@(bnPDBz4_O>X!<^p;?D%= z4Phi!aolZd*Bv4_InU`_E|a3!-*{L1CiXQut)rgaZuu*^QTTMN583m<)|$I&nl8P0 zHLr(S116m!AKnv?26+djd)FOQy`n>-SCt(HiabRRhp(=qh0=fQO`<$^EHVvnzBtjM zz16gv>xF4G4N=9!D1YUPu*rYKaa~`Gt(Q~KwJTT}HCt&T86)X5TANaeIwNB_tSBw+bzcT9l^^=O z8sb}rfOQDpcllQKt^3QX%Zqu0v-wQ>N$JNG&d+x?m*P7OMj03VIw2jzS?jfb#MaVI zn&f7D*3h3o(%0e_!>w&(lI}ZyDW>w_U<#lf^?$>k3vK*4@VEPCSeko#d8JEzBGaVT zgs@o{cEaQn?OT7c-h&^+{{V{jT7A#_OD))&tA}rv80>}vf=&i=+NAi)@k;CC2ki%A zbsfAGJ|ECg>~xsd<%&Y9<;f#)j&_5Z>cdm!h;y8L+IRM4<)1C)Sl<}LC#y{i0}X=C$1{v(NWJ{d66{AYD{b)d(u zM{vz_;G{4?pDUBbMRIdOGv$ewv^Bg#s%o0;yII_SX5H9dTql~!t@evcbZ?PK>GwYO z(z1Rf=xwj~vrmsqy|a@`w*yVGRR?p#xglcoKU(TFsASY`($>?@w1q@&!^z7!@JKoJ ztxFrdH^chX#F1Kgj4i~L%FF($O!fS$+@nXDox|E5d2`}xuMbWAl(2|wA-Id|kgxi~ z&&P_3n}48>X3Jj>NH&Go8(r+8B1hCb|3H3SHWGRdvRn?N?V8}qT}i22TeO#wwadoLyPaes3y*5z<>ip$9%GsC9)T{S;dPOj=l$DH zGcWqGK|eA23hT5<8^d;2wwjZ)##p1C2%$J!ec_zbbh+d3hlXa-=9oigG=dDOg3q{T z^5(HMJ!}3FEdtj^yxk4et;4F^!Z#lzkP|lyFVX1crmPCb78qtoB}H_Tt&F+VW!`wV}2w!9he;u|eW-%q!_ zj{PiQ`$-Fp(qQADu13qk$!2W6$EDm{YYlTU%@ez*+Rw&+3iW-CBSlJ+`;O@4c)2U2 zHXn+q;ja#A7WNuFw0k+u5#~y0xfCrL`Ml zl1U(MocG+M@l4k2=J4j1eIe4>Y~uzUNknD#W3Jc6dgG;U4+`8z`z7hJ$)?<1N8$^y zbsIq=NbxLA%kvS)>(JF=sbb*0_}J*Dmg1=^%^n2~r+uP$n@`hj1>M!`)~O74h)3@q z2Rr?L4_fGTFAd#jo))su?&Xfw^3AbveA(O>lEA6|0G@awAoG!3evjdG@o$F2o_uX_ zq{`$%$`RCH0qRF}Zfm!;^MK9{OZ zsYw))YZ0t)wd9yG&Zp&HDEs^NHS2#7d^@1{C*fX)qKiqS@mGop%x&k|XGn-3#{IgE z)y!xf2wVREh^W!8ooBQxqyBlA$6w4>JMl+PpT-_H)fyc}@vfnAY-L!ov9Dkad)QcW zM)EmL4-Dyxqw6>GLYTL=fQc>t0Eyda<$Ga!)}Mv`Fl#>yd^d9rk0PBvT#x-GB2Mtf zlEb$+0-f+~{dC_0YY%xe@(U0ndD;%8!*w;4X`pGIAMm!Hu6UZx;`-ND)gB8NU@f{t z2pA9-oOP@*RO2aG9TX*G#*|+Ucyq)0{j`2!*q0Fz*&0I5hw4RN_>ZE;rF<&5w}$WR zmYUti_JzXd9!~aG=jwQ^FB<9A_VLYbw-H;}z!oShfdzzaGQf_ZvUu-VdWNrWAG60@gMIY0FAlsa#Rj$o7ja)hZXS)!_oMcOovdpS){kQxl1%0W%m!fKPu$> zOX1J3>o*cCGT7Tfutc&yRy_#g*1b!?S}nbx*ZfC)c{1D0s3pW!tq{aaBLEG(x^(xa z{7KLwx`sOo#<-PDPCoW`pH;nu{_Q7*uZza{DY7qGo zWREZcdat+EvF|l4U&LP!uDmm1u-NH(UA%L>#Gv`EsTgPEan22UcZ|FNYp8fiZxm>; z+)FjZhxT`oA{A(xJN&{rgW9V-+2l8|qI(>WAMnLt=(oD1 z_1s0V{x#j|o<5ICT@O^b zu(yWx#vq@6%#J^pp8Nwup>K5FQnEOBO}~czBzS`27>sj53q`YuF)n)kb>)8$z8kI9 zoUA0exYWG=0JcKl{{Y9k;~B3;w*JzSSk&*MSY)}no6MeR7%GU%5}*#816Z0z+w?CO z-+7H4{f@0E#DfRQQ}pRm?9rFBHE-v+Zw~3QTxr7Y?$cq8Kt6d~V-CLRMN!p1v^+z4 z*H@a9n#6f8dM^&)ZJ*#jM#XiX7H#z326&!LJ+7KHw@77%PRdG#J^gEzv|-{l)S6cN zMAs33qA|2OgOL1s)_9CXN>rRFEm@=*lDAWlxA0!2W3PGlc5vF>i+9sv!jPm&awPf= zE9+m`uULlf;A3c>D2{7gQ6x6jepw1<3cz`do~w>Wdh^>41WV%o0EISwApY30j_T6x zJw2>hkVzy@lG1bYe7MGIC&zzmeRofuNbfClxK!Im9AUqBRd8QDdFx&dDzq!%of*G; z_A#@eOKeouzn#ihe^p}y5HQ0fS7x#VY> z^G!F!y2bpr7y5%NGix_ePXs}^VZ657cE~ssaP1oOX*FM7UZI#uH7nX19+Rl+7Hz3R z;fPhHylWA36$kpqCvO$T_?j!v5os2-Zwa?&n`D8MXEw_j!yI+Te469J z=F`OEB|?y~v=nJ0x%dnFKKN(HUKiFO)VxuB;XPW$JV24i5nl{2`c%Fy)OE}67dE{lc2^PGx7r)YR^POL3unDjm&P_{NLxEN+$$SaoeGF;v*v)Ti zZ$0erU8yR?*M2ysFNzc5CyB1LjW^sV1?W;?`!i z5p6)K0mkLyzI#`-T2WP`j}omNy;OOQV2Z*FRZpMy=G z!EI70u0{2&-Rx2O+69zwefx1%ejnT4YJMNTjw^dvF1&_UmgZk9l{}VGdXep22aGkC zEkCkjdpQ=~IAc#P)l9+Uzy+Hg{PSG36MqS$DiT@veSRYROVaFhi4so;_-g53hT2Yk z(1V{W_Q}n8KaV~u>$-=I?}J`TV7wqZb z+g}$+HI}m+ji#i2ex(ZIbg^^_x#RC`Jl89!Yd0E}t*I`x=1FmHFOh1)tV(hdKHve; 
zy)Ng#`sSbT$NO5=#tW-^XUZ~hTte#FR*#}IdcB>;*KK*lB8|?AOVOZLM13YsYzr zX-fiex7g;g{0FXDjcso&ZSG^5M9sbJ%tazp&)w(cJo8=DvCeCeqs^zvlRXQBb65<*`^u>IsyIQ`L(^1Hv<3AcUREi)S<9@FAwVtzI&+$`pC%6;6eMO z;-uBQPpMBQ{3ANV(du?P4CyR5*m5@?yT{91Vry60UbUs@Dt_G@x2+^=_Jif6xl$Ay za5)vvD)DpWE1D;w-FRYIowjUWD&h9S`TiL_?paFt8mf(?Z8y}gM zwnjm$j|zAi&szAYV|LdK4UdQJ8f*K>4%2>4R3Ins5ZE{ey>((KsNvm-x@}nc)8n>^ zw_mf5!rd)kRq*WAqUTZEK4pkYAZ0ze8u;hqR<9?Bd|5S|`^M!DfDJwoD5Q6y`2 zBJFHdZjGr2kVxfuGB{l?fg)d7k0|J zdrzoF&prSfNh2kn+~T7eSNtv=4Sa02a%vYE9HZ@VAO)dO^A{iNjy>~OS|$Gg+chbs zpE0yENe$PluS5&#bLn2Q;Eg9*{hdA=L#{M&teUX%+QF1IEjbL1xX5Bgc^Sd2@N{J3 zb`gzAo1CwQb&Gv-;U&{UBzCtj&hI1?orPahe;bBTR73=%8wYSJ}Y zMLH%RAT2p+gfK=7keo=@fYGCSBS-$;z5l`De7@(L=ee)z7QJ0JM7XX^nK4e91JioU z;y<#ZCFY)Xb`K-&w~aF3Vst0u?LL5S3kJsh;|^-*25m-$Irue8{0V6P)K)7h>?}W( zsQoHC8tKGJ7L0Z@!NuWk+eIxXB-KK~XDQ~%xS-NPJ-=y>Sq8csMC>W(GU#pf8} zLf>Sd(o~x-W4{;w8jFc#DTB5b=|=zKt}LcM|~0ApJ!zSEK$zNJn{mUP1+(K z0Ym#KO7#~%ZQl*~6!6K+!j-t^`T6A{gS^E*6J*&fZf$ymJXuzJhV(O*qCF2ZODG$F z(>s|Qj|B1X6xLYk0*rHd&FG9&S%v~M4e1w~>SGgOgiFPk{@+bk5;wG0x^Od8V`oRS zYu+1*M+-H%>uIAz@$1*t5prfue^ttZDAyqMIPSS{Y}t83a}NBO!Ug!^FF zQ)ZB=;O)IhWcf}9$AkdXU1eF{+{~~kfrLZ&-)G_B#^4z?%zUqCs8W55T79;b>E@JR zQ}~fx=NTFll}UkHvzjSuoe!*V6%}BAvteLOsl_o#BfZb)HwZSg!lyRkB=|>hs*Olk z^vG68X5A$>yWd2)YAL*th%oMT0(aZv1LkGtBu|3ca zLz7t5!fa7HSgXY@4)?|sgyiHCwu6mWR_4HcEIU3pATL9|d^!VnNTM9Y>9St*wbG*@n#^ zr405c-2(P?6Q3t-htzqHU=y!Qbnh+OHpG}u34>4EM+PXSgr=2V+JU9Zh0gMny0Xuf zXYH>Q`%mzuq$gS^kuzqbAD3Z5{h*=3aJ!+L5^1P#l4E_g9I!-;Nu_64gNV;}lHp*- zTDaSj7O=#LYj>`)s!k)RxXH3e-iA;;I30D^*|P!17JA0DO2rDS=lsobw@>F}QC#J2 z!SB1a^d@ia6>nd;w4c;Mb@KMj6J^u!X$v#=C)0fBVzK;m>b&7$ZeHZTUbX5; zXsuQ*RIx^abnO@m?<#eSC{sLIY%Kozi}`6XLZUzP9WEvW%Z=KVGj~j7lAD1744cil zRS2UFjRSZf_`GV=Rj>WVZ`o2k$rb=QrZ=JIgZ!=o44S+vXCRh%`%4rb`rm?y&$MP= zt4wTUJ2gb*&ER<^dKQsex23Ap5IJ`$WoWaKD^jRTr$vzb%@fV5E0f_5=!sR>R88?y z@i|V4ZW(+R^^cVZCGF$#@pTYqfar#LZM66Sc@K)ZH3L#U=v?~qvB72vM}a-Q%r~&N zT?G$Fk{0e#T5kJ@=oB|!P6&CCg^A>1trPM39TGZ366yH&9a-xo!+G#a4V&WWAUvR( zEdRnIwej6>7A%EeIpRZ0| z?Pn}?r}poOK5%eo&A{D@|OzrA>7LyVrAp^pQxq%dhq~8v%d&`OICW}g| zcv>q|E}+AvJ-MNTyuku(NQ;)>AQ=tU`cvbv)t!9~MYqLqMET9jJ(%At_+kBwh&<)X zBy&K`k<(iJGmw0vg3-AfhAaV1PNVwa>7Xb0Z}o)T5}Sx)f$M_EjK}Lg7d0pPI8TR_ zjcbiK#hx0s8L@AZYpIm;$}7UdyJ|tT-8P1oP<>g?r#&pS8SHpno^!kyO#a0+B)R3H zR%PbKpaR%{cT?76Rx3+_9sItIXtFpCF>n<~@6w~|`%KqkL>EqFs7DNRQgpX^}f(97$koDlnVLglgN81V+H z0pO&qTYT!$E%@=sGLSjggC~0yj_E`63cAUGnNie1)Z`H;0?m{V2iH9L`TVOSUpIS~_d?i0V9m7&E(nY(=%ccnFYT*tWoe!J zm%RMS(ZiU{Ao~F7Zlv|Q9bZ1LCh%|XJc#N+6)(fB>k=n6DjOGQGA$Ud6#=n^ep8ci z6`CPi$NTaCRpfEeLfVka_b`zt5BKNErbn8SH#Brv-e7CDIQrXRXw?I;tG~WoPk<6O zp&Vtn%pSH+GZ2Sw)^FzwiwBd1p1KV#KWdvhZ4MQywp_B&OcZ=R$5?;;_m9{ev=>0v ziBjZnErnYFUNVoQ+*b;QYbdkfN;dWRoL4O@1&m}#m zO4MWfunk)fDFjW3li-D`mG+$L%)6_2DtI!7_G==}g zGU&;*kxg~~X2H5nr7dlWaKS7eQ}62=(lFr*HyM>c!sV1=LM6Oev&!KCmT$@?lp51G zQ~#FSzdU7cO_I_|QA9v&Br5l4eS7N{x>)~ieLi`?QAVvsdtGC~h4Nj&G z8UCG1v|;%z;TYpqfjhRZOgR937llh-7YNV{&C)P0O zHTVRd_h9++qz^4doPGLoMbAScCnW}N09g1;Cyh7@BOWmEp6}+Aj5jGsbGxt2yVu5W z18_U)$y}z$f_}^%?Xi@1-&6r{&sUYV5hZmTt?rm_G|M*DlliNn8dSTzHL7s3ArO_N zT^CBVG<`8m2JjOfME<`J#=#i#3Apdtfi@m7DeX(bj|kgbzL*!cEyl4wc`X;ipujJT zxZ?_#!q-%=0?g9}QJua4uO1^jEnyh2Wdr^_FeJ>t{a31j%9NVGyS(wnNY1po=`c0f zWWR+h1ou2C;oEu`@DOgtGTmN6(J(h(e2{_tvJ$=SApd>>9Pc4-fd+o!Hi3>EM$ct9tsXKl5_V@>fLIow8|2C|4+`DvvmI;-QQLjw@W!Ws>Fih0R# zYS4|54U%%)<-q=g|qV}TjAOq)zF8Tq^Ehu-rP`-H1j(?tVY3gf7+JWfr;Vf=t zoonrMt9nu3?Fo%?J8K(W`wor6&kiYn6@rQ{B#`aI#g-%cFs$hA%%4_BetPlJP7|YT zLZp+>3o*5FZpm-m7lio$F+`>=`hdHiXdKPxr7wyON->xCtJLOq*i7>ypTBubT&|`1 zM#|wtB&D=D1^LDrTNhZN$ZEF;91s@1Q3Z&i+ypmK-SL{SUmDql3^ZsM_5N<>zZftF 
zquJ;!_pXF47aiIr9Vl41XM;ZUOIH89Zt*C&=o9WAH2E#JqciR%1 z|1yU##^_CY$n&B#{Yh2#54;6Z3sn|=dWClWWh`E4{{XoOW5)LR`+9P%Q1ZR#(OA+` z;ydkV*VDe$vrGwS6~Sk|Y+O6I+5tzuA-N+TQjJ&xY5tY7{=|cgPEfXQ@-j~}D3`6% z@p_KSy7te*j(w~AfNOW{$61Y8LbV3JD3g9A-JG4(to5@J!s_bB{2+>wnZ2ZeX#qX! z^NklfS7t2Q3;Wr!haYfs+ReyDJOli+pxN7>Ao9Y8gNK7>jJ2pOPq-%)SdM~DiRyAq zl>xk)3E_#E_W0dfB`WHyCPTRNq22zthlucnNkkB$gVZ;8E17fpMmUH{V99PSb(7-R zAcBbk7JtUqXY}o(zE7pGx62thneBEs>y$*h0*Xb)(BV8u_1$lMZf3sEKJ<>0J&P;E&f8Al`S<1hBo*}grVeW_4aW|*CQXKjxBj&w%}D9i=6~XKqZ%!z zK>2^v+lw9n!FlbQwV^W^SJq}mHj5z9i3L6l(Uh@@HBV?zL9g)R9{k8YVaC@+Tx!Pb znLr$4Ep3jOw9MTRtkeX@gB@A^BfFXDUxSc@g10jVxeJ3{zEFRpz2POxIEd`v9bIFK zMIno_DF+6Eg4@7;y;g7r7`Y#llxtHTWq3bTf7ahOyWjqHN3Gc8JuhX^5bPG zp@-&Zh0KAC(s`R*9K?a-Vd7^i$p|Dt?jhmWWKK7u&Fyq@p+C=TW1F~Cn$;;)E2}7Z zd0G9q=Z~yQR$nabq#;t`T8Bw=5WLL(^H8u$&E~V@zhjZ1$-M`GXO|W<7 z(pdZ{sp$C!StF9&%4O8rwlIp_iaXP%U0hI4>4$iwRa?<9rQaiDjbXV3WMLoG2dxx* zbBDN|&*(!eECeC{ibIF)u|-@Xcp&~6t$xd7wdm%wiwv_@qh1Vmr=QeUh-SorMtw<@ zc=Ynxp#J#%@y#~HkN*ftslG(wd=aHU>A%N1s`(~hI!CYNCtD!B{9{s>RW{}8lna5x zz1)=L2k3TFeIl0`Mb-&UPtnJxZKodDR-sWPpMh zA&%s$8O_O@{U+0yxCfdszK=(FBIh}$K?wznYdB}c9Pr${HGT=gua1gTi+%r&07dVe z%gj8j9baL}%X|WN4*Fz*OTZEt<33c*&*4JDs*LmhQf@?seLhrD^m*08q5*LUAN5iz zo!iBacFPB<^((3dylDO!rzoSmmZIUHt|cZAS}I8*j8bPWHlm5S=qb6VkN1T}M0Y!r z9>$|^bAW%AFC{hnTLs_%fplVxbi=BSfJ&vn`ewx&+eO&vn`a)g(d+uWh1)l+n*Yp!Py z{a^1_9J$H)aMSRVRR9aW8XP~6q4a#nK=GQ}A|Z5e{c&d*}(bQF2DmWvQf7M;CKQJIzv9wAM`rA^6KkWNYTb<8%-N#N_ zq-8MOp9*=BRdQ-zTJH==@!b4uf2R{^lg<<+nHIxSJ>St%Uq$k-%bJrA&N%lY&k})p zTkY%6`h4THg)QP(ahAR>cx*DkXnf%XM2%2;{w_aaTnp0FxDA0(81*2 z@bSPv_EiTyL}IdR1mF3~DW9?lku;wL#Dsu95z;Tr)V+V_{4s}WuN;BEh+H%a?=sz; zz#=XIk)5`zGKPX61vxwnmA{NA$=3ebO#@0PPy7FMK6adbK|+^zT!By$o{{Ln)%NDI zV_CCS2di`y@TM6p)7e38GY_9YAB8G}--c`k&yV&>7W@AQ(C}e2Ijy>sRloZx+=AQ5 zrjswxnVz0-q8326r8Zucc5UA?lA)6Jx~>V&&(tuV%1;MDV{equP-y;1cr)YO5_al_e#2ldI~=L(HQ3*rduh56os_~;;Ca$gXSMGYcfrONCbyG zvL*qG(1GFfNs|*w7tEJzwKj$W<<)Glsx&equj$v4r|VaQNySaaDld_kq5YF?jYAvF zz2*~j_9V{gA)yVU^t_0#rq_J|x)G1(1Gg%gqJ`7*pTFwCvP18dP>mGJ)~t9)vTLfO!UwhrR*(Qqa5$I z!DGdhHf~Y_EN8ty0|1QTq0*kyNBL*Pf_w3um^Quk$r}-9jS%g%kbL$@rSZ6^*yVQuRxx>mRQ{&-9Uf#6fq_!kH{_xCo>acRf;jH!8(WmT)L0jIkx=@Lca;COl98>4 zD6ftx<;Gb*1l@`M)f|E{MCQaP))B-8PZIMLi2KD%{72*YS6EbypbcpUna@!287XHj zbwstD^2Snlht&t5-~5KVim3h1`=aq=M0?q_5vy5^wNF*O6?z?d=@>t?hC$HJCM}8V zyzOgFo2`kI$=KX6-v^k`3sna<;f2(A^JqjcxnW1mUg*;;dE$~q zo|s>4ty9^@SJQ{=np^gR?3%r=2rdu=S5x^^$;--E#>Qax%|d@~-gqnBuB;D)e8qJ= z#!XtzSf*Z)+3kKUs*%J7(Sz>~BoQ6fe$Xb02-BL_RyaY&B9p%A7pt zdtXvV#xTVHriuvN8MPliP_(dc=V_?y;bDEh$ucvKDk z(r+8G?gCB)#_^Dj4zzh_c=rWMB`LDuFAN;J0`2ik0Ms5W?Ti?X@E$O`@=XM32PP`A zC(#^;hgs5CHM^suv=hsY3soX_^&fS(ZyXWa=Gn#NhkbgwhQ*77tTfZmPVpY=Q)SV6 zYD#?b4H|%;0Hvrj+!v!UWNC6bTMtW+KOmN~z&3S}lEj5<-i78nVT&v!P@ZvNW-YU) zDWj?N8-+#?-BEnS-?~ij==STgvkC!DkdYEr`2G@dIFQ)s($Xi$Vf4N4MBzvY*PtR; zsqve*Rc?4G&AzCe%J)GsaIHq`+$uhLVDF=KW__Q4Ww#e)T3dwknAA)0oBx(9>56ob zf`gR;6~Pkrdp+a$gU6RB)?cTE6B_j@{-?}yHf)vcy-r=bUd9knO-4)|OzY9BwmPlT zKHus$6_SNh*`e?Sgu&_&88D%%fYRN}AZ?$Kc%raX(|FZUC{g7z**#|e8s|RQd8 zCxK;KL(tA-E2fY0&hO2Ae!Xv!J!L4rqre%!KVlQq8ou{8`S!+QHN8iIx9;J4O6WiwD&`) zs~)!Z+lWsges0wSoFQ;#8*m2n?uZz}DwlqS6`M^yMxd_FaqINeqSpaF>`9w0@R%*)E-B287xEpVWy~^1`qAL<*d@Emh6# zUz1|PjVvJd;;mNJ@eLj{8F0@O21m407v1XfiIIbc6i8iRK7uAVLF%^HwTom8$|KTQOGyH}E4`1f~z3&yhV0 z{sGB(bJCiY_`v3G&%&C-#YBSUWt4$d-q~m$aR>Kq;GnmsM2^cbJxY^2ld|rPv|ZnJ z!KfN;;##2_5^{b%?!NyAilVP*5t8vx5LrgzxAA1Se~*QZ&LD0M%;D++XO9RJiahA; z6%Rvv^BYGQc62J;PBZ}SoSBIU{!G8qU0*Z7UTyOWITf_mg9RXdpKyVub>!l*3R4#| zv<-AWtJs!#rBPx#GufxY)Cj>|QS+UDkgX4@!Y1F7MFGP%Bm%)6?XD7i>lBUkk$fEY zrM~-+`rkcWTw%(Q-fX>oGb{D`B-dSO^-Z1M*3p~E*?)k=;4Di<{8iQW3L(ai>aBK? 
zg4cziuhH)>(XHd2Ft4VkAGqHCM{!pbKGVsE{gK4o(eXzT1Ql1fY2o-jG9$c*3Fz(} zJlzz&^Gu3!Cgp@Nv5PaJJD5;wFpkes(lKfMDC#jGk=vyWGnfS&Pp}b~@a_|@D3lLb*yLLL5Fs?K4krCCe*u90s?vo{SnPRtc&=7#d|}rY#2*3t z38`qY?;4f2oW%X%Cz#-Vg1oE2_l*~iE$%I5Yg_ARy@2mgRrGsKW>G z#dg27$BUQ1)_xh&VMUJ0W>(k_NN`6#jTBW%w7FT&PEN;~c$)s(z#p^rvv~}PX4RM^ z1RjWY2FK@Fo(<7Xi|}&S#acUBn Tensor: @@ -40,47 +35,6 @@ def tensor_model_parallel_all_reduce(input_: Tensor) -> Tensor: return output -def tensor_model_parallel_all_gather(input_: Tensor, - dim: int = -1) -> Tensor: - if get_tensor_model_parallel_world_size() == 1: - return input_ - """All-gather the input tensor across model parallel group.""" - output, _ = all_gather_into_tensor(input_, group=get_tp_group()) - input_size = input_.shape - if dim < 0: - # Convert negative dim to positive. - dim += len(input_size) - # Reshape - output_tensor = output_tensor.reshape((world_size, ) + input_size) - output_tensor = output_tensor.movedim(0, dim) - output_tensor = output_tensor.reshape(input_size[:dim] + - (world_size * - input_size[dim], ) + - input_size[dim + 1:]) - return output - - -def tensor_model_parallel_gather(input_: Tensor, - dst: int = 0, - dim: int = -1) -> Optional[Tensor]: - if get_tensor_model_parallel_world_size() == 1: - return input_ - """Gather the input tensor across model parallel group.""" - if dim < 0: - # Convert negative dim to positive. - dim += len(input_.shape) - if dim != 0: - input_ = input_.moveaxis(dim, 0) - _dst = get_world_rank_from_tp_group_rank(dst) - output = gather_into_tensor(input_, dst=_dst, group=get_tp_group()) - if get_tensor_model_parallel_rank() == dst: - if dim != 0: - output = output.moveaxis(0, dim) - else: - output = None - return output - - def broadcast_tensor(tensor, src: int = 0): # broadcast tensor to the world group return broadcast(tensor, src, group=get_world_group()) @@ -95,15 +49,6 @@ def broadcast_tensor_dict(tensor_dict: Optional[Dict[Any, Union[Tensor, # return get_tp_group().broadcast_tensor_dict(tensor_dict, src) -def send_to_next_pp_rank(tensor): - send(tensor, next_pp_rank(), group=get_pp_group()) - - -def recv_from_prev_pp_rank(tensor): - output = recv(tensor, prev_pp_rank(), group=get_pp_group()) - return output - - class ReduceFromModelParallelRegion(nn.Cell): "All reduce the input from the model parallel region." @@ -122,7 +67,7 @@ class ReduceFromModelParallelRegion(nn.Cell): class GatherFromModelParallelRegion(nn.Cell): - "Gather the input from model parallel region and concatinate." + "Gather the input from model parallel region and concatenate." def __init__(self): super().__init__() @@ -138,7 +83,32 @@ class GatherFromModelParallelRegion(nn.Cell): # Size and dimension. if self.world_size == 1: return input_ - output = ops.CollectiveGather(dest_rank=dst, group=self.tp_group)(input_.transpose(2, 1, 0)) + output = ops.CollectiveGather(dest_rank=dst, + group=self.tp_group)(input_.transpose( + 2, 1, 0)) if self.tp_rank != dst: return ops.depend(ops.zeros_like(input_), output) return output.transpose(2, 1, 0) + + +class AllGatherFromModelParallelRegion(nn.Cell): + """ + Gather the input from world parallel region and concatenate, simultaneously perform + transpose operation on input. + """ + + def __init__(self): + super().__init__() + self.world_size = get_tensor_model_parallel_world_size() + if self.world_size > 1: + self.tp_group = get_tp_group().device_group._name + self.all_gather_into_tensor = ops.AllGather(group=self.tp_group) + + def construct(self, input_): + # Size and dimension. 
+ if self.world_size == 1: + return input_ + input_ = ops.swapaxes(input_, 0, -1) + output = self.all_gather_into_tensor(input_) + output = ops.swapaxes(output, 0, -1) + return output diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py index ff6ea4da2..747023347 100644 --- a/vllm_mindspore/model_executor/layers/rotary_embedding.py +++ b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -16,12 +18,15 @@ # ============================================================================ import math +import numpy as np + from typing import Any, Dict, List, Optional, Tuple, Union import mindspore -import numpy as np from mindspore import Tensor, mint, nn, ops from mindspore.common import dtype as mstype +from mindspore.ops.auto_generate.gen_ops_prim import SliceExt + from transformers import PretrainedConfig from vllm.config import get_current_vllm_config @@ -474,9 +479,9 @@ class MRotaryEmbedding(RotaryEmbedding): context_len: int, seq_len: int, ) -> mindspore.Tensor: - return ops.arange( - mrope_position_delta + context_len, - mrope_position_delta + seq_len, + return mint.arange( + int(mrope_position_delta + context_len), + int(mrope_position_delta + seq_len), ).broadcast_to((3, -1)) @@ -531,52 +536,60 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): query: [num_tokens, num_heads * head_size] key: [num_tokens, num_kv_heads * head_size] """ + half_rotary_dim = self.rotary_dim // 2 # prefill if is_prefill: num_tokens = positions.shape[-1] cos, sin = self.freqs_cos[positions], self.freqs_sin[positions] - cos, sin = cos[..., :self.rotary_dim // - 2], sin[..., :self.rotary_dim // 2] + cos = SliceExt()(cos, -1, 0, half_rotary_dim, 1) + sin = SliceExt()(sin, -1, 0, half_rotary_dim, 1) if positions.ndim == 2: - cos_l = ops.split(cos, self.mrope_section, axis=-1) - sin_l = ops.split(sin, self.mrope_section, axis=-1) + cos_l = mint.split(cos, self.mrope_section, dim=-1) + sin_l = mint.split(sin, self.mrope_section, dim=-1) cos, sin = (), () for i in range(len( self.mrope_section)): # type: ignore[arg-type] - cos += (cos_l[i][i], ) - sin += (sin_l[i][i], ) + cos_l_select = mint.index_select(cos_l[i], 0, + Tensor([i])).squeeze(0) + cos += (cos_l_select, ) + sin_l_select = mint.index_select(sin_l[i], 0, + Tensor([i])).squeeze(0) + sin += (sin_l_select, ) cos = ops.cat(cos, axis=-1) sin = ops.cat(sin, axis=-1) query_shape = query.shape query = query.view(num_tokens, -1, self.head_size) - query_rot = query[..., :self.rotary_dim] - query_pass = query[..., self.rotary_dim:] + query_rot = SliceExt()(query, -1, 0, self.rotary_dim, 1) + query_pass = SliceExt()(query, -1, self.rotary_dim, + query_shape[-1], 1) query_rot = _apply_rotary_emb(query_rot, cos, sin, self.is_neox_style) query = ops.cat((query_rot, query_pass), axis=-1).view(query_shape) key_shape = key.shape key = key.view(num_tokens, -1, self.head_size) - key_rot = key[..., :self.rotary_dim] - key_pass = key[..., self.rotary_dim:] + key_rot = SliceExt()(key, -1, 0, self.rotary_dim, 1) + key_pass = SliceExt()(key, -1, self.rotary_dim, key_shape[-1], 1) key_rot = _apply_rotary_emb(key_rot, cos, sin, self.is_neox_style) key = ops.cat((key_rot, key_pass), axis=-1).view(key_shape) return query, key # decode - if positions.ndim == 2 and positions.shape[0] == len( - self.mrope_section): # type: ignore[arg-type] - 
num_tokens = positions.shape[-1] + if positions.ndim == 2: cos, sin = self.freqs_cos[positions], self.freqs_sin[positions] - cos, sin = cos[..., :self.rotary_dim // - 2], sin[..., :self.rotary_dim // 2] - cos_l = ops.split(cos, self.mrope_section, axis=-1) - sin_l = ops.split(sin, self.mrope_section, axis=-1) + cos = SliceExt()(cos, -1, 0, half_rotary_dim, 1) + sin = SliceExt()(sin, -1, 0, half_rotary_dim, 1) + cos_l = mint.split(cos, self.mrope_section, dim=-1) + sin_l = mint.split(sin, self.mrope_section, dim=-1) cos, sin = (), () for i in range(len(self.mrope_section)): # type: ignore[arg-type] - cos += (cos_l[i][i], ) - sin += (sin_l[i][i], ) + cos_l_select = mint.index_select(cos_l[i], 0, + Tensor([i])).squeeze(0) + cos += (cos_l_select, ) + sin_l_select = mint.index_select(sin_l[i], 0, + Tensor([i])).squeeze(0) + sin += (sin_l_select, ) cos = ops.cat(cos, axis=-1) sin = ops.cat(sin, axis=-1) freqs_cos = ops.cat([cos, cos], axis=-1).squeeze(1) diff --git a/vllm_mindspore/model_executor/models/attention_mask.py b/vllm_mindspore/model_executor/models/attention_mask.py index ccfcfdb3d..0df3c30ab 100644 --- a/vllm_mindspore/model_executor/models/attention_mask.py +++ b/vllm_mindspore/model_executor/models/attention_mask.py @@ -1,3 +1,5 @@ +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,15 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ - """ infer attention mask. """ import numpy as np - +import mindspore as ms from mindspore import Tensor, mint from mindspore import dtype as mstype - r""" PA:ASD-V2.1.5 1.MLA + Q_seqlen =1: no mask.(BF16 mask(0/-10000), FP16 mask(0/-10000)). 
@@ -48,20 +48,29 @@ class LowerTriangularMask: prefill_mask_coeff = 1.0 if self.dtype == mstype.bfloat16 else -10000.0 - self.prefill_mask = Tensor(np.triu(np.ones(shape=(128, 128), dtype=np.float16), k=1) * prefill_mask_coeff, - dtype=self.dtype) + self.prefill_mask = Tensor( + np.triu(np.ones(shape=(128, 128), dtype=np.float16), k=1) * + prefill_mask_coeff, + dtype=self.dtype) - self.decode_mask = Tensor(np.triu(np.ones(shape=(self.max_model_len, self.max_model_len), dtype=np.int8), k=1), + self.decode_mask = Tensor(np.triu(np.ones( + shape=(self.max_model_len, self.max_model_len), dtype=np.int8), + k=1), dtype=self.dtype) * -10000 self.hard_mask = mint.zeros((1, 1), dtype=dtype) - def gen_attention_mask(self, is_prefill, position_ids, query_lens): + def gen_attention_mask(self, + is_prefill, + position_ids, + query_lens, + attn_metadata=None): if is_prefill: attention_mask = self.prefill_mask else: if max(query_lens) > 1: - attention_mask = mint.index_select(self.decode_mask, 0, position_ids) + attention_mask = mint.index_select(self.decode_mask, 0, + position_ids) else: attention_mask = self.hard_mask return attention_mask @@ -79,5 +88,44 @@ class MLALowerTriangularMask(LowerTriangularMask): super().__init__(dtype, max_model_len) decode_mask_coeff = 1.0 if self.dtype == mstype.bfloat16 else -10000.0 - self.decode_mask = Tensor(np.triu(np.ones(shape=(self.max_model_len, self.max_model_len), dtype=np.int8), k=1), + self.decode_mask = Tensor(np.triu(np.ones( + shape=(self.max_model_len, self.max_model_len), dtype=np.int8), + k=1), dtype=self.dtype) * decode_mask_coeff + + +class MultiModalLowerTriangularMask(LowerTriangularMask): + r""" + Provide multi modal Infer model attention mask. + Args: + dtype (ms dtype): The compute type of Infer model. + max_model_len (int): The max model length of Infer model. + """ + + def __init__(self, dtype, max_model_len): + + super().__init__(dtype, max_model_len) + + def gen_attention_mask(self, + is_prefill, + position_ids, + query_lens, + attn_metadata=None): + if is_prefill: + attention_mask = self.prefill_mask + else: + if max(query_lens) > 1: + seq_lens_np = attn_metadata.seq_lens_np + context_lens_np = attn_metadata.context_lens.asnumpy() + mm_position_ids_list = [] + for i in range(len(seq_lens_np)): + mm_position_ids_list.append( + np.arange(context_lens_np[i], seq_lens_np[i])) + mm_position_ids = np.concatenate(mm_position_ids_list) + mm_position_ids = ms.Tensor(mm_position_ids, + dtype=position_ids.dtype) + attention_mask = mint.index_select(self.decode_mask, 0, + mm_position_ids) + else: + attention_mask = self.hard_mask + return attention_mask diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py index 4a9608451..d2db9794d 100644 --- a/vllm_mindspore/model_executor/models/model_base.py +++ b/vllm_mindspore/model_executor/models/model_base.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. 
# @@ -17,12 +19,9 @@ import os from abc import abstractmethod -from typing import Iterable, List, Optional, Set, Tuple, Union, Dict +from typing import Iterable, Optional, Set, Tuple, Union, Dict import numpy as np -import mindspore as ms -from mindspore import Tensor, mutable, nn - from vllm.attention.backends.abstract import AttentionType from vllm.config import VllmConfig, get_current_vllm_config from vllm.forward_context import get_forward_context @@ -41,6 +40,7 @@ from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata class AttentionWrapper: + def __init__(self): vllm_config = get_current_vllm_config() block_size = vllm_config.cache_config.block_size @@ -49,13 +49,10 @@ class AttentionWrapper: head_size = vllm_config.model_config.get_head_size() num_block = 0 self.kv_shape = [num_block, block_size, num_kv_heads, head_size] - self.kv_cache = [ - ( - ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), - ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), - ) - for _ in range(vllm_config.parallel_config.pipeline_parallel_size) - ] + self.kv_cache = [( + ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), + ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype), + ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size)] self.attn_type = AttentionType.DECODER # add for v1 @@ -67,11 +64,13 @@ class AttentionWrapper: class MLAAttentionWrapper(AttentionWrapper): + def __init__(self): super().__init__() vllm_config = get_current_vllm_config() self.kv_cache = [ - (ms.mint.zeros(self.kv_shape, dtype=vllm_config.model_config.dtype),) + (ms.mint.zeros(self.kv_shape, + dtype=vllm_config.model_config.dtype), ) for _ in range(vllm_config.parallel_config.pipeline_parallel_size) ] @@ -121,7 +120,7 @@ class MsModelBase: ) def set_modules(self, model_dicts: Dict[str, nn.Cell]): - self.modules_dict = model_dicts + self.modules_dict = model_dicts # type: ignore[assignment] def _check_modules_valid(self): if self.modules_dict is None: @@ -130,7 +129,8 @@ class MsModelBase: def named_parameters(self): self._check_modules_valid() - for cell_name, module in self.modules_dict.items(): + for cell_name, module in self.modules_dict.items( + ): # type: ignore[attr-defined] for par_name, par in module.parameters_and_names(): if cell_name != "self": par_name = cell_name + "." + par_name @@ -141,7 +141,8 @@ class MsModelBase: self._check_modules_valid() params_dict = dict() - for name, module in self.modules_dict.items(): + for name, module in self.modules_dict.items( + ): # type: ignore[attr-defined] module_params = module.parameters_dict() if name != "self": new_module_params = dict() @@ -155,7 +156,8 @@ class MsModelBase: def named_modules(self, remove_duplicate: bool = True): self._check_modules_valid() - for name, module in self.modules_dict.items(): + for name, module in self.modules_dict.items( + ): # type: ignore[attr-defined] for module_name, sub_module in module.cells_and_names(): if name != "self": module_name = name + "." 
+ module_name @@ -177,7 +179,8 @@ class MsModelBase: def eval(self): self._check_modules_valid() - for _, module in self.modules_dict.items(): + for _, module in self.modules_dict.items( + ): # type: ignore[attr-defined] module.set_train(False) return self @@ -190,13 +193,15 @@ class MsModelBase: inputs_embeds: Optional[Tensor] = None, previous_hidden_states: Optional[Tensor] = None, spec_step_idx: int = 0, + **kwargs, ) -> Union[Tensor, IntermediateTensors]: return self.forward(input_ids, positions, intermediate_tensors, inputs_embeds, previous_hidden_states=previous_hidden_states, - spec_step_idx=spec_step_idx) + spec_step_idx=spec_step_idx, + **kwargs) def forward(self, input_ids: Tensor, @@ -211,9 +216,9 @@ class MsModelBase: value_cache = [] forward_context = get_forward_context() for i in range(self.config.num_hidden_layers): - k_cache = self.kv_caches[i].kv_cache[ + k_cache = self.kv_caches[i].kv_cache[ # type: ignore[attr-defined] forward_context.virtual_engine][0] - v_cache = self.kv_caches[i].kv_cache[ + v_cache = self.kv_caches[i].kv_cache[ # type: ignore[attr-defined] forward_context.virtual_engine][1] key_cache.append(k_cache) value_cache.append(v_cache) @@ -238,11 +243,16 @@ class MsModelBase: @abstractmethod def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: - raise NotImplementedError("Function load_weights should be Implemented!") - + raise NotImplementedError( + "Function load_weights should be Implemented!") def _dummy_attention_metadata(self, input_ids: Tensor, positions: Tensor): - input_len = input_ids.shape[0] + if input_ids is not None: + input_len = input_ids.shape[0] + elif positions is not None: + # input_ids is None in multi modal model with v1 arch + input_len = positions.shape[-1] + max_seq_len = ms.Tensor(input_len, dtype=ms.int32) seq_lengths = ms.Tensor([input_len], dtype=ms.int32) q_seq_lens_np = np.array([input_len], dtype=np.int32) @@ -263,14 +273,13 @@ class MsModelBase: # To enforce prefill and decode are both complied in warmup process. # So set max_context_lens to 0 for prefill and 1 for decode. 
max_context_lens=0 if not self.set_flags else 1, - query_start_loc = None - ) - + query_start_loc=None) def prepare_base_inputs(self, input_ids, positions): attn_metadata = get_forward_context().attn_metadata if attn_metadata is None: - attn_metadata = self._dummy_attention_metadata(input_ids, positions) + attn_metadata = self._dummy_attention_metadata( + input_ids, positions) key_cache, value_cache = self.get_kvcache() if not envs.VLLM_USE_V1: # V0 @@ -287,7 +296,8 @@ class MsModelBase: seq_lens_np = np.array(seq_lens, dtype=np.int32) query_lens_np = np.array(query_lens, dtype=np.int32) kv_cache_lens = seq_lens_np - query_lens_np - if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max() == 0: + if attn_metadata.num_decode_tokens == 0 and kv_cache_lens.max( + ) == 0: is_prefill = True else: is_prefill = False @@ -296,13 +306,16 @@ class MsModelBase: is_prefill = attn_metadata.max_context_lens == 0 query_lens_np = attn_metadata.q_seq_lens_np seq_lens_np = attn_metadata.seq_lens_np - + + if input_ids is not None: + input_ids = input_ids.astype(ms.int32) q_seq_lens = ms.Tensor(query_lens_np, dtype=ms.int32) position_ids = ms.Tensor(positions, dtype=ms.int32) - attention_mask = self.casual_mask.gen_attention_mask(is_prefill, positions, query_lens_np) + attention_mask = self.casual_mask.gen_attention_mask( # type: ignore[attr-defined] + is_prefill, positions, query_lens_np, attn_metadata) model_inputs = {} - model_inputs["input_ids"] = input_ids.astype(ms.int32) + model_inputs["input_ids"] = input_ids model_inputs["batch_valid_length"] = ms.from_numpy(seq_lens_np) model_inputs["block_tables"] = attn_metadata.block_tables model_inputs["slot_mapping"] = attn_metadata.slot_mapping @@ -316,33 +329,63 @@ class MsModelBase: class NativeModel(MsModelBase): + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__(vllm_config=vllm_config, prefix=prefix) self.quant_config = vllm_config.quant_config if vllm_config.lora_config is not None: # native model lora only support pynative mode now vllm_config.model_config.enforce_eager = True - self.is_graph_mode = False if vllm_config.model_config.enforce_eager else True + self.is_graph_mode = bool(not vllm_config.model_config.enforce_eager) self.prev_prefill = False self.run_model = None - def common_preprocess(self, vllm_config, prefix = ""): - self.set_modules({"model": self.model, "lm_head": self.lm_head}) - - self.casual_mask = LowerTriangularMask(dtype=self.model_config.dtype, - max_model_len=self.model_config.max_model_len) - self.kv_caches = [AttentionWrapper() for i in range(self.config.num_hidden_layers)] + def common_preprocess(self, vllm_config, prefix=""): + self.set_modules({ + "model": self.model, + "lm_head": self.lm_head + }) # type: ignore[attr-defined] + + self.casual_mask = LowerTriangularMask( + dtype=self.model_config.dtype, + max_model_len=self.model_config.max_model_len) + self.kv_caches = [ + AttentionWrapper() for i in range(self.config.num_hidden_layers) + ] compilation_config = vllm_config.compilation_config if prefix in compilation_config.static_forward_context: raise ValueError(f"Duplicate layer name: {prefix}") for i in range(self.config.num_hidden_layers): - compilation_config.static_forward_context[str(i)] = self.kv_caches[i] + compilation_config.static_forward_context[str( + i)] = self.kv_caches[i] + + def set_model_inputs(self, input_ids, position_ids, intermediate_tensors, + inputs_embeds, is_prefill): + if input_ids is None: + dyn_input_ids = None + else: + dyn_input_ids = 
ms.Tensor(shape=[None] * input_ids.ndim, + dtype=mstype.int32) + + if position_ids is None: + dyn_position_ids = None + else: + dyn_position_ids = ms.Tensor(shape=[None] * position_ids.ndim, + dtype=mstype.int32) + if inputs_embeds is None: + dyn_inputs_embeds = None + else: + dyn_inputs_embeds = ms.Tensor(shape=[None] * inputs_embeds.ndim, + dtype=inputs_embeds.dtype) - def set_model_inputs(self, is_prefill): - dyn_input_ids = Tensor(shape=[None], dtype=mstype.int32) - dyn_position_ids = Tensor(shape=[None], dtype=mstype.int32) + if intermediate_tensors is None: + dyn_intermediate_tensors = None + else: + dyn_intermediate_tensors = ms.Tensor( + shape=[None] * intermediate_tensors.ndim, + dtype=intermediate_tensors.dtype) block_size = self.cache_config.block_size num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) @@ -359,19 +402,19 @@ class NativeModel(MsModelBase): dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) dyn_value_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) - dyn_value_caches = mutable([dyn_value_cache for _ in range(num_layers)]) - - dyn_slot_mapping = Tensor(shape=[None, ], dtype=mstype.int32) - dynamic_attention_mask = Tensor(shape=[None, None], dtype=self.model_config.dtype) - dyn_batch_valid_length = Tensor(shape=[None,], dtype=mstype.int32) - dyn_q_seq_lens = Tensor(shape=[None, ], dtype=mstype.int32) + dyn_value_caches = mutable( + [dyn_value_cache for _ in range(num_layers)]) + + dyn_slot_mapping = Tensor(shape=[None], dtype=mstype.int32) + dynamic_attention_mask = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) - dyn_intermediate_tensors = None - dyn_inputs_embeds = None self.model.set_inputs( dyn_input_ids, dyn_position_ids, - dyn_key_caches, + dyn_key_caches, # type: ignore[attr-defined] dyn_value_caches, is_prefill, dyn_slot_mapping, @@ -380,11 +423,17 @@ class NativeModel(MsModelBase): dyn_q_seq_lens, dyn_block_tables, dyn_intermediate_tensors, - dyn_inputs_embeds - ) + dyn_inputs_embeds) + + dynamic_hidden_states = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + self.lm_head.set_inputs( + dynamic_hidden_states) # type: ignore[attr-defined] - def prepare_inputs(self, input_ids, positions, intermediate_tensors, inputs_embeds): - model_inputs, is_prefill = self.prepare_base_inputs(input_ids, positions) + def prepare_inputs(self, input_ids, positions, intermediate_tensors, + inputs_embeds): + model_inputs, is_prefill = self.prepare_base_inputs( + input_ids, positions) # for multimodal model model_inputs["intermediate_tensors"] = intermediate_tensors @@ -392,27 +441,31 @@ class NativeModel(MsModelBase): return model_inputs, is_prefill - def exec_model( - self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: IntermediateTensors = None, - inputs_embeds: Tensor = None, - **kwargs - ): - model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, intermediate_tensors, inputs_embeds) + def exec_model(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs): + model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, + intermediate_tensors, + inputs_embeds) if self.prev_prefill != is_prefill and self.is_graph_mode: - 
self.set_model_inputs(is_prefill) + self.set_model_inputs(input_ids, positions, intermediate_tensors, + inputs_embeds, is_prefill) self.prev_prefill = is_prefill - # for dummy_attention_metadata + # for dummy_attention_metadata if is_prefill and not self.set_flags: self.set_flags = True if self.run_model is None: - self.run_model = ms.jit(function=self.model, jit_level='O0') if self.is_graph_mode else self.model - model_output = self.run_model( + self.run_model = ms.jit( + function=self.model, # type: ignore[attr-defined] + jit_level='O0' + ) if self.is_graph_mode else self.model # type: ignore[attr-defined] + model_output = self.run_model( # type: ignore[misc] input_ids=model_inputs["input_ids"], positions=model_inputs["position_ids"], key_caches=model_inputs["key_cache"], @@ -425,6 +478,6 @@ class NativeModel(MsModelBase): block_tables=model_inputs["block_tables"], intermediate_tensors=model_inputs["intermediate_tensors"], inputs_embeds=model_inputs["inputs_embeds"], - ) + ) return model_output diff --git a/vllm_mindspore/model_executor/models/qwen2.py b/vllm_mindspore/model_executor/models/qwen2.py index 27cf2b234..87c54c212 100644 --- a/vllm_mindspore/model_executor/models/qwen2.py +++ b/vllm_mindspore/model_executor/models/qwen2.py @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import numpy as np from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, Union) @@ -25,19 +24,14 @@ if TYPE_CHECKING: else: Qwen2Config = None -import mindspore as ms from mindspore import Parameter, Tensor, mint, nn -from mindspore.common import dtype as mstype -import vllm.envs as envs from vllm.attention.backends.abstract import AttentionType from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size -from vllm.forward_context import get_forward_context from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.models.interfaces import SupportsLoRA from vllm.sequence import IntermediateTensors -from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm_mindspore.attention import Attention from vllm_mindspore.model_executor.layers.activation import SwiGLU @@ -53,15 +47,11 @@ from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) from vllm_mindspore.model_executor.model_loader.weight_utils import \ default_weight_loader -from vllm_mindspore.model_executor.models.attention_mask import \ - LowerTriangularMask -from vllm_mindspore.model_executor.models.model_base import (AttentionWrapper, - NativeModel) +from vllm_mindspore.model_executor.models.model_base import NativeModel from vllm_mindspore.model_executor.models.utils import ( PPMissingLayer, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE class Qwen2MLP(nn.Cell): @@ -469,14 +459,12 @@ class Qwen2ForCausalLM(NativeModel, SupportsLoRA): self.common_preprocess(vllm_config, prefix) - def forward( - self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: IntermediateTensors = None, - inputs_embeds: Tensor = None, - **kwargs - ) -> Union[Tensor, IntermediateTensors]: + def forward(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: 
IntermediateTensors = None,
+                inputs_embeds: Tensor = None,
+                **kwargs) -> Union[Tensor, IntermediateTensors]:
         hidden_states = self.exec_model(input_ids, positions,
                                         intermediate_tensors, inputs_embeds)
         return hidden_states
diff --git a/vllm_mindspore/model_executor/models/qwen2_5_vl.py b/vllm_mindspore/model_executor/models/qwen2_5_vl.py
new file mode 100644
index 000000000..e7fc1be50
--- /dev/null
+++ b/vllm_mindspore/model_executor/models/qwen2_5_vl.py
@@ -0,0 +1,1079 @@
+# SPDX-License-Identifier: Apache-2.0
+# type: ignore
+# isort:skip_file
+# Adapted from
+# https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/models/qwen2_5_vl.py
+# Copyright 2025 Huawei Technologies Co., Ltd
+# Copyright 2025 The vLLM team.
+# Copyright 2025 The Qwen Team.
+# Copyright 2025 The HuggingFace Inc. team.
+# All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Inference-only Qwen2.5-VL model compatible with HuggingFace weights."""
+import os
+from functools import partial
+from typing import Callable, Iterable, Mapping, Optional, Set, Tuple, Union, Dict, Any
+
+import math
+import mindspore as ms
+import mindspore.nn as nn
+import mindspore.mint as mint
+import mindspore.ops as ops
+import mindspore.mint.nn.functional as F
+from mindspore import dtype as mstype
+
+from vllm_mindspore.model_executor.layers.layernorm import RMSNorm
+from vllm_mindspore.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear
+from vllm_mindspore.model_executor.layers.logits_processor import LogitsProcessor
+from vllm_mindspore.model_executor.layers.quantization.base_config import QuantizationConfig
+from vllm_mindspore.model_executor.layers.sampler import SamplerOutput, get_sampler
+from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
+from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader
+from vllm_mindspore.model_executor.models.model_base import NativeModel, AttentionWrapper
+from vllm_mindspore.model_executor.models.interfaces import SupportsMultiModal
+from vllm_mindspore.model_executor.models.qwen2 import Qwen2Model  # type: ignore[attr-defined]
+from vllm_mindspore.model_executor.models.utils import PPMissingLayer, WeightsMapper, maybe_prefix, \
+    merge_multimodal_embeddings
+from vllm_mindspore.model_executor.models.attention_mask import MultiModalLowerTriangularMask
+from vllm_mindspore.distributed.communication_op import AllGatherFromModelParallelRegion
+
+from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.model_executor.models.module_mapping import MultiModelKeys
+from 
vllm.model_executor.models.qwen2_vl import Qwen2VLDummyInputsBuilder as Qwen2_5_VLDummyInputsBuilder +from vllm.model_executor.models.qwen2_vl import Qwen2VLMultiModalProcessor +from vllm.model_executor.models.qwen2_5_vl import Qwen2_5_VLImageInputs, Qwen2_5_VLVideoInputs, \ + Qwen2_5_VLImagePixelInputs, Qwen2_5_VLImageEmbeddingInputs, Qwen2_5_VLVideoPixelInputs, \ + Qwen2_5_VLVideoEmbeddingInputs, Qwen2_5_VLProcessingInfo + +from vllm.config import VllmConfig +from vllm.logger import init_logger +from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import MultiModalFieldConfig, MultiModalKwargs +from vllm.multimodal.processing import PromptReplacement +from vllm.multimodal.parse import MultiModalDataItems +from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank +from vllm.distributed import utils as dist_utils +from vllm.sequence import IntermediateTensors +from vllm.transformers_utils.config import uses_mrope + +logger = init_logger(__name__) + +_ACTIVATION_REGISTRY = {"silu": F.silu} + +# === Vision Inputs === # + + +class _Qwen2VLMultiModalProcessor(Qwen2VLMultiModalProcessor): + + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_processor_mm_kwargs: Mapping[str, Any], + out_mm_kwargs: MultiModalKwargs, + ) -> list[PromptReplacement]: + hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs) + image_processor = self.info.get_image_processor( + **hf_processor_mm_kwargs) + tokenizer = self.info.get_tokenizer() + vocab = tokenizer.get_vocab() + + placeholder = { + "image": vocab[hf_processor.image_token], + "video": vocab[hf_processor.video_token], + } + + merge_length = image_processor.merge_size**2 + + def get_replacement_qwen2vl(item_idx: int, modality: str): + grid_thw = out_mm_kwargs[f"{modality}_grid_thw"][item_idx] + assert isinstance(grid_thw, ms.Tensor) + + num_tokens = int(grid_thw.prod()) // merge_length + return [placeholder[modality]] * num_tokens + + return [ + PromptReplacement( + modality=modality, + target=[placeholder[modality]], + replacement=partial(get_replacement_qwen2vl, + modality=modality), + ) for modality in ("image", "video") + ] + + +# === Vision Encoder === # + + +class Qwen2_5_VisionMLP(nn.Cell): + + def __init__(self, + in_features: int, + hidden_features: int, + bias: bool = False, + act_fn: Callable[[ms.Tensor], ms.Tensor] = F.silu, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): + super().__init__() + self.gate_proj = ColumnParallelLinear(in_features, + hidden_features, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.gate_proj", + params_dtype=ms.bfloat16) + self.up_proj = ColumnParallelLinear(in_features, + hidden_features, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.up_proj", + params_dtype=ms.bfloat16) + self.down_proj = RowParallelLinear(hidden_features, + in_features, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.down_proj", + params_dtype=ms.bfloat16) + self.act_fn = act_fn + + def construct(self, x: ms.Tensor): + x_gate, _ = self.gate_proj(x) + x_gate = self.act_fn(x_gate) + x_up, _ = self.up_proj(x) + x_down, _ = self.down_proj(x_gate * x_up) + return x_down + + +def apply_rotary_pos_emb_flashatt( + q: ms.Tensor, k: ms.Tensor, cos: ms.Tensor, + sin: ms.Tensor) -> Tuple[ms.Tensor, ms.Tensor]: + q_embed = ops.rotary_position_embedding(q.float(), cos, sin).type_as(q) + k_embed = ops.rotary_position_embedding(k.float(), cos, sin).type_as(k) + return q_embed, 
k_embed + + +class Qwen2_5_VisionAttention(nn.Cell): + + def __init__( + self, + embed_dim: int, + num_heads: int, + projection_size: int, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + # Per attention head and per partition values. + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + self.hidden_size_per_attention_head = dist_utils.divide( + projection_size, num_heads) + self.num_attention_heads_per_partition = dist_utils.divide( + num_heads, self.tp_size) + self.num_heads = num_heads + self.head_dim = self.hidden_size_per_attention_head + + self.qkv = ColumnParallelLinear(input_size=embed_dim, + output_size=3 * projection_size, + quant_config=quant_config, + prefix=f"{prefix}.qkv", + params_dtype=ms.bfloat16) + self.proj = RowParallelLinear(input_size=projection_size, + output_size=embed_dim, + quant_config=quant_config, + prefix=f"{prefix}.proj", + params_dtype=ms.bfloat16) + self.tensor_model_parallel_all_gather = AllGatherFromModelParallelRegion( + ) + + def split_tensor_along_last_dim( + self, + tensor: ms.Tensor, + num_partitions: int, + contiguous_split_chunks: bool = False, + ): + """ Split a tensor along its last dimension. + + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + + Returns: + A list of Tensors + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = dist_utils.divide(tensor.shape[last_dim], + num_partitions) + # Split. + tensor_list = mint.split(tensor, last_dim_size, dim=last_dim) + # NOTE: torch.split does not create contiguous tensors by default. + + return tensor_list + + def split_qkv(self, qkv: ms.Tensor) -> tuple[ms.Tensor, ...]: + # [s, 3 * head * head_dim] + seq_len, _ = qkv.shape + if self.tp_size > 1: + qkv = self.tensor_model_parallel_all_gather(qkv) + + # [s, 3 * head * head_dim] -> 3 * [s, head * head_dim] + q, k, v = mint.chunk(qkv, 3, dim=-1) + + # 3 * [s, head * head_dim] + if self.tp_size > 1: + splitter = partial(self.split_tensor_along_last_dim, + num_partitions=self.tp_size) + q = splitter(q)[self.tp_rank] + k = splitter(k)[self.tp_rank] + v = splitter(v)[self.tp_rank] + + # 3 * [s, head * head_dim] -> 3 * [s, head, head_dim] + new_shape = (seq_len, self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head) + q, k, v = (x.view(*new_shape) for x in (q, k, v)) + return q, k, v + + def construct( + self, + x: ms.Tensor, + cu_seqlens: ms.Tensor, + position_embeddings: Tuple[ms.Tensor, ms.Tensor], + ) -> ms.Tensor: + seq_length = x.shape[0] + x, _ = self.qkv(x) + q, k, v = self.split_qkv(x) + + cos, sin = position_embeddings + q, k = apply_rotary_pos_emb_flashatt(mint.unsqueeze(q, 0), + mint.unsqueeze(k, 0), cos, sin) + + q = mint.squeeze(q, 0) + k = mint.squeeze(k, 0) + + context_layer = ops.flash_attention_score( + q, + k, + v, + self.num_heads // self.tp_size, + actual_seq_qlen=cu_seqlens, + actual_seq_kvlen=cu_seqlens, + scalar_value=1 / math.sqrt(q.shape[-1]), + input_layout="TND", + ).reshape(seq_length, -1) + output, _ = self.proj(context_layer) + return output + + +class Qwen2_5_VisionBlock(nn.Cell): + + def __init__( + self, + dim: int, + num_heads: int, + mlp_hidden_dim: int, + act_fn: Callable[[ms.Tensor], ms.Tensor] = F.silu, + norm_layer: Optional[Callable[[int], nn.Cell]] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + 
) -> None: + super().__init__() + if norm_layer is None: + norm_layer = partial(mint.nn.LayerNorm, + eps=1e-6, + dtype=ms.bfloat16) + self.norm1 = norm_layer(dim) + self.norm2 = norm_layer(dim) + self.attn = Qwen2_5_VisionAttention(embed_dim=dim, + num_heads=num_heads, + projection_size=dim, + quant_config=quant_config, + prefix=f"{prefix}.attn") + self.mlp = Qwen2_5_VisionMLP(dim, + mlp_hidden_dim, + act_fn=act_fn, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.mlp") + + def construct( + self, x: ms.Tensor, cu_seqlens: ms.Tensor, + position_embeddings: Tuple[ms.Tensor, ms.Tensor]) -> ms.Tensor: + x = x + self.attn(self.norm1(x), + cu_seqlens=cu_seqlens, + position_embeddings=position_embeddings) + x = x + self.mlp(self.norm2(x)) + return x + + +class Qwen2_5_VisionPatchEmbed(nn.Cell): + + def __init__( + self, + patch_size: int = 14, + temporal_patch_size: int = 2, + in_channels: int = 3, + hidden_size: int = 1152, + ) -> None: + super().__init__() + self.patch_size = patch_size + self.temporal_patch_size = temporal_patch_size + self.hidden_size = hidden_size + + kernel_size = (temporal_patch_size, patch_size, patch_size) + self.proj = mint.nn.Conv3d(in_channels, + hidden_size, + kernel_size=kernel_size, + stride=kernel_size, + bias=False, + dtype=ms.bfloat16) + + def construct(self, x: ms.Tensor) -> ms.Tensor: + L, C = x.shape + x = x.view(L, -1, self.temporal_patch_size, self.patch_size, + self.patch_size) + x = self.proj(x).view(L, self.hidden_size) + return x + + +class Qwen2_5_VisionPatchMerger(nn.Cell): + + def __init__( + self, + d_model: int, + context_dim: int, + norm_layer: Optional[Callable[[int], nn.Cell]] = None, + spatial_merge_size: int = 2, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = context_dim * (spatial_merge_size**2) + if norm_layer is None: + norm_layer = partial(mint.nn.LayerNorm, + eps=1e-6, + dtype=ms.bfloat16) + self.ln_q = norm_layer(context_dim) + self.mlp = nn.CellList([ + ColumnParallelLinear(self.hidden_size, + self.hidden_size, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.mlp.0", + params_dtype=ms.bfloat16), + nn.GELU(), + RowParallelLinear(self.hidden_size, + d_model, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.mlp.2", + params_dtype=ms.bfloat16), + ]) + + def construct(self, x: ms.Tensor) -> ms.Tensor: + x = self.ln_q(x) + x = x.view(-1, self.hidden_size) + + mlp_fc1, mlp_act, mlp_fc2 = self.mlp + x_parallel, _ = mlp_fc1(x) + x_parallel = mlp_act(x_parallel) + out, _ = mlp_fc2(x_parallel) + return out + + +class Qwen2_5_VisionRotaryEmbedding(nn.Cell): + + def __init__(self, dim: int, theta: float = 10000.0) -> None: + super().__init__() + self.dim = dim + self.theta = theta + self.inv_freq = 1.0 / (theta**( + mint.arange(0, dim, 2, dtype=ms.float32) / dim)) + self._seq_len_cached = 0 + self._freqs_cached = None + + def update_freqs_cache(self, seqlen: int) -> None: + if seqlen > self._seq_len_cached: + seqlen *= 2 + self._seq_len_cached = seqlen + self.inv_freq = 1.0 / (self.theta**( + mint.arange(0, self.dim, 2, dtype=ms.float32) / self.dim)) + seq = mint.arange(seqlen, dtype=self.inv_freq.dtype) + freqs = mint.outer(seq, self.inv_freq) + self._freqs_cached = freqs + + def construct(self, seqlen: int) -> ms.Tensor: + self.update_freqs_cache(seqlen) + return self._freqs_cached[:seqlen] # type: ignore[index] + + +class Qwen2_5_VisionTransformer(nn.Cell): + + def __init__( + self, + vision_config, + norm_eps: float = 1e-6, + 
quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + + patch_size = vision_config.patch_size + temporal_patch_size = vision_config.temporal_patch_size + in_channels = vision_config.in_channels + depth = vision_config.depth + self.hidden_size = vision_config.hidden_size + self.num_heads = vision_config.num_heads + + # args for get_window_index + self.window_size = vision_config.window_size + self.patch_size = vision_config.patch_size + self.spatial_merge_size = vision_config.spatial_merge_size + self.fullatt_block_indexes = vision_config.fullatt_block_indexes + self.spatial_merge_unit = self.spatial_merge_size**2 + + self.patch_embed = Qwen2_5_VisionPatchEmbed( + patch_size=patch_size, + temporal_patch_size=temporal_patch_size, + in_channels=in_channels, + hidden_size=self.hidden_size, + ) + + norm_layer = partial(RMSNorm, eps=norm_eps, params_dtype=ms.bfloat16) + head_dim = self.hidden_size // self.num_heads + self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2) + + self.blocks = nn.CellList([ + Qwen2_5_VisionBlock( + dim=self.hidden_size, + num_heads=self.num_heads, + mlp_hidden_dim=vision_config.intermediate_size, + act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act], + norm_layer=norm_layer, + quant_config=quant_config, + prefix=f"{prefix}.blocks.{layer_idx}") + for layer_idx in range(depth) + ]) + self.merger = Qwen2_5_VisionPatchMerger( + d_model=vision_config.out_hidden_size, + context_dim=self.hidden_size, + norm_layer=norm_layer, + spatial_merge_size=self.spatial_merge_size, + quant_config=quant_config, + prefix=f"{prefix}.merger", + ) + from mindspore.communication.management import get_rank + self.rank_id = get_rank() + + def set_model_inputs(self): + dyn_x = ms.Tensor(shape=[None, None], dtype=self.dtype) + dyn_rotary_pos_emb = ms.Tensor(shape=[None, None], + dtype=mstype.float32) + dyn_window_index = ms.Tensor(shape=[None], dtype=mstype.int64) + dyn_cu_window_seqlens = ms.Tensor(shape=[None], dtype=mstype.int64) + dyn_grid_thw = ms.Tensor(shape=[None, None], dtype=mstype.int64) + + self.set_inputs( + dyn_x, + dyn_rotary_pos_emb, + dyn_window_index, + dyn_cu_window_seqlens, + dyn_grid_thw, + ) + + @property + def dtype(self) -> ms.Type: + return self.patch_embed.proj.weight.dtype + + def construct( + self, + x: ms.Tensor, + rotary_pos_emb: ms.Tensor, + window_index: ms.Tensor, + cu_window_seqlens: ms.Tensor, + grid_thw: ms.Tensor, + ) -> ms.Tensor: + hidden_states = x.to(dtype=self.dtype) + hidden_states = self.patch_embed(hidden_states) + + cu_window_seqlens = cu_window_seqlens.astype(ms.int32) + cu_window_seqlens = mint.unique_consecutive(cu_window_seqlens) + seq_len, _ = hidden_states.shape + hidden_states = hidden_states.reshape( + seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + hidden_states = hidden_states[window_index] + hidden_states = hidden_states.reshape(seq_len, -1) + rotary_pos_emb = rotary_pos_emb.reshape( + seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + rotary_pos_emb = rotary_pos_emb[window_index] + rotary_pos_emb = rotary_pos_emb.reshape(1, seq_len, 1, -1) + emb = mint.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (mint.cos(emb), mint.sin(emb)) + + grid_thw_1 = grid_thw.index_select(1, ms.Tensor([1])).reshape(-1) + grid_thw_2 = grid_thw.index_select(1, ms.Tensor([2])).reshape(-1) + grid_thw_0 = grid_thw.index_select(1, ms.Tensor([0])).reshape(-1) + cu_seqlens = mint.cumsum(mint.repeat_interleave( + grid_thw_1 * grid_thw_2, 
grid_thw_0), + dim=0, + dtype=ms.int32) + + cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0) + # transformers + for layer_num, blk in enumerate(self.blocks): + if layer_num in self.fullatt_block_indexes: + cu_seqlens_now = cu_seqlens + else: + cu_seqlens_now = cu_window_seqlens + hidden_states = blk(hidden_states, + cu_seqlens=cu_seqlens_now, + position_embeddings=position_embeddings) + + # adapter + hidden_states = self.merger(hidden_states) + reverse_indices = mint.argsort(window_index) + hidden_states = hidden_states[reverse_indices] + return hidden_states + + def load_weights(self, weights: Iterable[Tuple[str, ms.Tensor]], + params_dict: Dict[str, ms.Parameter]) -> Set[str]: + loaded_params: Set[str] = set() + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ] + + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + + +class Qwen2_5_VLMultiModalProcessor(_Qwen2VLMultiModalProcessor): + + def _get_mm_fields_config( + self, + hf_inputs, + hf_processor_mm_kwargs: Mapping[str, object], + ) -> Mapping[str, MultiModalFieldConfig]: + return dict( + **super()._get_mm_fields_config(hf_inputs, hf_processor_mm_kwargs), + second_per_grid_ts=MultiModalFieldConfig.batched("video"), + ) + + +@MULTIMODAL_REGISTRY.register_processor( + Qwen2_5_VLMultiModalProcessor, + info=Qwen2_5_VLProcessingInfo, + dummy_inputs=Qwen2_5_VLDummyInputsBuilder) +class Qwen2_5_VLForConditionalGeneration(NativeModel, SupportsMultiModal): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + # LoRA specific attributes + supported_lora_modules = [ + # language model + "qkv_proj", + "o_proj", + "gate_up_proj", + "down_proj", # Same name with vision encoder + # vision tower + "qkv", + "gate_proj", + "up_proj", + "attn.proj", # Distinguish patch_embed.proj + "fc1", + "fc2", + # projector + "mlp.0", + "mlp.2" + ] + + embedding_modules = {} # type: ignore[var-annotated] + embedding_padding_modules = [] # type: ignore[var-annotated] + + # To ensure correct weight loading and mapping. 
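+    # (Illustrative: with this mapper, an HF checkpoint key such as
+    # "model.embed_tokens.weight" is resolved as
+    # "language_model.model.embed_tokens.weight" at load time.)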
+ hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={ + "lm_head.": "language_model.lm_head.", + "model.": "language_model.model.", + }) + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__(vllm_config=vllm_config, prefix=prefix) + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + multimodal_config = vllm_config.model_config.multimodal_config + + self.config = config + self.multimodal_config = multimodal_config + + self.visual = Qwen2_5_VisionTransformer( + config.vision_config, + norm_eps=getattr(config, "rms_norm_eps", 1e-6), + quant_config=self._maybe_ignore_quant_config(quant_config), + prefix=maybe_prefix(prefix, "visual"), + ) + self.visual = ms.jit( + function=self.visual, + jit_level='O0') if self.is_graph_mode else self.visual + + self.model = Qwen2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + + if get_pp_group().is_last_rank: + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens + else: + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + params_dtype=ms.bfloat16, + quant_config=quant_config, + prefix=maybe_prefix( + prefix, "lm_head")) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = get_sampler() + else: + self.lm_head = PPMissingLayer() + + self.common_preprocess(vllm_config, prefix) + self.spatial_merge_size = config.vision_config.spatial_merge_size + + self.window_size = config.vision_config.window_size + self.patch_size = config.vision_config.patch_size + self.spatial_merge_unit = self.spatial_merge_size**2 + self.hidden_size = config.vision_config.hidden_size + self.num_heads = config.vision_config.num_heads + head_dim = self.hidden_size // self.num_heads + self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2) + if self.is_graph_mode: + self.visual.set_model_inputs() + + def common_preprocess(self, vllm_config, prefix=""): + self.set_modules({ + "visual": self.visual, + "model": self.model, + "lm_head": self.lm_head + }) + self.casual_mask = MultiModalLowerTriangularMask( + dtype=self.model_config.dtype, + max_model_len=self.model_config.max_model_len) + self.kv_caches = [ + AttentionWrapper() for i in range(self.config.num_hidden_layers) + ] + + compilation_config = vllm_config.compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError(f"Duplicate layer name: {prefix}") + for i in range(self.config.num_hidden_layers): + compilation_config.static_forward_context[str( + i)] = self.kv_caches[i] + + def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig): + # GPTQ configs do not have a list of ignored modules, however AutoGPTQ + # seems to avoid vision encoder sections for some models. + # if isinstance(quant_config, (GPTQConfig, GPTQMarlinConfig)): + # return None + return quant_config + + def _validate_and_reshape_mm_tensor(self, mm_input: object, + name: str) -> ms.Tensor: + if not isinstance(mm_input, (ms.Tensor, list)): + raise ValueError(f"Incorrect type of {name}. " + f"Got type: {type(mm_input)}") + if isinstance(mm_input, ms.Tensor): + if mm_input.ndim == 2: + return mm_input + if mm_input.ndim != 3: + raise ValueError(f"{name} should be 2D or batched 3D tensor. 
" + f"Got ndim: {mm_input.ndim} " + f"(shape={mm_input.shape})") + return mint.concat(list(mm_input)) + else: + return mint.concat(mm_input) + + def _parse_and_validate_image_input( + self, **kwargs: object) -> Optional[Qwen2_5_VLImageInputs]: + pixel_values = kwargs.pop("pixel_values", None) + image_embeds = kwargs.pop("image_embeds", None) + image_grid_thw = kwargs.pop("image_grid_thw", None) + + if pixel_values is None and image_embeds is None: + return None + + if pixel_values is not None: + pixel_values = self._validate_and_reshape_mm_tensor( + pixel_values, "image pixel values") + image_grid_thw = self._validate_and_reshape_mm_tensor( + image_grid_thw, "image grid_thw") + + if not isinstance(pixel_values, (ms.Tensor, list)): + raise ValueError("Incorrect type of image pixel values. " + f"Got type: {type(pixel_values)}") + + return Qwen2_5_VLImagePixelInputs(type="pixel_values", + pixel_values=pixel_values, + image_grid_thw=image_grid_thw) + + if image_embeds is not None: + image_embeds = self._validate_and_reshape_mm_tensor( + image_embeds, "image embeds") + image_grid_thw = self._validate_and_reshape_mm_tensor( + image_grid_thw, "image grid_thw") + + if not isinstance(image_embeds, ms.Tensor): + raise ValueError("Incorrect type of image embeddings. " + f"Got type: {type(image_embeds)}") + return Qwen2_5_VLImageEmbeddingInputs( + type="image_embeds", + image_embeds=image_embeds, + image_grid_thw=image_grid_thw) + + return None + + def _parse_and_validate_video_input( + self, **kwargs: object) -> Optional[Qwen2_5_VLVideoInputs]: + pixel_values_videos = kwargs.pop("pixel_values_videos", None) + video_embeds = kwargs.pop("video_embeds", None) + video_grid_thw = kwargs.pop("video_grid_thw", None) + second_per_grid_ts = kwargs.pop("second_per_grid_ts", None) + + if pixel_values_videos is None and video_embeds is None: + return None + + if pixel_values_videos is not None: + pixel_values_videos = self._validate_and_reshape_mm_tensor( + pixel_values_videos, "video pixel values") + video_grid_thw = self._validate_and_reshape_mm_tensor( + video_grid_thw, "video grid_thw") + + return Qwen2_5_VLVideoPixelInputs( + type="pixel_values_videos", + pixel_values_videos=pixel_values_videos, + video_grid_thw=video_grid_thw, + second_per_grid_ts=second_per_grid_ts, + ) + + if video_embeds is not None: + video_embeds = self._validate_and_reshape_mm_tensor( + video_embeds, "video embeds") + video_grid_thw = self._validate_and_reshape_mm_tensor( + video_grid_thw, "video grid_thw") + + if not isinstance(video_embeds, ms.Tensor): + raise ValueError("Incorrect type of video embeddings. 
" + f"Got type: {type(video_embeds)}") + return Qwen2_5_VLVideoEmbeddingInputs( + type="video_embeds", + video_embeds=video_embeds, + video_grid_thw=video_grid_thw) + + return None + + def rot_pos_emb(self, grid_thw: ms.Tensor) -> ms.Tensor: + pos_ids = [] + for t, h, w in grid_thw: + t, h, w = t.item(), h.item(), w.item() + hpos_ids = mint.arange(h).unsqueeze(1).expand((-1, w)) + wpos_ids = mint.arange(w).unsqueeze(0).expand((h, -1)) + + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ).permute(0, 2, 1, 3).flatten() + wpos_ids = wpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ).permute(0, 2, 1, 3).flatten() + pos_ids.append( + mint.tile(mint.stack([hpos_ids, wpos_ids], dim=-1), (t, 1))) + pos_ids = mint.cat(pos_ids, dim=0) + max_grid_size = grid_thw[:, 1:].max().item() + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) + rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) + return rotary_pos_emb + + def get_window_index(self, grid_thw): + window_index = [] + cu_window_seqlens = [ms.Tensor([0])] + window_index_id = 0 + vit_merger_window_size = (self.window_size // + self.spatial_merge_size // self.patch_size) + + for grid_t, grid_h, grid_w in grid_thw: + grid_t, grid_h, grid_w = grid_t.item(), grid_h.item(), grid_w.item( + ) + llm_grid_h = grid_h // self.spatial_merge_size + llm_grid_w = grid_w // self.spatial_merge_size + index = mint.arange(grid_t * llm_grid_h * llm_grid_w).reshape( + grid_t, llm_grid_h, llm_grid_w) + pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size + pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size + num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size + num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size + index_padded = F.pad(index, (0, pad_w, 0, pad_h), 'constant', -100) + index_padded = index_padded.reshape(grid_t, num_windows_h, + vit_merger_window_size, + num_windows_w, + vit_merger_window_size) + index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( + grid_t, num_windows_h * num_windows_w, vit_merger_window_size, + vit_merger_window_size) + seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) + index_padded = index_padded.reshape(-1) + index_new = index_padded[index_padded != -100] + window_index.append(index_new + window_index_id) + cu_seqlens_tmp = mint.cumsum( + seqlens, + 0) * self.spatial_merge_unit + cu_window_seqlens[-1][-1] + cu_window_seqlens.append(cu_seqlens_tmp) + window_index_id += grid_t * llm_grid_h * llm_grid_w + window_index = mint.cat(window_index, dim=0) + cu_window_seqlens = mint.cat(cu_window_seqlens, dim=0) + return window_index, cu_window_seqlens + + def _process_image_input( + self, image_input: Qwen2_5_VLImageInputs) -> tuple[ms.Tensor, ...]: + + grid_thw = image_input["image_grid_thw"] + assert grid_thw.ndim == 2 + + if image_input["type"] == "image_embeds": + image_embeds = image_input["image_embeds"].type(self.visual.dtype) + else: + pixel_values = image_input["pixel_values"].type(self.visual.dtype) + os.environ[ + "MS_DISABLE_INTERNAL_KERNELS_LIST"] = "FlashAttentionScore" + # compute position embedding + rotary_pos_emb = self.rot_pos_emb(grid_thw) + # windows attention + window_index, cu_window_seqlens = self.get_window_index(grid_thw) + image_embeds = self.visual(pixel_values, rotary_pos_emb, + window_index, cu_window_seqlens, + grid_thw) + 
os.environ["MS_DISABLE_INTERNAL_KERNELS_LIST"] = ""
+
+        # Split concatenated embeddings for each image item.
+        merge_size = self.visual.spatial_merge_size
+        sizes = grid_thw.prod(-1) // merge_size // merge_size
+
+        return image_embeds.split(sizes.tolist())
+
+    def _process_video_input(
+            self, video_input: Qwen2_5_VLVideoInputs) -> tuple[ms.Tensor, ...]:
+
+        grid_thw = video_input["video_grid_thw"]
+        assert grid_thw.ndim == 2
+
+        if video_input["type"] == "video_embeds":
+            video_embeds = video_input["video_embeds"].type(self.visual.dtype)
+        else:
+            pixel_values_videos = video_input["pixel_values_videos"].type(
+                self.visual.dtype)
+            os.environ[
+                "MS_DISABLE_INTERNAL_KERNELS_LIST"] = "FlashAttentionScore"
+            rotary_pos_emb = self.rot_pos_emb(grid_thw)
+            # windows attention
+            window_index, cu_window_seqlens = self.get_window_index(grid_thw)
+            video_embeds = self.visual(pixel_values_videos, rotary_pos_emb,
+                                       window_index, cu_window_seqlens,
+                                       grid_thw)
+            os.environ["MS_DISABLE_INTERNAL_KERNELS_LIST"] = ""
+
+        # Split concatenated embeddings for each video item.
+        merge_size = self.visual.spatial_merge_size
+        sizes = grid_thw.prod(-1) // merge_size // merge_size
+
+        return video_embeds.split(sizes.tolist())
+
+    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
+        modalities = {}
+
+        # Preserve the order of modalities if there are multiple of them
+        # from the order of kwargs.
+        for input_key in kwargs:
+            if input_key in ("pixel_values",
+                             "image_embeds") and "images" not in modalities:
+                modalities["images"] = self._parse_and_validate_image_input(
+                    **kwargs)
+            if input_key in ("pixel_values_videos",
+                             "video_embeds") and "videos" not in modalities:
+                modalities["videos"] = self._parse_and_validate_video_input(
+                    **kwargs)
+        return modalities
+
+    def get_multimodal_embeddings(self,
+                                  **kwargs) -> Optional[tuple[ms.Tensor, ...]]:
+
+        modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
+        if not modalities:
+            return None
+
+        # The result multimodal_embeddings is a tuple of tensors, with each
+        # tensor corresponding to a multimodal data item (image or video).
+        multimodal_embeddings: tuple[ms.Tensor, ...] = ()
+
+        # NOTE: It is important to iterate over the keys in this dictionary
+        # to preserve the order of the modalities.
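+        # Illustrative example: a request carrying one image and one video
+        # yields modalities = {"images": ..., "videos": ...} in kwargs
+        # order, so the image embeddings precede the video embeddings in
+        # the returned tuple.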
+ for modality in modalities: + if modality == "images": + image_input = modalities["images"] + vision_embeddings = self._process_image_input(image_input) + multimodal_embeddings += vision_embeddings + if modality == "videos": + video_input = modalities["videos"] + video_embeddings = self._process_video_input(video_input) + multimodal_embeddings += video_embeddings + return multimodal_embeddings + + def get_input_embeddings( + self, + input_ids: ms.Tensor, + multimodal_embeddings: Optional[tuple[ms.Tensor, ...]] = None, + ) -> ms.Tensor: + # input_ids = input_ids.to(mstype.int64) + inputs_embeds = self.model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + [self.config.image_token_id, self.config.video_token_id]) + os.environ["MS_DISABLE_INTERNAL_KERNELS_LIST"] = "" + return inputs_embeds + + def get_input_embeddings_v0( + self, + input_ids: ms.Tensor, + image_input: Optional[tuple[ms.Tensor, ...]] = None, + video_input: Optional[tuple[ms.Tensor, ...]] = None, + ) -> ms.Tensor: + inputs_embeds = self.get_input_embeddings(input_ids) + if image_input is not None: + image_embeds = self._process_image_input(image_input) + inputs_embeds = merge_multimodal_embeddings( + input_ids, + inputs_embeds, + image_embeds, + placeholder_token_id=self.config.image_token_id, + ) + + if video_input is not None: + video_embeds = self._process_video_input(video_input) + inputs_embeds = merge_multimodal_embeddings( + input_ids, + inputs_embeds, + video_embeds, + placeholder_token_id=self.config.video_token_id, + ) + return inputs_embeds + + def forward( + self, + input_ids: ms.Tensor, + positions: ms.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[ms.Tensor] = None, + **kwargs: object, + ) -> Union[ms.Tensor, IntermediateTensors]: + if intermediate_tensors is not None: + inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner from + # `get_multimodal_embeddings` and `get_input_embeddings`, this + # condition is only for v0 compatibility. + elif inputs_embeds is None: + image_input = self._parse_and_validate_image_input(**kwargs) + video_input = self._parse_and_validate_video_input(**kwargs) + + if image_input is None and video_input is None: + inputs_embeds = None + else: + if uses_mrope(self.config): + assert positions.ndim == 2 and positions.shape[0] == 3, ( + "multimodal section rotary embedding requires " + f"(3, seq_len) positions, but got {positions.shape}") + inputs_embeds = self.get_input_embeddings_v0( + input_ids, + image_input=image_input, + video_input=video_input) + input_ids = None + hidden_states = self.exec_model(input_ids, positions, + intermediate_tensors, inputs_embeds) + return hidden_states + + def compute_logits( + self, + hidden_states: ms.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[ms.Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def sample(self, logits: ms.Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights( + self, weights: Iterable[Tuple[str, ms.Tensor]] + ) -> None: # type: ignore[override] + params_dict = self.get_params_dict() + for name, weight in weights: + if "visual." 
in name: + self.visual.load_weights([(name, weight)], params_dict) + else: + self.model.load_weights([(name, weight)], params_dict) + + return None + + def get_mm_mapping(self) -> MultiModelKeys: + """ + Get the module prefix in multimodal models + """ + return MultiModelKeys.from_string_field( + language_model="language_model", + connector="visual.", + tower_model="visual.merger.") diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index 5846f21ae..009d84a06 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -28,6 +28,8 @@ from vllm_mindspore.utils import (is_mindformers_model_backend, _NATIVE_MODELS = { "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), + "Qwen2_5_VLForConditionalGeneration": + ("qwen2_5_vl", "Qwen2_5_VLForConditionalGeneration"), } _MINDFORMERS_MODELS = { diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index bf40c1fdb..493664cda 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -22,7 +24,7 @@ import mindspore as ms from mindspore import mint, ops from vllm.sequence import IntermediateTensors -from vllm_mindspore.multimodal.inputs import NestedTensors +from vllm_mindspore.multimodal.inputs import NestedTensors # type: ignore[attr-defined] from vllm_mindspore.utils import get_valid_dtype WeightsMapping = Mapping[str, Optional[str]] @@ -247,8 +249,7 @@ def merge_multimodal_embeddings( This updates ``inputs_embeds`` in place. """ if isinstance(placeholder_token_id, list): - placeholder_token_id = ms.Tensor(placeholder_token_id, - device=input_ids.device) + placeholder_token_id = ms.Tensor(placeholder_token_id) return _merge_multimodal_embeddings( inputs_embeds, ms.numpy.isin(input_ids, placeholder_token_id), diff --git a/vllm_mindspore/multimodal/inputs.py b/vllm_mindspore/multimodal/inputs.py index 2673ce6ea..8bc938854 100644 --- a/vllm_mindspore/multimodal/inputs.py +++ b/vllm_mindspore/multimodal/inputs.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -# encoding: utf-8 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -15,22 +16,62 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ +from collections import defaultdict +from dataclasses import dataclass from typing import Union, cast - import mindspore +from vllm.multimodal.inputs import BaseMultiModalField, BatchedTensorInputs, JSONTree, json_map_leaves,\ + nested_tensors_equal +from vllm.multimodal import MultiModalKwargs + +NestedTensors = Union[list["NestedTensors"], list[mindspore.Tensor], + mindspore.Tensor, tuple[mindspore.Tensor, ...]] + + +@dataclass +class MultiModalFieldElem: + """ + Represents a keyword argument corresponding to a multi-modal item + in :class:`MultiModalKwargs`. + """ + + modality: str + """ + The modality of the corresponding multi-modal item. + Each multi-modal item can consist of multiple keyword arguments. 
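+    For example, a single image item can arrive as both "pixel_values"
+    and "image_grid_thw" keyword arguments.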
+ """ -from vllm.multimodal.inputs import BatchedTensorInputs, JSONTree, json_map_leaves + key: str + """ + The key of this field in :class:`MultiModalKwargs`, + i.e. the name of the keyword argument to be passed to the model. + """ + data: NestedTensors + """ + The tensor data of this field in :class:`MultiModalKwargs`, + i.e. the value of the keyword argument to be passed to the model. + """ -NestedTensors = Union[list["NestedTensors"], list[mindspore.Tensor], mindspore.Tensor, - tuple[mindspore.Tensor, ...]] + field: "BaseMultiModalField" + """ + Defines how to combine the tensor data of this field with others + in order to batch multi-modal items together for model inference. + """ + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return False + + return ((self.modality, self.key) == (other.modality, other.key) + and nested_tensors_equal(self.data, other.data) + and type(self.field) == type(other.field)) # noqa: E721 -@staticmethod def as_kwargs( batched_inputs: BatchedTensorInputs, *, - device = None, + device=None, ) -> BatchedTensorInputs: # replace as_kwargs of vLLM for multi-model json_inputs = cast(JSONTree[mindspore.Tensor], batched_inputs) @@ -40,4 +81,20 @@ def as_kwargs( json_inputs, ) - return cast(BatchedTensorInputs, json_mapped) \ No newline at end of file + return cast(BatchedTensorInputs, json_mapped) + + +def from_items(items): + """Construct a new :class:`MultiModalKwargs` from multiple items.""" + elems_by_key = defaultdict[str, list[MultiModalFieldElem]](list) + for item in items: + for key, elem in item.items(): + # transform elem.data to tensor, gpu is tensor. + elem.data = mindspore.Tensor(elem.data) + elems_by_key[key].append(elem) + data = { + key: elems[0].field.reduce_data(elems) + for key, elems in elems_by_key.items() if len(elems) > 0 + } + + return MultiModalKwargs(data, items=items) diff --git a/vllm_mindspore/v1/worker/gpu_model_runner.py b/vllm_mindspore/v1/worker/gpu_model_runner.py index f53d49d4d..7f4e3fe16 100644 --- a/vllm_mindspore/v1/worker/gpu_model_runner.py +++ b/vllm_mindspore/v1/worker/gpu_model_runner.py @@ -1,33 +1,46 @@ +#!/usr/bin/env python3 +# type: ignore +# isort:skip_file +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ from typing import Dict, Tuple, List -import gc import numpy as np import torch from mindspore import mutable -import mindspore as ms -from vllm_mindspore.v1.attention.backends.ms_attn import (MsAttentionMetadata, - MsAttentionBackend, - MLABackend) +from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata from vllm_mindspore.utils import get_valid_dtype +from vllm_mindspore.model_executor.layers.rotary_embedding import InferMRotaryEmbedding as MRotaryEmbedding # type: ignore[attr-defined] from vllm.v1.outputs import ModelRunnerOutput from vllm.attention import AttentionType from vllm.v1.kv_cache_interface import FullAttentionSpec, KVCacheSpec, SlidingWindowSpec from vllm.v1.utils import bind_kv_cache -from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.distributed.parallel_state import get_pp_group -from vllm.utils import cdiv from vllm.logger import init_logger from vllm.v1.worker.gpu_input_batch import CachedRequestState -from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding +from vllm.v1.core.sched.output import SchedulerOutput from vllm.sampling_params import SamplingType - logger = init_logger(__name__) + + def _prepare_inputs( - self, - scheduler_output: "SchedulerOutput", + self, + scheduler_output: "SchedulerOutput", # type: ignore[name-defined] ) -> Tuple[MsAttentionMetadata, torch.Tensor]: total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens assert total_num_scheduled_tokens > 0 @@ -50,13 +63,11 @@ def _prepare_inputs( for i, req_id in enumerate(self.input_batch.req_ids): num_tokens = scheduler_output.num_scheduled_tokens[req_id] num_scheduled_tokens[i] = num_tokens - max_num_scheduled_tokens = max(max_num_scheduled_tokens, - num_tokens) + max_num_scheduled_tokens = max(max_num_scheduled_tokens, num_tokens) # Get request indices. # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] - req_indices = np.repeat(self.arange_np[:num_reqs], - num_scheduled_tokens) + req_indices = np.repeat(self.arange_np[:num_reqs], num_scheduled_tokens) # Get batched arange. # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] @@ -73,20 +84,20 @@ def _prepare_inputs( # Get positions. positions_np = self.positions_np[:total_num_scheduled_tokens] np.add(self.input_batch.num_computed_tokens_cpu[req_indices], - arange, - out=positions_np) + arange, + out=positions_np) if self.uses_mrope: self._calc_mrope_positions(scheduler_output) if self.uses_mrope: # Only relevant for models using M-RoPE (e.g, Qwen2-VL) - self.mrope_positions[:, :total_num_scheduled_tokens].copy_( - self.mrope_positions_cpu[:, :total_num_scheduled_tokens], - non_blocking=True) + self.mrope_positions[:, : + total_num_scheduled_tokens] = self.mrope_positions_cpu[:, : + total_num_scheduled_tokens] else: - self.positions[:total_num_scheduled_tokens] = torch.from_numpy(positions_np) - + self.positions[:total_num_scheduled_tokens] = torch.from_numpy( + positions_np) # Get token indices. # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] @@ -96,10 +107,7 @@ def _prepare_inputs( req_indices * self.input_batch.token_ids_cpu.shape[1]) self.input_ids[:total_num_scheduled_tokens] = torch.from_numpy( - np.take(self.input_batch.token_ids_cpu.ravel(), - token_indices, - 0) - ) + np.take(self.input_batch.token_ids_cpu.ravel(), token_indices, 0)) # Calculate the slot mapping. 
# E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] @@ -110,12 +118,12 @@ def _prepare_inputs( block_table_indices = (req_indices * self.max_num_blocks_per_req + positions_np // self.block_size) - - block_numbers = self.input_batch.block_table.block_table_np.ravel()[block_table_indices] + block_numbers = self.input_batch.block_table.block_table_np.ravel( + )[block_table_indices] block_offsets = positions_np % self.block_size np.add(block_numbers * self.block_size, - block_offsets, - out=self.slot_mapping_np[:total_num_scheduled_tokens]) + block_offsets, + out=self.slot_mapping_np[:total_num_scheduled_tokens]) # # Prepare the attention metadata. self.query_start_loc_np[0] = 0 @@ -136,8 +144,7 @@ def _prepare_inputs( common_prefix_len=common_prefix_len, ) - use_spec_decode = len( - scheduler_output.scheduled_spec_decode_tokens) > 0 + use_spec_decode = len(scheduler_output.scheduled_spec_decode_tokens) > 0 if not use_spec_decode: # NOTE(woosuk): Due to chunked prefills, the batch may contain # partial requests. While we should not sample any token @@ -164,7 +171,7 @@ def _prepare_inputs( if self.lora_config: self.set_active_loras(self.input_batch, num_scheduled_tokens) - return attn_metadata, logits_indices, spec_decode_metadata + return attn_metadata, logits_indices, spec_decode_metadata # type: ignore[return-value] def create_block(shape, dtype, name=None, device=None): @@ -172,6 +179,7 @@ def create_block(shape, dtype, name=None, device=None): blocks = mint.empty(shape, dtype=dtype, device=device) return blocks + def initialize_kv_cache(self, kv_cache_config) -> None: """ Initialize KV cache based on `kv_cache_config`. @@ -202,28 +210,29 @@ def initialize_kv_cache(self, kv_cache_config) -> None: assert num_blocks >= kv_cache_config.num_blocks if isinstance(kv_cache_spec, FullAttentionSpec): kv_cache_shape = self.attn_backend.get_kv_cache_shape( - num_blocks, kv_cache_spec.block_size, kv_cache_spec.num_kv_heads, - kv_cache_spec.head_size) + num_blocks, kv_cache_spec.block_size, + kv_cache_spec.num_kv_heads, kv_cache_spec.head_size) dtype = kv_cache_spec.dtype dtype = get_valid_dtype(dtype) current_cache = [] device_type = "CPU" if self.device.type == "cpu" else "Ascend" for i in range(kv_cache_shape[0]): - cache_blocks = create_block( - kv_cache_shape[1:], dtype, device=device_type - ) + cache_blocks = create_block(kv_cache_shape[1:], + dtype, + device=device_type) current_cache.append(mutable(cache_blocks)) kv_caches[layer_name] = mutable(tuple(current_cache)) else: raise NotImplementedError - bind_kv_cache( - kv_caches, - self.vllm_config.compilation_config.static_forward_context, - self.kv_caches) + bind_kv_cache(kv_caches, + self.vllm_config.compilation_config.static_forward_context, + self.kv_caches) -def _update_states(self, scheduler_output: "SchedulerOutput") -> None: +def _update_states( + self, scheduler_output: "SchedulerOutput" +) -> None: # type: ignore[name-defined] """Update the cached states and the persistent batch with the scheduler output. 
@@ -306,14 +315,12 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: second_per_grid_ts = [] for mm_input in self.requests[req_id].mm_inputs: if mm_input.get("image_grid_thw") is not None: - image_grid_thw.extend( - mm_input["image_grid_thw"].tolist()) - if mm_input.get("video_grid_thw") is not None: - video_grid_thw.extend( - mm_input["video_grid_thw"].tolist()) + image_grid_thw.extend(mm_input["image_grid_thw"].tolist()) + if mm_input.get("video_grid_thw") is not None: + video_grid_thw.extend( + mm_input["video_grid_thw"].tolist()) if mm_input.get("second_per_grid_ts") is not None: - second_per_grid_ts.extend( - mm_input["second_per_grid_ts"]) + second_per_grid_ts.extend(mm_input["second_per_grid_ts"]) hf_config = self.model_config.hf_config @@ -339,9 +346,8 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: req_state.num_computed_tokens = num_computed_tokens # Add the sampled token(s) from the previous step (if any). # This doesn't include "unverified" tokens like spec decode tokens. - num_new_tokens = (num_computed_tokens + - len(req_data.new_token_ids) - - req_state.num_tokens) + num_new_tokens = (num_computed_tokens + len(req_data.new_token_ids) - + req_state.num_tokens) if num_new_tokens == 1: # Avoid slicing list in most common case. req_state.output_token_ids.append(req_data.new_token_ids[-1]) @@ -368,8 +374,6 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: # Update the persistent batch. self.input_batch.num_computed_tokens_cpu[req_index] = ( num_computed_tokens) - start_index = (len(req_state.block_ids) - - len(req_data.new_block_ids)) self.input_batch.block_table.append_row(req_data.new_block_ids, req_index) # Add new_token_ids to token_ids_cpu. @@ -391,7 +395,6 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: # NOTE(woosuk): `num_tokens` here may include spec decode tokens. self.input_batch.num_tokens[req_index] = end_token_index - # self.input_batch.token_ids_cpu_tensor.copy_(torch.from_numpy(self.input_batch.token_ids_cpu)) # Check if the batch has changed. If not, we can skip copying the # sampling metadata from CPU to GPU. @@ -402,12 +405,7 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: removed_req_indices = sorted(removed_req_indices, reverse=True) for req_id in req_ids_to_add: req_state = self.requests[req_id] - if removed_req_indices: - # Fill the empty index. - req_index = removed_req_indices.pop() - else: - # Append to the end. - req_index = None + req_index = removed_req_indices.pop() if removed_req_indices else None self.input_batch.add_request(req_state, req_index) # Condense the batched states if there are empty indices. @@ -427,7 +425,7 @@ def wrapper_gpu_model_runner_execute_model(func): return output except Exception as e: logger.warning( - f"Caught exception {str(e)} when processing req_ids {self.input_batch.req_ids}" + f"Caught exception {str(e)} when processing req_ids {self.input_batch.req_ids}" # noqa: G004 ) return ModelRunnerOutput( req_ids=self.input_batch.req_ids, @@ -466,7 +464,7 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: dtype=self.kv_cache_dtype, use_mla=use_mla) elif attn_module.attn_type in (AttentionType.ENCODER, - AttentionType.ENCODER_ONLY): + AttentionType.ENCODER_ONLY): # encoder-only attention does not need KV cache. 
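# Recapping the slot-reuse policy from `_update_states` above in plain
# Python: indices freed by finished requests are handed out (lowest
# first) to newly added requests, and leftovers are appended at the end
# before the batch is condensed. Toy data, not real vLLM structures.
removed_req_indices = sorted([1, 4], reverse=True)   # freed slots
req_ids_to_add = ["req-a", "req-b", "req-c"]

placements = {}
for req_id in req_ids_to_add:
    req_index = removed_req_indices.pop() if removed_req_indices else None
    placements[req_id] = req_index       # None means "append at the end"
print(placements)   # {'req-a': 1, 'req-b': 4, 'req-c': None}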
continue elif attn_module.attn_type == AttentionType.ENCODER_DECODER: @@ -476,3 +474,58 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: f"Unknown attention type: {attn_module.attn_type}") return kv_cache_spec + + +def _calc_mrope_positions( + self, + scheduler_output: "SchedulerOutput"): # type: ignore[name-defined] + mrope_pos_ptr = 0 + for index, req_id in enumerate(self.input_batch.req_ids): + req = self.requests[req_id] + assert req.mrope_positions is not None + + num_computed_tokens = \ + self.input_batch.num_computed_tokens_cpu[index] + num_scheduled_tokens = \ + scheduler_output.num_scheduled_tokens[req_id] + num_prompt_tokens = len(req.prompt_token_ids) + + if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens: + prompt_part_len = max(0, num_prompt_tokens - num_computed_tokens) + completion_part_len = max(0, + num_scheduled_tokens - prompt_part_len) + else: + prompt_part_len = num_scheduled_tokens + completion_part_len = 0 + + assert num_scheduled_tokens == prompt_part_len + completion_part_len + + if prompt_part_len > 0: + # prompt's mrope_positions are pre-computed + # gpu is number or tensor, but we are numpy, so we transform to int + dst_start = int(mrope_pos_ptr) + dst_end = int(mrope_pos_ptr + prompt_part_len) + src_start = int(num_computed_tokens) + src_end = int(num_computed_tokens + prompt_part_len) + + self.mrope_positions_cpu[:, dst_start:dst_end] = \ + req.mrope_positions[:,src_start:src_end] + + mrope_pos_ptr += prompt_part_len + + if completion_part_len > 0: + # compute completion's mrope_positions on-the-fly + dst_start = mrope_pos_ptr + dst_end = mrope_pos_ptr + completion_part_len + + self.mrope_positions_cpu[:, dst_start:dst_end] = \ + MRotaryEmbedding.get_next_input_positions_tensor( + req.mrope_position_delta, + context_len=num_computed_tokens + + prompt_part_len, + seq_len=num_computed_tokens + + prompt_part_len + + completion_part_len, + ) + + mrope_pos_ptr += completion_part_len diff --git a/vllm_mindspore/worker/worker.py b/vllm_mindspore/worker/worker.py index 8ce1bc91d..0978ed4c5 100644 --- a/vllm_mindspore/worker/worker.py +++ b/vllm_mindspore/worker/worker.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -# encoding: utf-8 +# type: ignore +# isort:skip_file # Copyright 2025 Huawei Technologies Co., Ltd # Copyright 2024 The vLLM team. # @@ -15,23 +16,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ - """Worker functions""" -import gc -import os import math -from typing import Tuple, Optional - import torch -from vllm.config import VllmConfig -from vllm.distributed import ( - ensure_kv_transfer_initialized, - ensure_model_parallel_initialized, - init_distributed_environment, - set_custom_all_reduce, -) - from vllm.logger import init_logger from vllm_mindspore.utils import get_valid_dtype @@ -39,34 +27,41 @@ from vllm.model_executor import set_random_seed from vllm.sequence import SequenceGroupMetadata from vllm.sampling_params import SamplingParams - logger = init_logger(__name__) -def _prepare_input_for_warmup(model_config, model_runner, cache_engine, is_prefill, is_mtp_model=False): +def _prepare_input_for_warmup(model_config, + model_runner, + cache_engine, + is_prefill, + is_mtp_model=False): bs = 1 seq_len = model_runner.scheduler_config.max_num_batched_tokens if is_prefill else 1 - dummy_data = model_runner.input_registry.dummy_data_for_profiling(model_config, seq_len, model_runner.mm_registry) - block_tables = [i for i in range(math.ceil(seq_len / cache_engine.block_size))] + dummy_data = model_runner.input_registry.dummy_data_for_profiling( + model_config, seq_len, model_runner.mm_registry) + block_tables = [ + i for i in range(math.ceil(seq_len / cache_engine.block_size)) + ] + + # adapter multi modal warm up + seq_data = dummy_data.seq_data + if seq_len == 1: + seq_data = dummy_data.seq_data.from_prompt_token_counts((0, seq_len)) + seqs = [ SequenceGroupMetadata( request_id=str(idx), is_prompt=is_prefill, - seq_data={idx: dummy_data.seq_data}, + seq_data={idx: seq_data}, sampling_params=SamplingParams(), block_tables={idx: block_tables}, lora_request=None, multi_modal_data=None, multi_modal_placeholders=None, - ) - for idx in range(bs) + ) for idx in range(bs) ] model_input = model_runner.prepare_model_input(seqs) - block_tables = model_input.attn_metadata.block_tables - if block_tables is not None and block_tables.numel() <= 0: - model_input.attn_metadata.block_tables = torch.zeros((1, 1), dtype=torch.int32) - previous_hidden_states = None if not is_mtp_model else \ torch.ones([bs, seq_len, model_config.get_hidden_size()], dtype=get_valid_dtype(model_config.dtype)) return model_input, previous_hidden_states @@ -78,19 +73,31 @@ def _warm_up_model(self) -> None: is_mtp_model = self.speculative_config is not None and self.model_config.hf_config.model_type == "deepseek_mtp" if is_mtp_model: # prefill mtp model - model_input, previous_hidden_states = _prepare_input_for_warmup(self.model_config, self.model_runner, - self.cache_engine[0], True, is_mtp_model) - self.model_runner.execute_model(model_input, kv_cache, None, previous_hidden_states=previous_hidden_states) + model_input, previous_hidden_states = _prepare_input_for_warmup( + self.model_config, self.model_runner, self.cache_engine[0], True, + is_mtp_model) + self.model_runner.execute_model( + model_input, + kv_cache, + None, + previous_hidden_states=previous_hidden_states) # warmup for decode if self.vllm_config.scheduler_config.is_multi_step: - model_input, _ = _prepare_input_for_warmup(self.model_config, self.model_runner._base_model_runner, - self.cache_engine[0], False) - self.model_runner._base_model_runner.execute_model(model_input, kv_cache, None) + model_input, _ = _prepare_input_for_warmup( + self.model_config, self.model_runner._base_model_runner, + self.cache_engine[0], False) + self.model_runner._base_model_runner.execute_model( + 
model_input, kv_cache, None) else: - model_input, previous_hidden_states = _prepare_input_for_warmup(self.model_config, self.model_runner, - self.cache_engine[0], False, is_mtp_model) - self.model_runner.execute_model(model_input, kv_cache, None, previous_hidden_states=previous_hidden_states) + model_input, previous_hidden_states = _prepare_input_for_warmup( + self.model_config, self.model_runner, self.cache_engine[0], False, + is_mtp_model) + self.model_runner.execute_model( + model_input, + kv_cache, + None, + previous_hidden_states=previous_hidden_states) torch.cuda.synchronize() -- Gitee From 14afcca39139f0ef4430d2c948ad5c2bd904db9c Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Wed, 18 Jun 2025 16:15:25 +0800 Subject: [PATCH 20/76] add qwen3 moe --- .../layers/fused_moe/__init__.py | 2 + .../layers/fused_moe/fused_moe.py | 0 .../model_executor/layers/linear.py | 83 ++- .../model_executor/models/qwen3_moe.py | 531 ++++++++++++++++++ vllm_mindspore/model_executor/models/utils.py | 33 +- 5 files changed, 647 insertions(+), 2 deletions(-) create mode 100644 vllm_mindspore/model_executor/layers/fused_moe/__init__.py create mode 100644 vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py create mode 100644 vllm_mindspore/model_executor/models/qwen3_moe.py diff --git a/vllm_mindspore/model_executor/layers/fused_moe/__init__.py b/vllm_mindspore/model_executor/layers/fused_moe/__init__.py new file mode 100644 index 000000000..a38a67cd9 --- /dev/null +++ b/vllm_mindspore/model_executor/layers/fused_moe/__init__.py @@ -0,0 +1,2 @@ +class FusedMoE: + ... \ No newline at end of file diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/model_executor/layers/linear.py b/vllm_mindspore/model_executor/layers/linear.py index e08511492..0dee09d6a 100644 --- a/vllm_mindspore/model_executor/layers/linear.py +++ b/vllm_mindspore/model_executor/layers/linear.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================ -from typing import List, Optional +from typing import List, Optional, Union from abc import abstractmethod import numpy as np @@ -185,6 +185,87 @@ class LinearBase(ms.nn.Cell): return None +class ReplicatedLinear(LinearBase): + """Replicated linear layer. + + Args: + input_size: input dimension of the linear layer. + output_size: output dimension of the linear layer. + bias: If true, add bias. + skip_bias_add: If true, skip adding bias but instead return it. + params_dtype: Data type for the parameters. + quant_config: Quantization configure. + prefix: The name of the layer in the state dict, including all parents + (e.g. model.layers.0.qkv_proj) + return_bias: If true, return bias together with outputs in forward pass. + """ + + def __init__( + self, + input_size: int, + output_size: int, + bias: bool = True, + skip_bias_add: bool = False, + params_dtype = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + *, + return_bias: bool = True, + ): + super().__init__(input_size, + output_size, + skip_bias_add, + params_dtype, + quant_config, + prefix=prefix, + return_bias=return_bias) + + # All the linear layer supports quant method. 
+ assert self.quant_method is not None + self.quant_method.create_weights(self, + self.input_size, [self.output_size], + self.input_size, + self.output_size, + self.params_dtype, + weight_loader=self.weight_loader) + + if bias: + self.bias = Parameter( + mint.empty(self.output_size, dtype=self.params_dtype)) + set_weight_attrs(self.bias, { + "output_dim": 0, + "weight_loader": self.weight_loader, + }) + else: + self.bias = None + + def weight_loader(self, param: Parameter, loaded_weight: Tensor): + if len(loaded_weight.shape) == 0: + loaded_weight = loaded_weight.reshape(1) + + assert param.size() == loaded_weight.size(), ( + f"Tried to load weights of size {loaded_weight.size()}" + f"to a parameter of size {param.size()}") + param.set_data(loaded_weight) + + def forward( + self, x: Tensor + ) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]: + bias = self.bias if not self.skip_bias_add else None + assert self.quant_method is not None + output = self.quant_method.apply(self, x, bias) + output_bias = self.bias if self.skip_bias_add else None + if not self.return_bias: + return output + return output, output_bias + + def extra_repr(self) -> str: + s = f"in_features={self.input_size}" + s += f", output_features={self.output_size}" + s += f", bias={self.bias is not None}" + return s + + class ColumnParallelLinear(LinearBase): def __init__( self, diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py new file mode 100644 index 000000000..275331157 --- /dev/null +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -0,0 +1,531 @@ +# SPDX-License-Identifier: Apache-2.0 + +# Copyright 2024 The Qwen team. +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
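# Before the Qwen3-MoE model below: a minimal usage sketch of the
# ReplicatedLinear layer added in the linear.py hunk above, e.g. as an MoE
# router gate. It assumes the class is importable and that the default
# unquantized path is taken (no quant_config); all sizes are hypothetical.
import mindspore as ms
from mindspore import ops

gate = ReplicatedLinear(1024, 8, bias=False, params_dtype=ms.float16)
x = ops.ones((4, 1024), ms.float16)   # 4 tokens, hidden_size 1024
router_logits, _ = gate(x)            # forward returns (output, bias)
assert router_logits.shape == (4, 8)  # one logit per expert, per token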
+"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" +from collections.abc import Iterable +from typing import Any, Optional, Union + +from mindspore import Tensor, nn +from transformers import PretrainedConfig +from vllm.config import CacheConfig, VllmConfig +from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.logger import init_logger +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from vllm_mindspore.attention import Attention +from vllm_mindspore.model_executor.layers.activation import SiluAndMul +from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE +from vllm_mindspore.model_executor.layers.layernorm import RMSNorm +from vllm_mindspore.model_executor.layers.linear import ( + MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, + RowParallelLinear) +from vllm_mindspore.model_executor.layers.logits_processor import ( + LogitsProcessor) +from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope +from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader + +from vllm_mindspore.model_executor.models.utils import ( + extract_layer_index, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +from vllm_mindspore.model_executor.models.model_base import NativeModel + +logger = init_logger(__name__) + + +class Qwen3MoeMLP(nn.Cell): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj") + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results, + prefix=f"{prefix}.down_proj") + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. 
" + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class Qwen3MoeSparseMoeBlock(nn.Cell): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ): + super().__init__() + self.tp_size = get_tensor_model_parallel_world_size() + + if self.tp_size > config.num_experts: + raise ValueError( + f"Tensor parallel size {self.tp_size} is greater than " + f"the number of experts {config.num_experts}.") + + self.experts = FusedMoE(num_experts=config.num_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts") + + self.gate = ReplicatedLinear(config.hidden_size, + config.num_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") + + def forward(self, hidden_states: Tensor) -> Tensor: + # NOTE: hidden_states can have either 1D or 2D shape. + orig_shape = hidden_states.shape + hidden_dim = hidden_states.shape[-1] + hidden_states = hidden_states.view(-1, hidden_dim) + + # router_logits: (num_tokens, n_experts) + router_logits, _ = self.gate(hidden_states) + final_hidden_states = self.experts(hidden_states=hidden_states, + router_logits=router_logits) + final_hidden_states = final_hidden_states + if self.tp_size > 1: + final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 + final_hidden_states) + + return final_hidden_states.view(orig_shape) + + +class Qwen3MoeAttention(nn.Cell): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[dict[str, Any]] = None, + max_position_embeddings: int = 8192, + head_dim: Optional[int] = None, + rms_norm_eps: float = 1e-06, + qkv_bias: bool = False, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = head_dim or (hidden_size // self.total_num_heads) + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + self.qkv_proj = QKVParallelLinear(hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=qkv_bias, + quant_config=quant_config, + prefix=f"{prefix}.qkv_proj") + + self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.o_proj") + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.attn") + + self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) + self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) + + def forward( + self, + positions: Tensor, + hidden_states: Tensor, + ) -> Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + # Add qk-norm + q_by_head = q.view(*q.shape[:-1], q.shape[-1] // self.head_dim, + self.head_dim) + q_by_head = self.q_norm(q_by_head) + q = q_by_head.view(q.shape) + + k_by_head = k.view(*k.shape[:-1], k.shape[-1] // self.head_dim, + self.head_dim) + k_by_head = self.k_norm(k_by_head) + k = k_by_head.view(k.shape) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v) + output, _ = self.o_proj(attn_output) + return output + + +class Qwen3MoeDecoderLayer(nn.Cell): + + def __init__( + self, + config: PretrainedConfig, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) + self.self_attn = Qwen3MoeAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + rms_norm_eps=config.rms_norm_eps, + qkv_bias=getattr(config, 'attention_bias', False), + head_dim=getattr(config, 'head_dim', None), + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) + + # `mlp_only_layers` in the config. 
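# Quick illustration of the layer selection implemented just below: with
# decoder_sparse_step=2, num_experts=8 and an empty mlp_only_layers list,
# every second decoder layer gets a sparse MoE block. Toy values only.
decoder_sparse_step, num_experts, mlp_only_layers = 2, 8, []
for layer_idx in range(4):
    use_moe = (layer_idx not in mlp_only_layers) and (
        num_experts > 0 and (layer_idx + 1) % decoder_sparse_step == 0)
    print(layer_idx, "MoE" if use_moe else "dense MLP")
# -> 0 dense MLP, 1 MoE, 2 dense MLP, 3 MoE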
+ layer_idx = extract_layer_index(prefix) + mlp_only_layers = ([] if not hasattr(config, "mlp_only_layers") else + config.mlp_only_layers) + if (layer_idx not in mlp_only_layers) and ( + config.num_experts > 0 and + (layer_idx + 1) % config.decoder_sparse_step == 0): + self.mlp = Qwen3MoeSparseMoeBlock(config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp") + else: + self.mlp = Qwen3MoeMLP(hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.mlp") + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: Tensor, + hidden_states: Tensor, + residual: Optional[Tensor], + ) -> Tensor: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states) + return hidden_states, residual + + +class Qwen3MoeModel(nn.Cell): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.config = config + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + prefix=f"{prefix}.embed_tokens") + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: Qwen3MoeDecoderLayer(config=config, + cache_config=cache_config, + quant_config=quant_config, + prefix=prefix), + prefix=f"{prefix}.layers", + ) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.embed_tokens(input_ids) + + def forward( + self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + ) -> Union[Tensor, IntermediateTensors]: + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None + else: + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] + for i in range(self.start_layer, self.end_layer): + layer = self.layers[i] + hidden_states, residual = layer(positions, hidden_states, residual) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + def load_weights(self, weights: Iterable[tuple[str, + Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", 
"gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + + # Params for weights, fp8 weight scales, fp8 activation scales + # (param_name, weight_name, expert_id, shard_id) + expert_params_mapping = FusedMoE.make_expert_params_mapping( + ckpt_gate_proj_name="gate_proj", + ckpt_down_proj_name="down_proj", + ckpt_up_proj_name="up_proj", + num_experts=self.config.num_experts) + + params_dict = dict(self.named_parameters()) + loaded_params: set[str] = set() + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + # Skip non-stacked layers and experts (experts handled below). + if weight_name not in name: + continue + # We have mlp.experts[0].gate_proj in the checkpoint. + # Since we handle the experts below in expert_params_mapping, + # we need to skip here BEFORE we update the name, otherwise + # name will be updated to mlp.experts[0].gate_up_proj, which + # will then be updated below in expert_params_mapping + # for mlp.experts[0].gate_gate_up_proj, which breaks load. + if "mlp.experts" in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if ((name.endswith(".bias") or name.endswith("_bias")) + and name not in params_dict): + continue + # Skip layers on other devices. + if is_pp_missing_parameter(name, self): + continue + if name not in params_dict: + continue + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + for mapping in expert_params_mapping: + param_name, weight_name, expert_id, shard_id = mapping + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip layers on other devices. + if is_pp_missing_parameter(name, self): + continue + # Skip loading extra bias for GPTQ models. + if ((name.endswith(".bias") or name.endswith("_bias")) + and name not in params_dict): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, + loaded_weight, + name, + shard_id=shard_id, + expert_id=expert_id) + break + else: + # Skip loading extra bias for GPTQ models. + if ((name.endswith(".bias") or name.endswith("_bias")) + and name not in params_dict): + continue + # Skip layers on other devices. + if is_pp_missing_parameter(name, self): + continue + # Remapping the name of FP8 kv-scale. + if name.endswith("kv_scale"): + remapped_kv_scale_name = name.replace( + ".kv_scale", ".attn.kv_scale") + if remapped_kv_scale_name not in params_dict: + logger.warning_once( + "Found kv scale in the checkpoint (e.g. %s), but not found the expected name in the model (e.g. %s). 
kv-scale is not loaded.", # noqa: E501 + name, + remapped_kv_scale_name, + ) + continue + else: + name = remapped_kv_scale_name + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + + +class Qwen3MoeForCausalLM(NativeModel, SupportsPP): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + + fall_back_to_pt_during_load = False + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + self.model = Qwen3MoeModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config) + if self.config.tie_word_embeddings: + self.lm_head.weight = self.model.embed_tokens.weight + self.logits_processor = LogitsProcessor(config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.model.get_input_embeddings(input_ids) + + def forward( + self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + ) -> Union[Tensor, IntermediateTensors]: + hidden_states = self.exec_model(input_ids, positions, intermediate_tensors, + inputs_embeds) + return hidden_states + + def compute_logits( + self, + hidden_states: Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def load_weights(self, weights: Iterable[tuple[str, + Tensor]]) -> set[str]: + params_dict = self.get_params_dict() + self.model.load_weights(weights, params_dict) diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 493664cda..9ecebe2ab 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -20,7 +20,7 @@ from dataclasses import dataclass, field from typing import Iterable, List, Mapping, Optional, Tuple, Union -import mindspore as ms +import mindspore as ms, nn from mindspore import mint, ops from vllm.sequence import IntermediateTensors @@ -261,3 +261,34 @@ def merge_multimodal_embeddings( (input_ids == placeholder_token_id), multimodal_embeddings, ) + + +_model_to_pp_missing_layer_names: dict[int, list[str]] = {} + + +def get_pp_missing_layer_names(model: nn.Cell) -> list[str]: + """Get the names of the missing layers in a pipeline parallel model.""" + model_id = id(model) + if model_id in _model_to_pp_missing_layer_names: + return _model_to_pp_missing_layer_names[model_id] + + missing_layer_names = [] + for name, cell in model.name_cells(): + if isinstance(cell, PPMissingLayer): + # NOTE: the trailing dot is used to match the prefix of the layer. 
+            # without the dot, we could match a layer that is not missing,
+            # e.g., 'encoder.layer.1' would match 'encoder.layer.11'
+            missing_layer_names.append(name + '.')
+    _model_to_pp_missing_layer_names[model_id] = missing_layer_names
+
+    return missing_layer_names
+
+
+def is_pp_missing_parameter(name: str, model: nn.Cell) -> bool:
+    """Check if a parameter is missing in a pipeline parallel model."""
+    if isinstance(model, PPMissingLayer):
+        return True
+
+    return any(
+        name.startswith(missing_layer_name)
+        for missing_layer_name in get_pp_missing_layer_names(model))
-- 
Gitee


From 6b50bfebd0b2901eda1cb9d77bf3a6944a7b8a0d Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Thu, 19 Jun 2025 16:25:01 +0800
Subject: [PATCH 21/76] add layers

---
 .../model_executor/layers/fused_moe/layer.py  | 695 ++++++++++++++++++
 1 file changed, 695 insertions(+)
 create mode 100644 vllm_mindspore/model_executor/layers/fused_moe/layer.py

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
new file mode 100644
index 000000000..bb2c02d6f
--- /dev/null
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -0,0 +1,695 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import importlib
+from abc import abstractmethod
+from dataclasses import dataclass
+from enum import Enum
+from typing import Callable, Optional
+
+import torch
+import torch.nn.functional as F
+from torch.nn.parameter import UninitializedParameter
+
+import vllm.envs as envs
+from vllm.config import ParallelConfig, get_current_vllm_config
+from vllm.distributed import (get_dp_group, get_ep_group,
+                              get_tensor_model_parallel_rank,
+                              get_tensor_model_parallel_world_size,
+                              tensor_model_parallel_all_reduce)
+from vllm.forward_context import ForwardContext, get_forward_context
+from vllm.logger import init_logger
+from vllm.model_executor.custom_op import CustomOp
+from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
+    is_rocm_aiter_moe_enabled)
+from vllm.model_executor.layers.quantization.base_config import (
+    QuantizationConfig, QuantizeMethodBase)
+from vllm.model_executor.utils import set_weight_attrs
+from vllm.platforms import current_platform
+from vllm.platforms.interface import CpuArchEnum
+from vllm.utils import direct_register_custom_op
+from vllm.model_executor.layers.fused_moe.layers import FusedMoEParallelConfig
+
+
+from mindspore import nn
+
+class FusedMoE(nn.Cell):
+    """FusedMoE layer for MoE models.
+
+    This layer contains both MergedColumnParallel weights (gate_up_proj /
+    w13) and RowParallelLinear weights (down_proj / w2).
+
+    Note: Mixtral uses w1, w2, and w3 for gate, up, and down_proj. We
+    copy that naming convention here and handle any remapping in the
+    load_weights function in each model implementation.
+
+    Args:
+        num_experts: Number of experts in the model
+        top_k: Number of experts selected for each token
+        hidden_size: Input hidden state size of the transformer
+        intermediate_size: Intermediate size of the experts
+        params_dtype: Data type for the parameters.
+        reduce_results: Whether to all-reduce on the output of the layer
+        renormalize: Whether to renormalize the logits in the fused_moe kernel
+        quant_config: Quantization configuration.
+ """ + + def __init__( + self, + num_experts: int, # Global number of experts + top_k: int, + hidden_size: int, + intermediate_size: int, + params_dtype: Optional[torch.dtype] = None, + reduce_results: bool = False, + renormalize: bool = True, + use_grouped_topk: bool = False, + num_expert_group: Optional[int] = None, + topk_group: Optional[int] = None, + quant_config: Optional[QuantizationConfig] = None, + tp_size: Optional[int] = None, + ep_size: Optional[int] = None, + dp_size: Optional[int] = None, + prefix: str = "", + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + apply_router_weight_on_input: bool = False, + activation: str = "silu", + ): + super().__init__() + + if params_dtype is None: + params_dtype = get_current_vllm_config().model_config.dtype + self.params_dtype = params_dtype + + vllm_config = get_current_vllm_config() + self.moe_parallel_config: FusedMoEParallelConfig = ( + FusedMoEParallelConfig.make( + tp_size_=(tp_size if tp_size is not None else + get_tensor_model_parallel_world_size()), + dp_size_=(dp_size if dp_size is not None else + get_dp_group().world_size), + vllm_parallel_config=vllm_config.parallel_config)) + + self.global_num_experts = num_experts + + # For smuggling this layer into the fused moe custom op + self.use_direct_call = self.dp_size == 1 + if not self.use_direct_call: + compilation_config = vllm_config.compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError("Duplicate layer name: {}".format(prefix)) + compilation_config.static_forward_context[prefix] = self + self.layer_name = prefix + + # Determine expert maps + if self.use_ep: + self.local_num_experts, self.expert_map = determine_expert_map( + ep_size=self.ep_size, + ep_rank=self.ep_rank, + global_num_experts=self.global_num_experts) + else: + self.local_num_experts, self.expert_map = (self.global_num_experts, + None) + + self.top_k = top_k + + assert intermediate_size % self.tp_size == 0 + self.hidden_size = hidden_size + self.intermediate_size_per_partition = intermediate_size // self.tp_size + self.reduce_results = reduce_results + self.renormalize = renormalize + self.use_grouped_topk = use_grouped_topk + if self.use_grouped_topk: + assert num_expert_group is not None and topk_group is not None + self.num_expert_group = num_expert_group + self.topk_group = topk_group + self.custom_routing_function = custom_routing_function + self.scoring_func = scoring_func + self.e_score_correction_bias = e_score_correction_bias + self.apply_router_weight_on_input = apply_router_weight_on_input + self.activation = activation + + if self.scoring_func != "softmax" and not self.use_grouped_topk: + raise ValueError("Only softmax scoring function is supported for " + "non-grouped topk.") + if current_platform.is_hpu(): + from vllm_hpu_extension.ops import DynamicFusedMOE + self.hpu_fused_moe = DynamicFusedMOE(self.global_num_experts) + + moe = MoEConfig( + num_experts=self.global_num_experts, + experts_per_token=top_k, + hidden_dim=hidden_size, + num_local_experts=self.local_num_experts, + moe_parallel_config=self.moe_parallel_config, + # TODO (bnell): this needs to be fixed for quantized types. + in_dtype=params_dtype, + max_num_tokens=MOE_DP_CHUNK_SIZE, + ) + self.moe_config = moe + self.quant_config = quant_config + + # Note: get_quant_method will look at the layer's local_num_experts + # for heuristic purposes, so it must be initialized first. 
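# What `determine_expert_map` above boils down to, sketched in plain
# NumPy with a uniform split (the real vLLM helper also handles uneven
# remainders and returns a tensor). Toy values: 8 experts, ep_size=2.
import numpy as np

def toy_expert_map(ep_size, ep_rank, global_num_experts):
    local = global_num_experts // ep_size
    expert_map = np.full(global_num_experts, -1, dtype=np.int32)
    start = ep_rank * local
    expert_map[start:start + local] = np.arange(local)
    return local, expert_map

print(toy_expert_map(2, 1, 8))  # (4, array([-1, -1, -1, -1, 0, 1, 2, 3]))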
+ quant_method: Optional[QuantizeMethodBase] = None + + if quant_config is None: + quant_method = UnquantizedFusedMoEMethod(moe) + else: + quant_method = quant_config.get_quant_method(self, prefix) + + assert quant_method is not None + assert isinstance(quant_method, FusedMoEMethodBase) + self.quant_method = quant_method + + moe_quant_params = { + "num_experts": self.local_num_experts, + "hidden_size": hidden_size, + "intermediate_size_per_partition": + self.intermediate_size_per_partition, + "params_dtype": params_dtype, + "weight_loader": self.weight_loader, + } + # need full intermediate size pre-sharding for WNA16 act order + if (self.quant_method.__class__.__name__ + in ("GPTQMarlinMoEMethod", + "CompressedTensorsWNA16MarlinMoEMethod", + "CompressedTensorsWNA16MoEMethod")): + moe_quant_params["intermediate_size_full"] = intermediate_size + + self.quant_method.create_weights(layer=self, **moe_quant_params) + + @property + def tp_size(self): + return self.moe_parallel_config.tp_size + + @property + def dp_size(self): + return self.moe_parallel_config.dp_size + + @property + def ep_size(self): + return self.moe_parallel_config.ep_size + + @property + def tp_rank(self): + return self.moe_parallel_config.tp_rank + + @property + def dp_rank(self): + return self.moe_parallel_config.dp_rank + + @property + def ep_rank(self): + return self.moe_parallel_config.ep_rank + + @property + def use_ep(self): + return self.moe_parallel_config.use_ep + + @property + def use_pplx_kernels(self): + return self.moe_parallel_config.use_pplx_kernels + + def _load_per_tensor_weight_scale(self, shard_id: str, + param: torch.nn.Parameter, + loaded_weight: torch.Tensor, + expert_id: int): + param_data = param.data + # for per tensor weight quantization + if shard_id in ("w1", "w3"): + # We have to keep the weight scales of w1 and w3 because + # we need to re-quantize w1/w3 weights after weight loading. + idx = 0 if shard_id == "w1" else 1 + param_data[expert_id][idx] = loaded_weight + # If we are in the row parallel case (down_proj) + elif shard_id == "w2": + param_data[expert_id] = loaded_weight + + def _load_model_weight_or_group_weight_scale(self, + shard_dim: int, + expert_data: torch.Tensor, + shard_id: str, + loaded_weight: torch.Tensor, + tp_rank: int, + load_full_w2: bool = False): + """ + Load grouped weight scales for group quantization or model weights + :param shard_dim: dimension to shard + :param expert_data: parameter for a particular expert + :param shard_id: either w1, w2, or w3 + :param loaded_weight: checkpoint weight to load into the param + :param tp_rank: tensor parallel rank + :param load_full_w2: whether or not the w2 loaded should be sharded. 
+ """ + if shard_id == "w2": + # In the case where we have actorder/g_idx, we do not partition the + # w2 scales, as indicated by `load_full` argument, for all tp cases + self._load_w2(shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=tp_rank, + load_full=load_full_w2) + elif shard_id in ("w1", "w3"): + self._load_w13(shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=tp_rank) + + def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, + shard_dim: int, shard_id: str, + loaded_weight: torch.Tensor, + tp_rank: int): + # for per channel weight quantization + if shard_id == "w2": + expert_data.copy_(loaded_weight) + elif shard_id in ("w1", "w3"): + self._load_w13(shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=tp_rank) + + def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, + shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): + + # Index the loaded weight for tp sharding. + # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim + shard_size = expert_data.shape[shard_dim] // 2 + loaded_weight = loaded_weight.narrow(shard_dim, shard_size * tp_rank, + shard_size) + # Narrow parameter and load. + # w1, gate_proj: Load into first logical weight of w13. + if shard_id == "w1": + expert_data = expert_data.narrow(shard_dim, 0, shard_size) + # w3, up_proj: Load into second logical weight of w13. + else: + assert shard_id == "w3" + expert_data = expert_data.narrow(shard_dim, shard_size, shard_size) + expert_data.copy_(loaded_weight) + + def _load_w2(self, + expert_data: torch.Tensor, + shard_dim: int, + loaded_weight: torch.Tensor, + tp_rank: int, + load_full: bool = False): + + # Index the loaded weight for tp sharding. + # down_proj: "RowParallel" so tp sharding on input_dim + # Narrow parameter and load. + shard_size = expert_data.shape[shard_dim] + if not load_full: + loaded_weight = loaded_weight.narrow(shard_dim, + shard_size * tp_rank, + shard_size) + # w2, down_proj: Load into only logical weight of w2. + expert_data.copy_(loaded_weight) + + def _load_single_value(self, param: torch.nn.Parameter, + loaded_weight: torch.Tensor, expert_id: int): + param_data = param.data + + # Input scales can be loaded directly and should be equal. 
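# NumPy sketch of the TP sharding done by _load_w13/_load_w2 above: w1
# (gate_proj) fills the first half of the fused w13 parameter and w3
# (up_proj) the second half, each narrowed to this rank's slice along the
# output dim; w2 would be narrowed along its input dim instead. Toy sizes.
import numpy as np

tp_rank, shard_size = 1, 4                 # 8 rows split over 2 ranks
w13_param = np.zeros((2 * shard_size, 6))  # fused gate_up parameter
w1_ckpt = np.ones((8, 6))                  # full gate_proj checkpoint
w3_ckpt = 2 * np.ones((8, 6))              # full up_proj checkpoint

rows = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size)
w13_param[:shard_size] = w1_ckpt[rows]     # w1 -> first logical half
w13_param[shard_size:] = w3_ckpt[rows]     # w3 -> second logical half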
+ param_data[expert_id] = loaded_weight + + def _load_g_idx(self, shard_id: str, expert_data: torch.Tensor, + shard_dim: int, loaded_weight: torch.Tensor, tp_rank: int): + + if shard_id == "w2": + self._load_w2(shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=tp_rank) + else: + assert shard_id in ("w1", "w3") + expert_data.copy_(loaded_weight) + + def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int: + if self.expert_map is None: + return expert_id + return self.expert_map[expert_id].item() + + def weight_loader(self, param: torch.nn.Parameter, + loaded_weight: torch.Tensor, weight_name: str, + shard_id: str, expert_id: int) -> None: + + expert_id = self._map_global_expert_id_to_local_expert_id(expert_id) + if expert_id == -1: + return + quant_method_name = self.quant_method.__class__.__name__ + # compressed-tensors checkpoints with packed weights are stored flipped + # TODO (mgoin): check self.quant_method.quant_config.quant_format + # against known CompressionFormat enum values that have this quality + if self.quant_method.__class__.__name__ in ( + "CompressedTensorsWNA16MarlinMoEMethod", + "CompressedTensorsWNA16MoEMethod"): + loaded_weight = loaded_weight.t().contiguous() + + if shard_id not in ("w1", "w2", "w3"): + raise ValueError(f"shard_id must be ['w1','w2','w3'] but " + f"got {shard_id}.") + + WEIGHT_SCALE_SUPPORTED = [ + e.value for e in FusedMoeWeightScaleSupported + ] + # Fetch the dim to shard the parameter/loaded weight + # based on the shard id. This will be whatever + # dimension intermediate_size_per_partition is used. + SHARD_ID_TO_SHARDED_DIM = {"w1": 0, "w2": 1, "w3": 0} + + is_gguf_weight = getattr(param, "is_gguf_weight", False) + is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False) + if is_gguf_weight_type: + param.weight_type = loaded_weight.item() + param.data.copy_(loaded_weight) + return + + # is_transposed: if the dim to shard the weight + # should be flipped. Required by GPTQ, compressed-tensors + # should be whatever dimension intermediate_size_per_partition is + is_transposed = getattr(param, "is_transposed", False) + shard_dim = SHARD_ID_TO_SHARDED_DIM[shard_id] + if is_transposed: + shard_dim = int(not shard_dim) + + full_load = len(loaded_weight.shape) == 3 + if full_load: + shard_dim += 1 + + # Materialize GGUF UninitializedParameter + if is_gguf_weight and isinstance(param, UninitializedParameter): + final_shape = list(loaded_weight.shape) + if shard_id in ["w1", "w3"]: + final_shape[1] *= 2 + final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size + param.materialize(final_shape, dtype=loaded_weight.dtype) + + expert_data = param.data if full_load else param.data[expert_id] + # Case input scale: input_scale loading is only supported for fp8 + if "input_scale" in weight_name: + # this is needed for compressed-tensors only + loaded_weight = loaded_weight.to(param.data.device) + + if ("compressed" in quant_method_name.lower() + and param.data[expert_id] != 1 + and (param.data[expert_id] - loaded_weight).abs() > 1e-5): + raise ValueError( + "input_scales of w1 and w3 of a layer " + f"must be equal. But got {param.data[expert_id]} " + f"vs. 
{loaded_weight}") + + self._load_single_value(param=param, + loaded_weight=loaded_weight, + expert_id=expert_id) + return + + # Case g_idx + if "g_idx" in weight_name: + self._load_g_idx(shard_dim=0, + shard_id=shard_id, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=self.tp_rank) + return + + if "ModelOpt" in quant_method_name: + if ('weight_scale_2' in weight_name + or 'input_scale' in weight_name): + self._load_per_tensor_weight_scale(shard_id=shard_id, + param=param, + loaded_weight=loaded_weight, + expert_id=expert_id) + elif "weight" in weight_name: + self._load_model_weight_or_group_weight_scale( + shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=self.tp_rank) + return + + # Case weight scales, zero_points and offset + if ("scale" in weight_name or "zero" in weight_name + or "offset" in weight_name): + # load the weight scales and zp based on the quantization scheme + # supported weight scales/zp can be found in + # FusedMoeWeightScaleSupported + # TODO @dsikka: once hardened, refactor to use vLLM Parameters + # specific to each case + quant_method = getattr(param, "quant_method", None) + if quant_method == FusedMoeWeightScaleSupported.CHANNEL.value: + self._load_per_channel_weight_scale( + shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=self.tp_rank) + elif quant_method in [ + FusedMoeWeightScaleSupported.GROUP.value, + FusedMoeWeightScaleSupported.BLOCK.value, + ]: + self._load_model_weight_or_group_weight_scale( + shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=self.tp_rank, + load_full_w2=getattr(param, "load_full_w2", False)) + elif quant_method == FusedMoeWeightScaleSupported.TENSOR.value: + self._load_per_tensor_weight_scale(shard_id=shard_id, + param=param, + loaded_weight=loaded_weight, + expert_id=expert_id) + else: + raise ValueError( + f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}") + return + + # Case weight_shape + if "weight_shape" in weight_name: + # only required by compressed-tensors + self._load_single_value(param=param, + loaded_weight=loaded_weight, + expert_id=expert_id) + return + + # Case model weights + if "weight" in weight_name: + self._load_model_weight_or_group_weight_scale( + shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + expert_data=expert_data, + tp_rank=self.tp_rank) + return + + @staticmethod + def select_experts(hidden_states: torch.Tensor, + router_logits: torch.Tensor, + top_k: int, + use_grouped_topk: bool, + renormalize: bool, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + indices_type: Optional[torch.dtype] = None): + from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk + + # DeekSeekv2 uses grouped_top_k + if use_grouped_topk: + assert topk_group is not None + assert num_expert_group is not None + topk_weights, topk_ids = grouped_topk( + hidden_states=hidden_states, + gating_output=router_logits, + topk=top_k, + renormalize=renormalize, + num_expert_group=num_expert_group, + topk_group=topk_group, + scoring_func=scoring_func, + e_score_correction_bias=e_score_correction_bias) + if indices_type is not None: + topk_ids = topk_ids.to(dtype=indices_type) + elif custom_routing_function is None: + topk_weights, 
topk_ids, token_expert_indices = fused_topk(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                topk=top_k,
+                renormalize=renormalize,
+                indices_type=indices_type,
+            )
+        else:
+            topk_weights, topk_ids = custom_routing_function(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                topk=top_k,
+                renormalize=renormalize)
+            if indices_type is not None:
+                topk_ids = topk_ids.to(dtype=indices_type)
+
+        return topk_weights, topk_ids
+
+    def must_reduce_shared_expert_outputs(self) -> bool:
+        """
+        The shared_experts are typically computed using the RowParallelLinear
+        layer. The result of this function is typically used as
+        the reduce_results argument to that module.
+        When just tensor-parallel is used, it is not required to reduce
+        the shared_experts results immediately. Instead we reduce once at
+        the end of the MoE op (refer to the DeepSeekV2MoE module).
+        With EP and the pplx kernels this is no longer viable, as all
+        GPU ranks in DP produce the complete set of hidden_states.
+        Therefore it is required that we reduce the shared_experts output
+        early.
+        """
+        return self.use_pplx_kernels
+
+    def maybe_all_reduce_tensor_model_parallel(
+            self, final_hidden_states: torch.Tensor):
+        """
+        The pplx combine kernel reduces across GPU ranks by default.
+        """
+        if self.use_pplx_kernels:
+            return final_hidden_states
+        else:
+            return tensor_model_parallel_all_reduce(final_hidden_states)
+
+    def forward(self, hidden_states: torch.Tensor,
+                router_logits: torch.Tensor):
+        if self.use_direct_call:
+            return self.forward_impl(hidden_states, router_logits)
+        else:
+            return torch.ops.vllm.moe_forward(hidden_states, router_logits,
+                                              self.layer_name)
+
+    def forward_impl_chunked(self, full_hidden_states: torch.Tensor,
+                             full_router_logits: torch.Tensor):
+
+        full_final_hidden_states = torch.empty_like(full_hidden_states)
+
+        def process_chunk(chunk_start, chunk_end, skip_result_store=False):
+            hidden_states = full_hidden_states[chunk_start:chunk_end, :]
+            router_logits = full_router_logits[chunk_start:chunk_end, :]
+
+            # Matrix multiply.
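# For intuition, the softmax top-k routing that select_experts above
# delegates to fused_topk, written out in NumPy: softmax the router
# logits, keep the top_k experts per token, renormalize the kept weights.
# Toy logits for 2 tokens and 4 experts.
import numpy as np

logits = np.array([[2.0, 0.5, 1.0, -1.0],
                   [0.0, 3.0, 0.0, 1.0]])
probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
top_k = 2
topk_ids = np.argsort(-probs, axis=-1)[:, :top_k]            # [[0 2], [1 3]]
topk_weights = np.take_along_axis(probs, topk_ids, axis=-1)
topk_weights /= topk_weights.sum(-1, keepdims=True)          # renormalize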
+ final_hidden_states = self.quant_method.apply( + layer=self, + x=hidden_states, + router_logits=router_logits, + top_k=self.top_k, + renormalize=self.renormalize, + use_grouped_topk=self.use_grouped_topk, + global_num_experts=self.global_num_experts, + expert_map=self.expert_map, + topk_group=self.topk_group, + num_expert_group=self.num_expert_group, + custom_routing_function=self.custom_routing_function, + scoring_func=self.scoring_func, + e_score_correction_bias=self.e_score_correction_bias, + activation=self.activation, + ) + + if not skip_result_store: + full_final_hidden_states[chunk_start:chunk_end, :].copy_( + final_hidden_states) + + ctx = get_forward_context() + max_tokens_across_dp = ctx.dp_metadata.max_tokens_across_dp_cpu + moe_dp_chunk_size_per_rank = MOE_DP_CHUNK_SIZE + + num_tokens = full_hidden_states.size(0) + for chunk_start_ in range(0, max_tokens_across_dp, + moe_dp_chunk_size_per_rank): + chunk_start = chunk_start_ + chunk_end = min(chunk_start + moe_dp_chunk_size_per_rank, + max_tokens_across_dp) + # clamp start and end + chunk_start = min(chunk_start, num_tokens - 1) + chunk_end = min(chunk_end, num_tokens) + + process_chunk(chunk_start, + chunk_end, + skip_result_store=chunk_start_ >= num_tokens) + + return full_final_hidden_states + + def forward_impl(self, hidden_states: torch.Tensor, + router_logits: torch.Tensor): + assert self.quant_method is not None + if self.moe_parallel_config.use_pplx_kernels: + return self.forward_impl_chunked(hidden_states, router_logits) + + if self.dp_size > 1: + hidden_states, router_logits = get_ep_group().dispatch( + hidden_states, router_logits) + # Matrix multiply. + final_hidden_states = self.quant_method.apply( + layer=self, + x=hidden_states, + router_logits=router_logits, + top_k=self.top_k, + renormalize=self.renormalize, + use_grouped_topk=self.use_grouped_topk, + global_num_experts=self.global_num_experts, + expert_map=self.expert_map, + topk_group=self.topk_group, + num_expert_group=self.num_expert_group, + custom_routing_function=self.custom_routing_function, + scoring_func=self.scoring_func, + e_score_correction_bias=self.e_score_correction_bias, + activation=self.activation, + apply_router_weight_on_input=self.apply_router_weight_on_input, + ) + + if self.dp_size > 1: + final_hidden_states = get_ep_group().combine(final_hidden_states) + + if self.reduce_results and (self.tp_size > 1 or self.ep_size > 1): + # Default set to False. (May have to add shared expert outputs.) 
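# The chunking loop in forward_impl_chunked above walks the DP-wide
# maximum token count so every rank issues the same number of launches;
# a rank with fewer tokens clamps its slice and skips storing results.
# Toy trace with illustrative sizes:
max_tokens_across_dp, chunk, num_tokens = 12, 5, 7
for chunk_start_ in range(0, max_tokens_across_dp, chunk):
    chunk_start = min(chunk_start_, num_tokens - 1)
    chunk_end = min(min(chunk_start_ + chunk, max_tokens_across_dp),
                    num_tokens)
    skip = chunk_start_ >= num_tokens
    print(chunk_start, chunk_end, "skip" if skip else "store")
# -> 0 5 store; 5 7 store; 6 7 skip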
+ final_hidden_states = tensor_model_parallel_all_reduce( + final_hidden_states) + + return final_hidden_states + + @classmethod + def make_expert_params_mapping( + cls, ckpt_gate_proj_name: str, ckpt_down_proj_name: str, + ckpt_up_proj_name: str, + num_experts: int) -> list[tuple[str, str, int, str]]: + + return [ + # (param_name, weight_name, expert_id, shard_id) + ("experts.w13_" if weight_name + in [ckpt_gate_proj_name, ckpt_up_proj_name] else "experts.w2_", + f"experts.{expert_id}.{weight_name}.", expert_id, shard_id) + for expert_id in range(num_experts) for shard_id, weight_name in [ + ("w1", ckpt_gate_proj_name), + ("w2", ckpt_down_proj_name), + ("w3", ckpt_up_proj_name), + ] + ] + + def extra_repr(self) -> str: + + s = ( + f"global_num_experts={self.global_num_experts}, " + f"local_num_experts={self.local_num_experts}, " + f"top_k={self.top_k}, " + f"intermediate_size_per_partition={self.intermediate_size_per_partition}, " # noqa: E501 + f"tp_size={self.tp_size},\n" + f"ep_size={self.ep_size}, " + f"reduce_results={self.reduce_results}, " + f"renormalize={self.renormalize}, " + f"use_grouped_topk={self.use_grouped_topk}") + + if self.use_grouped_topk: + s += f", num_expert_group={self.num_expert_group}, topk_group={self.topk_group}" # noqa: E501 + + s += f", scoring_func='{self.scoring_func}', activation='{self.activation}'" # noqa: E501 + + return s -- Gitee From d8011fb81ef281363baeb143a86c651534bfd0d0 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 20 Jun 2025 11:41:48 +0800 Subject: [PATCH 22/76] update --- .../layers/fused_moe/fused_moe.py | 86 +++++ .../model_executor/layers/fused_moe/layer.py | 321 +++++++++++++----- 2 files changed, 314 insertions(+), 93 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index e69de29bb..b5c20ef2b 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -0,0 +1,86 @@ +from typing import Optional + +from mindspore import Tensor +from mindspore.ops.auto_generate import FusedAddTopKDiv +import mindspore as ms +def fused_topk( + hidden_states: Tensor, + gating_output: Tensor, + topk: int, + renormalize: bool, + indices_type = None, +) -> tuple[Tensor, Tensor, Tensor]: + assert hidden_states.shape[0] == gating_output.shape[0], ( + "Number of tokens mismatch") + fused_add_topk_div = FusedAddTopKDiv() + e_score_correction_bias = 0 + num_expert_group = 0 + topk_group = 0 + scoring_type = 0 # softmax + group_max_topk = 2 + topk_weights, topk_ids = fused_add_topk_div( + gating_output, + e_score_correction_bias, + num_expert_group, + topk_group, + topk, + group_max_topk, + scoring_type, + renormalize) + if indices_type is not None: + topk_ids = topk_ids.to(indices_type) + return topk_weights, topk_ids + + +def grouped_topk( + hidden_states: Tensor, + gating_output: Tensor, + topk: int, + renormalize: bool, + num_expert_group: int = 0, + topk_group: int = 0, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[Tensor] = None +) -> tuple[Tensor, Tensor]: + fused_add_topk_div = FusedAddTopKDiv() + assert hidden_states.shape[0] == gating_output.shape[0], ( + "Number of tokens mismatch") + scoring_type = 0 # sigmoid + group_max_topk = 2 + topk_weights, topk_ids = fused_add_topk_div( + gating_output, + e_score_correction_bias, + num_expert_group, + topk_group, + topk, + group_max_topk, + scoring_type, + renormalize) + + return topk_weights.to(ms.float32), 
topk_ids.to(ms.int32) + + +def fused_experts(hidden_states: Tensor, + w1: Tensor, + w2: Tensor, + topk_weights: Tensor, + topk_ids: Tensor, + inplace: bool = False, + activation: str = "silu", + apply_router_weight_on_input: bool = False, + use_fp8_w8a8: bool = False, + use_int8_w8a8: bool = False, + use_int8_w8a16: bool = False, + use_int4_w4a16: bool = False, + per_channel_quant: bool = False, + global_num_experts: int = -1, + expert_map: Optional[Tensor] = None, + w1_scale: Optional[Tensor] = None, + w2_scale: Optional[Tensor] = None, + w1_zp: Optional[Tensor] = None, + w2_zp: Optional[Tensor] = None, + a1_scale: Optional[Tensor] = None, + a2_scale: Optional[Tensor] = None, + block_shape: Optional[list[int]] = None, + allow_deep_gemm: bool = False) -> Tensor: + ... \ No newline at end of file diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index bb2c02d6f..151946fc8 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -7,7 +7,6 @@ from enum import Enum from typing import Callable, Optional import torch -import torch.nn.functional as F from torch.nn.parameter import UninitializedParameter import vllm.envs as envs @@ -29,8 +28,208 @@ from vllm.platforms.interface import CpuArchEnum from vllm.utils import direct_register_custom_op from vllm.model_executor.layers.fused_moe.layers import FusedMoEParallelConfig +from vllm.model_executor.layers.fused_moe.layers import (determine_expert_map, MoEConfig, + FusedMoeWeightScaleSupported, + FusedMoEMethodBase) + + +from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk, + grouped_topk, + MOE_DP_CHUNK_SIZE, + fused_expert) + +from mindspore import nn, Tensor, Parameter + + + +class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): + """MoE method without quantization.""" + + def __init__(self, moe: MoEConfig): + super().__init__() + self.fused_experts = fused_experts # type: ignore + self.moe = moe + + self.rocm_aiter_fused_experts = None # type: ignore + + def select_gemm_impl( + self, prepare_finalize: Optional[FusedMoEPrepareAndFinalize]): + + assert self.fused_experts == fused_experts + + all2all_manager = get_ep_group().device_communicator.all2all_manager + assert all2all_manager is not None + + experts: Optional[FusedMoEPermuteExpertsUnpermute] = None + + if isinstance(prepare_finalize, + (BatchedPrepareAndFinalize, PplxPrepareAndFinalize)): + logger.debug("BatchedTritonExperts %s", self.moe) + experts = BatchedTritonExperts( + max_num_tokens=MOE_DP_CHUNK_SIZE, + world_size=all2all_manager.world_size, + # dp_size actually means tp_size, bug in pplx kernels + dp_size=all2all_manager.tp_group.world_size, + use_fp8_w8a8=False, + use_int8_w8a8=False, + use_int8_w8a16=False, + use_int4_w4a16=False, + block_shape=None, + ) + else: + logger.debug("TritonExperts %s", self.moe) + experts = TritonExperts( + use_fp8_w8a8=False, + use_int8_w8a8=False, + use_int8_w8a16=False, + use_int4_w4a16=False, + block_shape=None, + per_channel_quant=False, + ) + return experts + + def create_weights(self, layer: torch.nn.Module, num_experts: int, + hidden_size: int, intermediate_size_per_partition: int, + params_dtype: torch.dtype, **extra_weight_attrs): + # Fused gate_up_proj (column parallel) + w13_weight = torch.nn.Parameter(torch.empty( + num_experts, + 2 * intermediate_size_per_partition, + hidden_size, + dtype=params_dtype), + requires_grad=False) + 
layer.register_parameter("w13_weight", w13_weight) + set_weight_attrs(w13_weight, extra_weight_attrs) + + # down_proj (row parallel) + w2_weight = torch.nn.Parameter(torch.empty( + num_experts, + hidden_size, + intermediate_size_per_partition, + dtype=params_dtype), + requires_grad=False) + layer.register_parameter("w2_weight", w2_weight) + set_weight_attrs(w2_weight, extra_weight_attrs) + + def _maybe_pad_weight(self, weight: torch.Tensor) -> torch.Tensor: + # Pad the weight tensor. This is an optimization on ROCm platform, which + # can benefit from tensors located far enough from one another in memory + if (envs.VLLM_ROCM_MOE_PADDING and current_platform.is_rocm() + and weight.stride(-1) == 1 + and (weight.stride(-2) * weight.element_size()) % 512 == 0): + num_pad = 256 // weight.element_size() + weight = F.pad(weight, (0, num_pad), "constant", 0)[..., :-num_pad] + torch.cuda.empty_cache() + return weight + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + super().process_weights_after_loading(layer) + + # Padding the weight for better performance on ROCm + layer.w13_weight.data = self._maybe_pad_weight(layer.w13_weight.data) + layer.w2_weight.data = self._maybe_pad_weight(layer.w2_weight.data) + # Lazy import to avoid importing triton. + from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( + shuffle_weights) + + if self.rocm_aiter_moe_enabled: + shuffled_w13, shuffled_w2 = shuffle_weights( + layer.w13_weight.data, layer.w2_weight.data) + + layer.w13_weight.data = shuffled_w13 + layer.w2_weight.data = shuffled_w2 + + if current_platform.is_cpu(): + if current_platform.get_cpu_architecture() == CpuArchEnum.X86: + import intel_extension_for_pytorch as ipex + layer.ipex_fusion = ipex.llm.modules.GatedMLPMOE( + layer.w13_weight, + layer.w2_weight, + use_prepack=envs.VLLM_CPU_MOE_PREPACK, + ) + else: + raise NotImplementedError("CPU MOE only supports x86 arch.") + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + router_logits: torch.Tensor, + top_k: int, + renormalize: bool, + use_grouped_topk: bool = False, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + apply_router_weight_on_input: bool = False, + activation: str = "silu", + ) -> torch.Tensor: + return self.forward_npu( + x=x, + layer=layer, + router_logits=router_logits, + top_k=top_k, + renormalize=renormalize, + use_grouped_topk=use_grouped_topk, + topk_group=topk_group, + num_expert_group=num_expert_group, + global_num_experts=global_num_experts, + expert_map=expert_map, + custom_routing_function=custom_routing_function, + scoring_func=scoring_func, + e_score_correction_bias=e_score_correction_bias, + activation=activation, + apply_router_weight_on_input=apply_router_weight_on_input) + + def forward_npu( + self, + layer: torch.nn.Module, + x: torch.Tensor, + use_grouped_topk: bool, + top_k: int, + router_logits: torch.Tensor, + renormalize: bool, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + apply_router_weight_on_input: bool = False, + activation: str = "silu", + ) -> 
torch.Tensor: + topk_weights, topk_ids = FusedMoE.select_experts( + hidden_states=x, + router_logits=router_logits, + use_grouped_topk=use_grouped_topk, + top_k=top_k, + renormalize=renormalize, + topk_group=topk_group, + num_expert_group=num_expert_group, + custom_routing_function=custom_routing_function, + scoring_func=scoring_func, + e_score_correction_bias=e_score_correction_bias, + indices_type=torch.uint32 if self.moe.use_pplx_kernels else None) + + return self.fused_experts( + hidden_states=x, + w1=layer.w13_weight, + w2=layer.w2_weight, + topk_weights=topk_weights, + topk_ids=topk_ids, + inplace=True, + activation=activation, + apply_router_weight_on_input=apply_router_weight_on_input, + global_num_experts=global_num_experts, + expert_map=expert_map, + ) + -from mindspore import nn class FusedMoE(nn.Cell): """FusedMoE layer for MoE models. @@ -59,7 +258,7 @@ class FusedMoE(nn.Cell): top_k: int, hidden_size: int, intermediate_size: int, - params_dtype: Optional[torch.dtype] = None, + params_dtype = None, reduce_results: bool = False, renormalize: bool = True, use_grouped_topk: bool = False, @@ -72,7 +271,7 @@ class FusedMoE(nn.Cell): prefix: str = "", custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, + e_score_correction_bias: Optional[Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", ): @@ -133,9 +332,6 @@ class FusedMoE(nn.Cell): if self.scoring_func != "softmax" and not self.use_grouped_topk: raise ValueError("Only softmax scoring function is supported for " "non-grouped topk.") - if current_platform.is_hpu(): - from vllm_hpu_extension.ops import DynamicFusedMOE - self.hpu_fused_moe = DynamicFusedMOE(self.global_num_experts) moe = MoEConfig( num_experts=self.global_num_experts, @@ -213,8 +409,8 @@ class FusedMoE(nn.Cell): return self.moe_parallel_config.use_pplx_kernels def _load_per_tensor_weight_scale(self, shard_id: str, - param: torch.nn.Parameter, - loaded_weight: torch.Tensor, + param: Parameter, + loaded_weight: Tensor, expert_id: int): param_data = param.data # for per tensor weight quantization @@ -229,9 +425,9 @@ class FusedMoE(nn.Cell): def _load_model_weight_or_group_weight_scale(self, shard_dim: int, - expert_data: torch.Tensor, + expert_data: Tensor, shard_id: str, - loaded_weight: torch.Tensor, + loaded_weight: Tensor, tp_rank: int, load_full_w2: bool = False): """ @@ -258,9 +454,9 @@ class FusedMoE(nn.Cell): expert_data=expert_data, tp_rank=tp_rank) - def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, + def _load_per_channel_weight_scale(self, expert_data: Tensor, shard_dim: int, shard_id: str, - loaded_weight: torch.Tensor, + loaded_weight: Tensor, tp_rank: int): # for per channel weight quantization if shard_id == "w2": @@ -272,8 +468,8 @@ class FusedMoE(nn.Cell): expert_data=expert_data, tp_rank=tp_rank) - def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, - shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): + def _load_w13(self, expert_data: Tensor, shard_dim: int, + shard_id: str, loaded_weight: Tensor, tp_rank: int): # Index the loaded weight for tp sharding. 
# gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim @@ -291,9 +487,9 @@ class FusedMoE(nn.Cell): expert_data.copy_(loaded_weight) def _load_w2(self, - expert_data: torch.Tensor, + expert_data: Tensor, shard_dim: int, - loaded_weight: torch.Tensor, + loaded_weight: Tensor, tp_rank: int, load_full: bool = False): @@ -309,14 +505,14 @@ class FusedMoE(nn.Cell): expert_data.copy_(loaded_weight) def _load_single_value(self, param: torch.nn.Parameter, - loaded_weight: torch.Tensor, expert_id: int): + loaded_weight: Tensor, expert_id: int): param_data = param.data # Input scales can be loaded directly and should be equal. param_data[expert_id] = loaded_weight - def _load_g_idx(self, shard_id: str, expert_data: torch.Tensor, - shard_dim: int, loaded_weight: torch.Tensor, tp_rank: int): + def _load_g_idx(self, shard_id: str, expert_data: Tensor, + shard_dim: int, loaded_weight: Tensor, tp_rank: int): if shard_id == "w2": self._load_w2(shard_dim=shard_dim, @@ -333,7 +529,7 @@ class FusedMoE(nn.Cell): return self.expert_map[expert_id].item() def weight_loader(self, param: torch.nn.Parameter, - loaded_weight: torch.Tensor, weight_name: str, + loaded_weight: Tensor, weight_name: str, shard_id: str, expert_id: int) -> None: expert_id = self._map_global_expert_id_to_local_expert_id(expert_id) @@ -487,8 +683,8 @@ class FusedMoE(nn.Cell): return @staticmethod - def select_experts(hidden_states: torch.Tensor, - router_logits: torch.Tensor, + def select_experts(hidden_states: Tensor, + router_logits: Tensor, top_k: int, use_grouped_topk: bool, renormalize: bool, @@ -496,9 +692,8 @@ class FusedMoE(nn.Cell): num_expert_group: Optional[int] = None, custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, + e_score_correction_bias: Optional[Tensor] = None, indices_type: Optional[torch.dtype] = None): - from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk # DeekSeekv2 uses grouped_top_k if use_grouped_topk: @@ -516,7 +711,7 @@ class FusedMoE(nn.Cell): if indices_type is not None: topk_ids = topk_ids.to(dtype=indices_type) elif custom_routing_function is None: - topk_weights, topk_ids, token_expert_indices = fused_topk( + topk_weights, topk_ids = fused_topk( hidden_states=hidden_states, gating_output=router_logits, topk=top_k, @@ -550,79 +745,19 @@ class FusedMoE(nn.Cell): return self.use_pplx_kernels def maybe_all_reduce_tensor_model_parallel( - self, final_hidden_states: torch.Tensor): + self, final_hidden_states: Tensor): """ The pplx combine kernel reduces across GPU ranks by default. """ - if self.use_pplx_kernels: - return final_hidden_states - else: - return tensor_model_parallel_all_reduce(final_hidden_states) - - def forward(self, hidden_states: torch.Tensor, - router_logits: torch.Tensor): - if self.use_direct_call: - return self.forward_impl(hidden_states, router_logits) - else: - return torch.ops.vllm.moe_forward(hidden_states, router_logits, - self.layer_name) - - def forward_impl_chunked(self, full_hidden_states: torch.Tensor, - full_router_logits: torch.Tensor): - - full_final_hidden_states = torch.empty_like(full_hidden_states) - - def process_chunk(chunk_start, chunk_end, skip_result_store=False): - hidden_states = full_hidden_states[chunk_start:chunk_end, :] - router_logits = full_router_logits[chunk_start:chunk_end, :] - - # Matrix multiply. 
-        final_hidden_states = self.quant_method.apply(
-            layer=self,
-            x=hidden_states,
-            router_logits=router_logits,
-            top_k=self.top_k,
-            renormalize=self.renormalize,
-            use_grouped_topk=self.use_grouped_topk,
-            global_num_experts=self.global_num_experts,
-            expert_map=self.expert_map,
-            topk_group=self.topk_group,
-            num_expert_group=self.num_expert_group,
-            custom_routing_function=self.custom_routing_function,
-            scoring_func=self.scoring_func,
-            e_score_correction_bias=self.e_score_correction_bias,
-            activation=self.activation,
-        )
-
-        if not skip_result_store:
-            full_final_hidden_states[chunk_start:chunk_end, :].copy_(
-                final_hidden_states)
-
-        ctx = get_forward_context()
-        max_tokens_across_dp = ctx.dp_metadata.max_tokens_across_dp_cpu
-        moe_dp_chunk_size_per_rank = MOE_DP_CHUNK_SIZE
-
-        num_tokens = full_hidden_states.size(0)
-        for chunk_start_ in range(0, max_tokens_across_dp,
-                                  moe_dp_chunk_size_per_rank):
-            chunk_start = chunk_start_
-            chunk_end = min(chunk_start + moe_dp_chunk_size_per_rank,
-                            max_tokens_across_dp)
-            # clamp start and end
-            chunk_start = min(chunk_start, num_tokens - 1)
-            chunk_end = min(chunk_end, num_tokens)
-
-            process_chunk(chunk_start,
-                          chunk_end,
-                          skip_result_store=chunk_start_ >= num_tokens)
+        return tensor_model_parallel_all_reduce(final_hidden_states)

-        return full_final_hidden_states
+    def construct(self, hidden_states: Tensor,
+                  router_logits: Tensor):
+        return self.forward_impl(hidden_states, router_logits)

-    def forward_impl(self, hidden_states: torch.Tensor,
-                     router_logits: torch.Tensor):
+    def forward_impl(self, hidden_states: Tensor,
+                     router_logits: Tensor):
         assert self.quant_method is not None
-        if self.moe_parallel_config.use_pplx_kernels:
-            return self.forward_impl_chunked(hidden_states, router_logits)
 
         if self.dp_size > 1:
             hidden_states, router_logits = get_ep_group().dispatch(
-- Gitee
From 60309c95fcc7240634476cce6399c56e6f2641e9 Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Fri, 20 Jun 2025 14:56:20 +0800
Subject: [PATCH 23/76] update

---
 .../layers/fused_moe/fused_moe.py             | 30 ++++++-----
 1 file changed, 12 insertions(+), 18 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
index b5c20ef2b..c6416b47e 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
@@ -1,6 +1,6 @@
 from typing import Optional
 
-from mindspore import Tensor
+from mindspore import Tensor, mint
 from mindspore.ops.auto_generate import FusedAddTopKDiv
 import mindspore as ms
 def fused_topk(
@@ -9,27 +9,21 @@ def fused_topk(
     topk: int,
     renormalize: bool,
     indices_type = None,
-) -> tuple[Tensor, Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
     assert hidden_states.shape[0] == gating_output.shape[0], (
         "Number of tokens mismatch")
-    fused_add_topk_div = FusedAddTopKDiv()
-    e_score_correction_bias = 0
-    num_expert_group = 0
-    topk_group = 0
-    scoring_type = 0  # softmax
-    group_max_topk = 2
-    topk_weights, topk_ids = fused_add_topk_div(
-        gating_output,
-        e_score_correction_bias,
-        num_expert_group,
-        topk_group,
-        topk,
-        group_max_topk,
-        scoring_type,
-        renormalize)
+    score = mint.softmax(gating_output, dim=-1)
+    topk_weights, topk_ids = mint.topk(
+        score,
+        k=topk,
+        dim=-1
+    )
+    if renormalize:
+        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
+
     if indices_type is not None:
         topk_ids = topk_ids.to(indices_type)
-    return topk_weights, topk_ids
+    return topk_weights.to(ms.float32), topk_ids.to(ms.int32)
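With this change fused_topk is plain softmax routing in eager mint ops. A NumPy sanity check with made-up numbers (illustration only):

    import numpy as np

    gating = np.array([[2.0, 1.0, 0.5, 0.1]])
    score = np.exp(gating) / np.exp(gating).sum(-1, keepdims=True)
    topk_ids = np.argsort(-score, axis=-1)[:, :2]            # -> [[0, 1]]
    topk_weights = np.take_along_axis(score, topk_ids, -1)   # ~[[0.57, 0.21]]
    # renormalize=True rescales the kept pair to sum to 1:
    topk_weights /= topk_weights.sum(-1, keepdims=True)      # ~[[0.73, 0.27]]

grouped_topk, by contrast, stays on the fused FusedAddTopKDiv kernel: sigmoid scoring plus a per-expert bias, with group-limited selection. A rough eager equivalent of that path, as I read the arguments passed above; the fused kernel's exact semantics may differ:

    def grouped_topk_reference(gating, bias, num_groups, topk_group, topk):
        scores = 1.0 / (1.0 + np.exp(-gating)) + bias        # sigmoid + bias
        t, e = scores.shape
        grouped = scores.reshape(t, num_groups, e // num_groups)
        # rank expert groups by the sum of their two best experts
        # (the hard-coded 2 in the call above), keep topk_group groups
        group_scores = np.sort(grouped, axis=-1)[..., -2:].sum(-1)
        drop = np.argsort(-group_scores, axis=-1)[:, topk_group:]
        np.put_along_axis(grouped, drop[..., None], -np.inf, axis=1)
        flat = grouped.reshape(t, e)
        ids = np.argsort(-flat, axis=-1)[:, :topk]
        weights = np.take_along_axis(flat, ids, axis=-1)
        return weights / weights.sum(-1, keepdims=True), ids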
-- Gitee
From 68b007b6d28463e64b6fa539919a97bb8aeddf1c Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Fri, 20 Jun 2025 15:30:58 +0800
Subject: [PATCH 24/76] update

---
 vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
index c6416b47e..ec1642bd9 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
@@ -3,6 +3,8 @@ from typing import Optional
 from mindspore import Tensor, mint
 from mindspore.ops.auto_generate import FusedAddTopKDiv
 import mindspore as ms
+
+
 def fused_topk(
     hidden_states: Tensor,
     gating_output: Tensor,
@@ -40,14 +42,14 @@ def grouped_topk(
     assert hidden_states.shape[0] == gating_output.shape[0], (
         "Number of tokens mismatch")
     scoring_type = 0  # sigmoid
-    group_max_topk = 2
+    topk_in_group = 2
     topk_weights, topk_ids = fused_add_topk_div(
         gating_output,
         e_score_correction_bias,
         num_expert_group,
         topk_group,
         topk,
-        group_max_topk,
+        topk_in_group,
         scoring_type,
         renormalize)
 
-- Gitee
From 52423267154cb3ca82627e8eaa9b92096a55eac7 Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Mon, 23 Jun 2025 12:08:30 +0800
Subject: [PATCH 25/76] update

---
 .../device_communicators/__init__.py          |   0
 .../device_communicators/npu_communicator.py  |   4 +
 .../layers/fused_moe/fused_moe.py             |  46 ++-
 .../model_executor/layers/fused_moe/layer.py  | 354 ++++--------
 vllm_mindspore/platforms/ascend.py            |   2 +-
 5 files changed, 129 insertions(+), 277 deletions(-)
 create mode 100644 vllm_mindspore/distributed/device_communicators/__init__.py
 create mode 100644 vllm_mindspore/distributed/device_communicators/npu_communicator.py

diff --git a/vllm_mindspore/distributed/device_communicators/__init__.py b/vllm_mindspore/distributed/device_communicators/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/vllm_mindspore/distributed/device_communicators/npu_communicator.py b/vllm_mindspore/distributed/device_communicators/npu_communicator.py
new file mode 100644
index 000000000..3885baa93
--- /dev/null
+++ b/vllm_mindspore/distributed/device_communicators/npu_communicator.py
@@ -0,0 +1,4 @@
+from vllm.distributed.device_communicators.cuda_communicator import CudaCommunicator
+
+class NPUCommunicator(CudaCommunicator):
+    ...
diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
index ec1642bd9..169716d72 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
@@ -3,7 +3,7 @@ from typing import Optional
 from mindspore import Tensor, mint
 from mindspore.ops.auto_generate import FusedAddTopKDiv
 import mindspore as ms
-
+from vllm.distributed.parallel_state import get_tp_group
 
 def fused_topk(
     hidden_states: Tensor,
@@ -79,4 +79,46 @@ def fused_experts(hidden_states: Tensor,
                   a2_scale: Optional[Tensor] = None,
                   block_shape: Optional[list[int]] = None,
                   allow_deep_gemm: bool = False) -> Tensor:
-    ...
\ No newline at end of file + use_ep = False + if expert_map is not None: + use_ep = True + + if use_ep: + _run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation) + else: + _run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation) + +def _run_activation(x, activation): + if activation == "silu": + return mint.silu(x) + elif activation == "gelu": + return mint.gelu(x) + else: + raise ValueError(f"Unsupported activation function: {activation}") + + +def _run_ep_moe(hidden_states, + w1, + w2, + group_list, + group_logits, + activation): + hidden_states = mint.group_matmul(hidden_states, w1, group_list) + hidden_states = _run_activation(hidden_states, activation) + hidden_states = mint.group_matmul(hidden_states, w2, group_list) + hidden_states = mint.mul(hidden_states, group_logits) + return hidden_states + + +def _run_tp_moe(hidden_states, + w1, + w2, + group_list, + group_logits, + activation): + hidden_states = mint.group_matmul(hidden_states, w1, group_list) + hidden_states = _run_activation(hidden_states, activation) + hidden_states = mint.group_matmul(hidden_states, w2, group_list) + hidden_states = mint.all_reduce(hidden_states, get_tp_group()) + hidden_states = mint.mul(hidden_states, group_logits) + return hidden_states diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 151946fc8..5bc0aef70 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -36,9 +36,9 @@ from vllm.model_executor.layers.fused_moe.layers import (determine_expert_map, M from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk, grouped_topk, MOE_DP_CHUNK_SIZE, - fused_expert) + fused_experts) -from mindspore import nn, Tensor, Parameter +from mindspore import nn, Tensor, Parameter, mint @@ -50,124 +50,47 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): self.fused_experts = fused_experts # type: ignore self.moe = moe - self.rocm_aiter_fused_experts = None # type: ignore - - def select_gemm_impl( - self, prepare_finalize: Optional[FusedMoEPrepareAndFinalize]): - - assert self.fused_experts == fused_experts - - all2all_manager = get_ep_group().device_communicator.all2all_manager - assert all2all_manager is not None - - experts: Optional[FusedMoEPermuteExpertsUnpermute] = None - - if isinstance(prepare_finalize, - (BatchedPrepareAndFinalize, PplxPrepareAndFinalize)): - logger.debug("BatchedTritonExperts %s", self.moe) - experts = BatchedTritonExperts( - max_num_tokens=MOE_DP_CHUNK_SIZE, - world_size=all2all_manager.world_size, - # dp_size actually means tp_size, bug in pplx kernels - dp_size=all2all_manager.tp_group.world_size, - use_fp8_w8a8=False, - use_int8_w8a8=False, - use_int8_w8a16=False, - use_int4_w4a16=False, - block_shape=None, - ) - else: - logger.debug("TritonExperts %s", self.moe) - experts = TritonExperts( - use_fp8_w8a8=False, - use_int8_w8a8=False, - use_int8_w8a16=False, - use_int4_w4a16=False, - block_shape=None, - per_channel_quant=False, - ) - return experts - - def create_weights(self, layer: torch.nn.Module, num_experts: int, + def create_weights(self, layer: nn.Cell, num_experts: int, hidden_size: int, intermediate_size_per_partition: int, - params_dtype: torch.dtype, **extra_weight_attrs): + params_dtype, **extra_weight_attrs): # Fused gate_up_proj (column parallel) - w13_weight = torch.nn.Parameter(torch.empty( + w13_weight = Parameter(mint.empty( num_experts, 2 * 
intermediate_size_per_partition, hidden_size, dtype=params_dtype), requires_grad=False) - layer.register_parameter("w13_weight", w13_weight) + layer.insert_param_to_cell("w13_weight", w13_weight) set_weight_attrs(w13_weight, extra_weight_attrs) # down_proj (row parallel) - w2_weight = torch.nn.Parameter(torch.empty( + w2_weight = Parameter(mint.empty( num_experts, hidden_size, intermediate_size_per_partition, dtype=params_dtype), requires_grad=False) - layer.register_parameter("w2_weight", w2_weight) + layer.insert_param_to_cell("w2_weight", w2_weight) set_weight_attrs(w2_weight, extra_weight_attrs) - def _maybe_pad_weight(self, weight: torch.Tensor) -> torch.Tensor: - # Pad the weight tensor. This is an optimization on ROCm platform, which - # can benefit from tensors located far enough from one another in memory - if (envs.VLLM_ROCM_MOE_PADDING and current_platform.is_rocm() - and weight.stride(-1) == 1 - and (weight.stride(-2) * weight.element_size()) % 512 == 0): - num_pad = 256 // weight.element_size() - weight = F.pad(weight, (0, num_pad), "constant", 0)[..., :-num_pad] - torch.cuda.empty_cache() - return weight - - def process_weights_after_loading(self, layer: torch.nn.Module) -> None: - super().process_weights_after_loading(layer) - - # Padding the weight for better performance on ROCm - layer.w13_weight.data = self._maybe_pad_weight(layer.w13_weight.data) - layer.w2_weight.data = self._maybe_pad_weight(layer.w2_weight.data) - # Lazy import to avoid importing triton. - from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( - shuffle_weights) - - if self.rocm_aiter_moe_enabled: - shuffled_w13, shuffled_w2 = shuffle_weights( - layer.w13_weight.data, layer.w2_weight.data) - - layer.w13_weight.data = shuffled_w13 - layer.w2_weight.data = shuffled_w2 - - if current_platform.is_cpu(): - if current_platform.get_cpu_architecture() == CpuArchEnum.X86: - import intel_extension_for_pytorch as ipex - layer.ipex_fusion = ipex.llm.modules.GatedMLPMOE( - layer.w13_weight, - layer.w2_weight, - use_prepack=envs.VLLM_CPU_MOE_PREPACK, - ) - else: - raise NotImplementedError("CPU MOE only supports x86 arch.") - def apply( self, - layer: torch.nn.Module, - x: torch.Tensor, - router_logits: torch.Tensor, + layer: nn.Cell, + x: Tensor, + router_logits: Tensor, top_k: int, renormalize: bool, use_grouped_topk: bool = False, topk_group: Optional[int] = None, num_expert_group: Optional[int] = None, global_num_experts: int = -1, - expert_map: Optional[torch.Tensor] = None, + expert_map: Optional[Tensor] = None, custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, + e_score_correction_bias: Optional[Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", - ) -> torch.Tensor: + ) -> Tensor: return self.forward_npu( x=x, layer=layer, @@ -187,22 +110,22 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): def forward_npu( self, - layer: torch.nn.Module, - x: torch.Tensor, + layer: nn.Cell, + x: Tensor, use_grouped_topk: bool, top_k: int, - router_logits: torch.Tensor, + router_logits: Tensor, renormalize: bool, topk_group: Optional[int] = None, num_expert_group: Optional[int] = None, global_num_experts: int = -1, - expert_map: Optional[torch.Tensor] = None, + expert_map: Optional[Tensor] = None, custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, + e_score_correction_bias: 
Optional[Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", - ) -> torch.Tensor: + ) -> Tensor: topk_weights, topk_ids = FusedMoE.select_experts( hidden_states=x, router_logits=router_logits, @@ -408,126 +331,103 @@ class FusedMoE(nn.Cell): def use_pplx_kernels(self): return self.moe_parallel_config.use_pplx_kernels - def _load_per_tensor_weight_scale(self, shard_id: str, - param: Parameter, - loaded_weight: Tensor, - expert_id: int): - param_data = param.data - # for per tensor weight quantization - if shard_id in ("w1", "w3"): - # We have to keep the weight scales of w1 and w3 because - # we need to re-quantize w1/w3 weights after weight loading. - idx = 0 if shard_id == "w1" else 1 - param_data[expert_id][idx] = loaded_weight - # If we are in the row parallel case (down_proj) - elif shard_id == "w2": - param_data[expert_id] = loaded_weight - - def _load_model_weight_or_group_weight_scale(self, - shard_dim: int, - expert_data: Tensor, - shard_id: str, - loaded_weight: Tensor, - tp_rank: int, - load_full_w2: bool = False): - """ - Load grouped weight scales for group quantization or model weights - :param shard_dim: dimension to shard - :param expert_data: parameter for a particular expert - :param shard_id: either w1, w2, or w3 - :param loaded_weight: checkpoint weight to load into the param - :param tp_rank: tensor parallel rank - :param load_full_w2: whether or not the w2 loaded should be sharded. - """ - if shard_id == "w2": - # In the case where we have actorder/g_idx, we do not partition the - # w2 scales, as indicated by `load_full` argument, for all tp cases - self._load_w2(shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=tp_rank, - load_full=load_full_w2) - elif shard_id in ("w1", "w3"): - self._load_w13(shard_id=shard_id, - shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=tp_rank) - - def _load_per_channel_weight_scale(self, expert_data: Tensor, - shard_dim: int, shard_id: str, - loaded_weight: Tensor, - tp_rank: int): - # for per channel weight quantization - if shard_id == "w2": - expert_data.copy_(loaded_weight) - elif shard_id in ("w1", "w3"): - self._load_w13(shard_id=shard_id, - shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=tp_rank) - - def _load_w13(self, expert_data: Tensor, shard_dim: int, - shard_id: str, loaded_weight: Tensor, tp_rank: int): + def _load_w13(self, param: Parameter, shard_dim: int, + shard_id: str, loaded_weight: Tensor, expert_id: int, + tp_rank: int): # Index the loaded weight for tp sharding. # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim - shard_size = expert_data.shape[shard_dim] // 2 + shard_size = param.shape[shard_dim] // 2 loaded_weight = loaded_weight.narrow(shard_dim, shard_size * tp_rank, shard_size) # Narrow parameter and load. # w1, gate_proj: Load into first logical weight of w13. if shard_id == "w1": - expert_data = expert_data.narrow(shard_dim, 0, shard_size) + # expert_data = expert_data.narrow(shard_dim, 0, shard_size) + param[expert_id, ] = loaded_weight # w3, up_proj: Load into second logical weight of w13. 
         else:
             assert shard_id == "w3"
-            expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
-        expert_data.copy_(loaded_weight)
+            # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
+            param[expert_id, ] = loaded_weight
+        # expert_data.set_data(loaded_weight)
 
     def _load_w2(self,
-                 expert_data: Tensor,
+                 param: Parameter,
                  shard_dim: int,
                  loaded_weight: Tensor,
                  tp_rank: int,
-                 load_full: bool = False):
+                 expert_id: int,
+                 load_full: bool = False)
 
         # Index the loaded weight for tp sharding.
         # down_proj: "RowParallel" so tp sharding on input_dim
         # Narrow parameter and load.
-        shard_size = expert_data.shape[shard_dim]
+        shard_size = param.shape[shard_dim]
         if not load_full:
             loaded_weight = loaded_weight.narrow(shard_dim,
                                                  shard_size * tp_rank,
                                                  shard_size)
 
         # w2, down_proj: Load into only logical weight of w2.
-        expert_data.copy_(loaded_weight)
+        param[expert_id] = loaded_weight
 
-    def _load_single_value(self, param: torch.nn.Parameter,
+    def _load_single_value(self, param: Parameter,
                            loaded_weight: Tensor, expert_id: int):
-        param_data = param.data
+        param[expert_id] = loaded_weight
 
-        # Input scales can be loaded directly and should be equal.
-        param_data[expert_id] = loaded_weight
-
-    def _load_g_idx(self, shard_id: str, expert_data: Tensor,
-                    shard_dim: int, loaded_weight: Tensor, tp_rank: int):
+    def _load_g_idx(self, shard_id: str, param: Parameter,
+                    shard_dim: int, loaded_weight: Tensor, tp_rank: int,
+                    expert_id: int):
 
         if shard_id == "w2":
             self._load_w2(shard_dim=shard_dim,
                           loaded_weight=loaded_weight,
-                          expert_data=expert_data,
+                          param=param,
+                          expert_id=expert_id,
                           tp_rank=tp_rank)
         else:
             assert shard_id in ("w1", "w3")
-            expert_data.copy_(loaded_weight)
+            param[expert_id] = loaded_weight
 
     def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int:
         if self.expert_map is None:
             return expert_id
         return self.expert_map[expert_id].item()
 
+    def _load_model_weight_or_group_weight_scale(self,
+                                                 shard_dim: int,
+                                                 param: Parameter,
+                                                 shard_id: str,
+                                                 loaded_weight: Tensor,
+                                                 tp_rank: int,
+                                                 expert_id: int,
+                                                 load_full_w2: bool = False):
+        """
+        Load grouped weight scales for group quantization or model weights
+            :param shard_dim: dimension to shard
+            :param param: parameter for a particular expert
+            :param shard_id: either w1, w2, or w3
+            :param loaded_weight: checkpoint weight to load into the param
+            :param tp_rank: tensor parallel rank
+            :param load_full_w2: whether or not the w2 loaded should be sharded.
+ """ + if shard_id == "w2": + # In the case where we have actorder/g_idx, we do not partition the + # w2 scales, as indicated by `load_full` argument, for all tp cases + self._load_w2(shard_dim=shard_dim, + loaded_weight=loaded_weight, + param=param, + tp_rank=tp_rank, + expert_id=expert_id, + load_full=load_full_w2) + elif shard_id in ("w1", "w3"): + self._load_w13(shard_id=shard_id, + shard_dim=shard_dim, + loaded_weight=loaded_weight, + param=param, + expert_id=expert_id, + tp_rank=tp_rank) + def weight_loader(self, param: torch.nn.Parameter, loaded_weight: Tensor, weight_name: str, shard_id: str, expert_id: int) -> None: @@ -535,14 +435,6 @@ class FusedMoE(nn.Cell): expert_id = self._map_global_expert_id_to_local_expert_id(expert_id) if expert_id == -1: return - quant_method_name = self.quant_method.__class__.__name__ - # compressed-tensors checkpoints with packed weights are stored flipped - # TODO (mgoin): check self.quant_method.quant_config.quant_format - # against known CompressionFormat enum values that have this quality - if self.quant_method.__class__.__name__ in ( - "CompressedTensorsWNA16MarlinMoEMethod", - "CompressedTensorsWNA16MoEMethod"): - loaded_weight = loaded_weight.t().contiguous() if shard_id not in ("w1", "w2", "w3"): raise ValueError(f"shard_id must be ['w1','w2','w3'] but " @@ -556,13 +448,6 @@ class FusedMoE(nn.Cell): # dimension intermediate_size_per_partition is used. SHARD_ID_TO_SHARDED_DIM = {"w1": 0, "w2": 1, "w3": 0} - is_gguf_weight = getattr(param, "is_gguf_weight", False) - is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False) - if is_gguf_weight_type: - param.weight_type = loaded_weight.item() - param.data.copy_(loaded_weight) - return - # is_transposed: if the dim to shard the weight # should be flipped. Required by GPTQ, compressed-tensors # should be whatever dimension intermediate_size_per_partition is @@ -575,93 +460,14 @@ class FusedMoE(nn.Cell): if full_load: shard_dim += 1 - # Materialize GGUF UninitializedParameter - if is_gguf_weight and isinstance(param, UninitializedParameter): - final_shape = list(loaded_weight.shape) - if shard_id in ["w1", "w3"]: - final_shape[1] *= 2 - final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size - param.materialize(final_shape, dtype=loaded_weight.dtype) - - expert_data = param.data if full_load else param.data[expert_id] - # Case input scale: input_scale loading is only supported for fp8 - if "input_scale" in weight_name: - # this is needed for compressed-tensors only - loaded_weight = loaded_weight.to(param.data.device) - - if ("compressed" in quant_method_name.lower() - and param.data[expert_id] != 1 - and (param.data[expert_id] - loaded_weight).abs() > 1e-5): - raise ValueError( - "input_scales of w1 and w3 of a layer " - f"must be equal. But got {param.data[expert_id]} " - f"vs. 
{loaded_weight}") - - self._load_single_value(param=param, - loaded_weight=loaded_weight, - expert_id=expert_id) - return - # Case g_idx if "g_idx" in weight_name: self._load_g_idx(shard_dim=0, shard_id=shard_id, loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=self.tp_rank) - return - - if "ModelOpt" in quant_method_name: - if ('weight_scale_2' in weight_name - or 'input_scale' in weight_name): - self._load_per_tensor_weight_scale(shard_id=shard_id, - param=param, - loaded_weight=loaded_weight, - expert_id=expert_id) - elif "weight" in weight_name: - self._load_model_weight_or_group_weight_scale( - shard_id=shard_id, - shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=self.tp_rank) - return - - # Case weight scales, zero_points and offset - if ("scale" in weight_name or "zero" in weight_name - or "offset" in weight_name): - # load the weight scales and zp based on the quantization scheme - # supported weight scales/zp can be found in - # FusedMoeWeightScaleSupported - # TODO @dsikka: once hardened, refactor to use vLLM Parameters - # specific to each case - quant_method = getattr(param, "quant_method", None) - if quant_method == FusedMoeWeightScaleSupported.CHANNEL.value: - self._load_per_channel_weight_scale( - shard_id=shard_id, - shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=self.tp_rank) - elif quant_method in [ - FusedMoeWeightScaleSupported.GROUP.value, - FusedMoeWeightScaleSupported.BLOCK.value, - ]: - self._load_model_weight_or_group_weight_scale( - shard_id=shard_id, - shard_dim=shard_dim, - loaded_weight=loaded_weight, - expert_data=expert_data, - tp_rank=self.tp_rank, - load_full_w2=getattr(param, "load_full_w2", False)) - elif quant_method == FusedMoeWeightScaleSupported.TENSOR.value: - self._load_per_tensor_weight_scale(shard_id=shard_id, - param=param, - loaded_weight=loaded_weight, - expert_id=expert_id) - else: - raise ValueError( - f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}") + param=param, + tp_rank=self.tp_rank, + expert_id=expert_id) return # Case weight_shape @@ -678,7 +484,7 @@ class FusedMoE(nn.Cell): shard_id=shard_id, shard_dim=shard_dim, loaded_weight=loaded_weight, - expert_data=expert_data, + param=param, tp_rank=self.tp_rank) return diff --git a/vllm_mindspore/platforms/ascend.py b/vllm_mindspore/platforms/ascend.py index 43d5d1773..7a31885cb 100644 --- a/vllm_mindspore/platforms/ascend.py +++ b/vllm_mindspore/platforms/ascend.py @@ -136,7 +136,7 @@ class AscendPlatform(Platform): def get_device_communicator_cls(cls) -> str: """Get device specific communicator class for distributed communication.""" if envs.VLLM_USE_V1: - return "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator" + return "vllm_mindspore.distributed.device_communicators.npu_communicator.NPUCommunicator" return "vllm.distributed.device_communicators.base_device_communicator.DeviceCommunicatorBase" @classmethod -- Gitee From 72c90dc355544d975b70a838b1f129f556fc4765 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 15:17:11 +0800 Subject: [PATCH 26/76] update load --- .../model_executor/layers/fused_moe/layer.py | 53 ++++++++++++++----- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 5bc0aef70..f5ea5590a 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ 
b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -333,7 +333,7 @@ class FusedMoE(nn.Cell): def _load_w13(self, param: Parameter, shard_dim: int, shard_id: str, loaded_weight: Tensor, expert_id: int, - tp_rank: int): + tp_rank: int, load_full: bool = False): # Index the loaded weight for tp sharding. # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim @@ -342,15 +342,38 @@ class FusedMoE(nn.Cell): shard_size) # Narrow parameter and load. # w1, gate_proj: Load into first logical weight of w13. - if shard_id == "w1": - # expert_data = expert_data.narrow(shard_dim, 0, shard_size) - param[expert_id, ] = loaded_weight - # w3, up_proj: Load into second logical weight of w13. + if not load_full: + if shard_id == "w1": + if shard_dim == 1: + param[expert_id, :, 0:shard_size] = loaded_weight + else: + assert shard_dim == 0 + param[expert_id, 0:shard_size, :] = loaded_weight + # w3, up_proj: Load into second logical weight of w13. + else: + assert shard_id == "w3" + # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size) + if shard_dim == 1: + param[expert_id, :, shard_size:shard_size*2] = loaded_weight + else: + assert shard_dim == 0 + param[expert_id, shard_size:shard_size*2, :] = loaded_weight else: - assert shard_id == "w3" - # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size) - param[expert_id, ] = loaded_weight - # expert_data.set_data(loaded_weight) + if shard_id == "w1": + if shard_dim == 2: + param[:, :, 0:shard_size] = loaded_weight + else: + assert shard_dim == 1 + param[:, 0:shard_size, :] = loaded_weight + # w3, up_proj: Load into second logical weight of w13. + else: + assert shard_id == "w3" + # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size) + if shard_dim == 2: + param[:, :, shard_size:shard_size*2] = loaded_weight + else: + assert shard_dim == 1 + param[:, shard_size:shard_size*2, :] = loaded_weight def _load_w2(self, param: Parameter, @@ -358,7 +381,7 @@ class FusedMoE(nn.Cell): loaded_weight: Tensor, tp_rank: int, expert_id: int, - load_full: bool = False) + load_full: bool = False): # Index the loaded weight for tp sharding. # down_proj: "RowParallel" so tp sharding on input_dim @@ -368,8 +391,10 @@ class FusedMoE(nn.Cell): loaded_weight = loaded_weight.narrow(shard_dim, shard_size * tp_rank, shard_size) + param.set_data(loaded_weight) # w2, down_proj: Load into only logical weight of w2. 
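(The hunk continues below.) All of these loaders converge on one pattern: slice this rank's TP shard out of the full checkpoint tensor, then write it into one expert's slot of the stacked parameter. With hypothetical shapes (8 experts, hidden 1024, intermediate 512, tp_size 4) the down_proj (w2) case reduces to the sketch below; note that with the leading expert dimension the shard width lives at index shard_dim + 1 of the stacked parameter, which is the form this series settles on a few patches later:

    # param: stacked per-expert weight, e.g. [8, 1024, 128] after TP sharding;
    # loaded_weight: one expert's full checkpoint tensor, e.g. [1024, 512].
    # w2 is RowParallel, so its input dim (shard_dim = 1) is what gets sharded.
    shard_size = param.shape[shard_dim + 1]   # +1 skips the expert dimension
    start = shard_size * tp_rank              # rank 2 -> columns 256:384
    param[expert_id] = loaded_weight.narrow(shard_dim, start, shard_size)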
-        param[expert_id] = loaded_weight
+        else:
+            param[expert_id] = loaded_weight
 
     def _load_single_value(self, param: Parameter,
                            loaded_weight: Tensor, expert_id: int):
@@ -401,7 +426,8 @@ class FusedMoE(nn.Cell):
                                                  loaded_weight: Tensor,
                                                  tp_rank: int,
                                                  expert_id: int,
-                                                 load_full_w2: bool = False):
+                                                 load_full_w2: bool = False,
+                                                 load_full_w3: bool = False):
         """
         Load grouped weight scales for group quantization or model weights
             :param shard_dim: dimension to shard
@@ -426,7 +452,8 @@ class FusedMoE(nn.Cell):
                            loaded_weight=loaded_weight,
                            param=param,
                            expert_id=expert_id,
-                           tp_rank=tp_rank)
+                           tp_rank=tp_rank,
+                           load_full=load_full_w3)
 
     def weight_loader(self, param: torch.nn.Parameter,
                       loaded_weight: Tensor, weight_name: str,
-- Gitee
From 31a6c465ae8b71022a81f35c00fc6383a58c448b Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Mon, 23 Jun 2025 16:23:38 +0800
Subject: [PATCH 27/76] update moe

---
 .../layers/fused_moe/fused_moe.py             | 62 +++++++++++--------
 .../model_executor/layers/fused_moe/layer.py  | 11 ++--
 2 files changed, 42 insertions(+), 31 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
index 169716d72..1f6f4b7e2 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
@@ -1,9 +1,9 @@
 from typing import Optional
 
-from mindspore import Tensor, mint
+from mindspore import Tensor, mint, ops
 from mindspore.ops.auto_generate import FusedAddTopKDiv
 import mindspore as ms
-from vllm.distributed.parallel_state import get_tp_group
+from vllm.distributed.parallel_state import get_ep_group
 
 def fused_topk(
@@ -56,37 +56,39 @@ def grouped_topk(
 
     return topk_weights.to(ms.float32), topk_ids.to(ms.int32)
 
+def _ep_dispatch(x, topk_ids):
+    return mint.distributed.all_to_all(x, topk_ids)
+
+def _ep_combine(x, topk_ids):
+    return mint.distributed.all_to_all(x, topk_ids)
+
 def fused_experts(hidden_states: Tensor,
                   w1: Tensor,
                   w2: Tensor,
                   topk_weights: Tensor,
                   topk_ids: Tensor,
-                  inplace: bool = False,
                   activation: str = "silu",
-                  apply_router_weight_on_input: bool = False,
-                  use_fp8_w8a8: bool = False,
-                  use_int8_w8a8: bool = False,
-                  use_int8_w8a16: bool = False,
-                  use_int4_w4a16: bool = False,
-                  per_channel_quant: bool = False,
                   global_num_experts: int = -1,
-                  expert_map: Optional[Tensor] = None,
-                  w1_scale: Optional[Tensor] = None,
-                  w2_scale: Optional[Tensor] = None,
-                  w1_zp: Optional[Tensor] = None,
-                  w2_zp: Optional[Tensor] = None,
-                  a1_scale: Optional[Tensor] = None,
-                  a2_scale: Optional[Tensor] = None,
-                  block_shape: Optional[list[int]] = None,
-                  allow_deep_gemm: bool = False) -> Tensor:
+                  apply_router_weight_on_input: bool = False,
+                  expert_map: Optional[Tensor] = None) -> Tensor:
 
     use_ep = False
     if expert_map is not None:
         use_ep = True
 
     if use_ep:
-        _run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation)
+        hidden_states = _ep_dispatch(hidden_states, topk_ids)
+        hidden_states = _run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation)
+        hidden_states = _ep_combine(hidden_states, topk_ids)
+        if apply_router_weight_on_input:
+            hidden_states = mint.mul(hidden_states, topk_weights)
+            hidden_states = hidden_states.sum(-1)
    else:
-        _run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation)
+        hidden_states = _run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation)
+        if apply_router_weight_on_input:
+            hidden_states = mint.mul(hidden_states, topk_weights)
+
+    return hidden_states
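An eager, single-device reference for what fused_experts computes in the TP branch (no collectives; hypothetical sizes; ReLU standing in for the configured activation): sort the token/expert pairs so each expert sees a contiguous slice, run that expert's MLP on its slice, then scatter back and combine with the router weights.

    import numpy as np

    T, H, F, E, K = 6, 8, 16, 4, 2                 # tokens, dims, experts, top-k
    x = np.random.randn(T, H).astype(np.float32)
    w1 = np.random.randn(E, H, F).astype(np.float32)
    w2 = np.random.randn(E, F, H).astype(np.float32)
    topk_ids = np.random.randint(0, E, size=(T, K))
    topk_weights = np.full((T, K), 1.0 / K, dtype=np.float32)

    flat_ids = topk_ids.reshape(-1)
    order = np.argsort(flat_ids, kind="stable")    # group token copies by expert
    x_rep = np.repeat(x, K, axis=0)[order]
    out = np.empty_like(x_rep)
    start = 0
    for e, count in zip(*np.unique(flat_ids, return_counts=True)):
        seg = slice(start, start + count)
        out[seg] = np.maximum(x_rep[seg] @ w1[e], 0) @ w2[e]
        start += count
    y = (out[np.argsort(order)].reshape(T, K, H)
         * topk_weights[..., None]).sum(axis=1)    # [T, H] combined output

The per-expert loop is exactly what GroupedMatmulV4 fuses in the hunks that follow: with tokens pre-grouped like this, group_list carries the per-expert token counts (as I read the group_list_type=1 argument) and the op runs all the matmuls in one launch.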
 
 def _run_activation(x, activation):
     if activation == "silu":
@@ -97,16 +99,22 @@ def _run_activation(x, activation):
         raise ValueError(f"Unsupported activation function: {activation}")
 
 
+group_matmul_ops = ops.auto_generate.GroupedMatmulV4()
+
+def _run_group_matmul(hidden_states, weight, group_list):
+    return group_matmul_ops([hidden_states], [weight], group_list,
+                            None, None, None, None, None, None,
+                            group_list, split_item=3, group_type=0, group_list_type=1)
+
 def _run_ep_moe(hidden_states,
                 w1,
                 w2,
                 group_list,
                 group_logits,
                 activation):
-    hidden_states = mint.group_matmul(hidden_states, w1, group_list)
+    hidden_states = _run_group_matmul(hidden_states, w1, group_list)
     hidden_states = _run_activation(hidden_states, activation)
-    hidden_states = mint.group_matmul(hidden_states, w2, group_list)
-    hidden_states = mint.mul(hidden_states, group_logits)
+    hidden_states = _run_group_matmul(hidden_states, w2, group_list)
     return hidden_states
 
 
@@ -116,9 +124,9 @@ def _run_tp_moe(hidden_states,
                 group_list,
                 group_logits,
                 activation):
-    hidden_states = mint.group_matmul(hidden_states, w1, group_list)
+    # hidden_states = mint.group_matmul(hidden_states, w1, group_list)
+    hidden_states = _run_group_matmul(hidden_states, w1, group_list)
     hidden_states = _run_activation(hidden_states, activation)
-    hidden_states = mint.group_matmul(hidden_states, w2, group_list)
-    hidden_states = mint.all_reduce(hidden_states, get_tp_group())
-    hidden_states = mint.mul(hidden_states, group_logits)
+    hidden_states = _run_group_matmul(hidden_states, w2, group_list)
+    hidden_states = mint.distributed.all_reduce(hidden_states, get_ep_group())
     return hidden_states
diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index f5ea5590a..9dcc459d7 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -145,10 +145,9 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell):
             w2=layer.w2_weight,
             topk_weights=topk_weights,
             topk_ids=topk_ids,
-            inplace=True,
             activation=activation,
-            apply_router_weight_on_input=apply_router_weight_on_input,
             global_num_experts=global_num_experts,
+            apply_router_weight_on_input=apply_router_weight_on_input,
             expert_map=expert_map,
         )
 
@@ -592,9 +591,13 @@ class FusedMoE(nn.Cell):
                      router_logits: Tensor):
         assert self.quant_method is not None
 
-        if self.dp_size > 1:
+        do_naive_dispatch_combine: bool = (
+            self.dp_size > 1
+            and not self.ep_size > 1)
+        if do_naive_dispatch_combine:
             hidden_states, router_logits = get_ep_group().dispatch(
                 hidden_states, router_logits)
+
         # Matrix multiply.
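(The apply call follows below.) The "naive" DP path guarded by do_naive_dispatch_combine above gathers every rank's tokens before the experts run and hands each rank back its own rows afterwards. A toy single-process model of that contract; the real dispatch()/combine() live in vLLM's device communicator and use collective ops:

    import numpy as np

    def dispatch(per_rank_tokens):
        # every DP rank sees the union of all ranks' tokens
        return np.concatenate(per_rank_tokens, axis=0)

    def combine(full_output, per_rank_tokens, rank):
        # each rank keeps only the rows it contributed
        start = sum(len(t) for t in per_rank_tokens[:rank])
        return full_output[start:start + len(per_rank_tokens[rank])]

    ranks = [np.ones((2, 4)), np.ones((3, 4))]   # DP = 2: 2 and 3 tokens
    out = dispatch(ranks) * 2.0                  # stand-in for the MoE body
    assert combine(out, ranks, rank=1).shape == (3, 4)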
final_hidden_states = self.quant_method.apply( layer=self, @@ -614,7 +617,7 @@ class FusedMoE(nn.Cell): apply_router_weight_on_input=self.apply_router_weight_on_input, ) - if self.dp_size > 1: + if do_naive_dispatch_combine: final_hidden_states = get_ep_group().combine(final_hidden_states) if self.reduce_results and (self.tp_size > 1 or self.ep_size > 1): -- Gitee From 95a1b96501e2a22139af9298cf5a296413e1d3ce Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 16:25:17 +0800 Subject: [PATCH 28/76] register model --- vllm_mindspore/model_executor/models/registry.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index 009d84a06..50dde9a41 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -30,6 +30,7 @@ _NATIVE_MODELS = { "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), "Qwen2_5_VLForConditionalGeneration": ("qwen2_5_vl", "Qwen2_5_VLForConditionalGeneration"), + "Qwen3MoeForCausalLM": ("qwen3_moe", "Qwen3MoeForCausalLM"), } _MINDFORMERS_MODELS = { -- Gitee From 395791820f33cc1a161bb629e1adc63fe29f311f Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 17:21:05 +0800 Subject: [PATCH 29/76] update --- .../layers/fused_moe/__init__.py | 3 +- .../model_executor/layers/fused_moe/layer.py | 70 ++++++++++++++++++- .../model_executor/models/qwen3_moe.py | 2 +- vllm_mindspore/model_executor/models/utils.py | 4 +- 4 files changed, 71 insertions(+), 8 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/__init__.py b/vllm_mindspore/model_executor/layers/fused_moe/__init__.py index a38a67cd9..29502460a 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/__init__.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/__init__.py @@ -1,2 +1 @@ -class FusedMoE: - ... \ No newline at end of file +from .layer import FusedMoE \ No newline at end of file diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 9dcc459d7..6b8db5ad2 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -26,9 +26,9 @@ from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform from vllm.platforms.interface import CpuArchEnum from vllm.utils import direct_register_custom_op -from vllm.model_executor.layers.fused_moe.layers import FusedMoEParallelConfig +from vllm.model_executor.layers.fused_moe.layer import FusedMoEParallelConfig -from vllm.model_executor.layers.fused_moe.layers import (determine_expert_map, MoEConfig, +from vllm.model_executor.layers.fused_moe.layer import (determine_expert_map, FusedMoeWeightScaleSupported, FusedMoEMethodBase) @@ -40,6 +40,70 @@ from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk from mindspore import nn, Tensor, Parameter, mint +logger = init_logger(__name__) + + +@dataclass +class MoEConfig: + num_experts: int + experts_per_token: int + hidden_dim: int + + num_local_experts: int + moe_parallel_config: FusedMoEParallelConfig + + in_dtype: torch.dtype # The activation type. + quant_dtype: torch.dtype = None + + # TODO: add more quantization params, blocked, per-token, etc. 
+ block_size: int = 128 + + max_num_tokens: int = envs.VLLM_MOE_DP_CHUNK_SIZE + + def __post_init__(self): + if self.dp_size > 1: + logger.debug("Using MOEConfig::max_num_tokens=%d", + self.max_num_tokens) + + @property + def tp_size(self): + return self.moe_parallel_config.tp_size + + @property + def dp_size(self): + return self.moe_parallel_config.dp_size + + @property + def ep_size(self): + return self.moe_parallel_config.ep_size + + @property + def tp_rank(self): + return self.moe_parallel_config.tp_rank + + @property + def dp_rank(self): + return self.moe_parallel_config.dp_rank + + @property + def ep_rank(self): + return self.moe_parallel_config.ep_rank + + @property + def use_ep(self): + return self.moe_parallel_config.use_ep + + @property + def use_pplx_kernels(self): + return self.moe_parallel_config.use_pplx_kernels + + @property + def use_deepep_ht_kernels(self): + return self.moe_parallel_config.use_deepep_ht_kernels + + @property + def use_deepep_ll_kernels(self): + return self.moe_parallel_config.use_deepep_ll_kernels class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): @@ -137,7 +201,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): custom_routing_function=custom_routing_function, scoring_func=scoring_func, e_score_correction_bias=e_score_correction_bias, - indices_type=torch.uint32 if self.moe.use_pplx_kernels else None) + indices_type=None) return self.fused_experts( hidden_states=x, diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index 275331157..5e25a3729 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -486,7 +486,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): fall_back_to_pt_during_load = False def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() + super().__init__(vllm_config=vllm_config, prefix=prefix) config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config self.config = config diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 9ecebe2ab..56e7b623a 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -20,8 +20,8 @@ from dataclasses import dataclass, field from typing import Iterable, List, Mapping, Optional, Tuple, Union -import mindspore as ms, nn -from mindspore import mint, ops +import mindspore as ms +from mindspore import mint, ops, nn from vllm.sequence import IntermediateTensors from vllm_mindspore.multimodal.inputs import NestedTensors # type: ignore[attr-defined] -- Gitee From bb18fc8abae763f00270c6aadd6f226e621578ba Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 17:24:04 +0800 Subject: [PATCH 30/76] update --- .../model_executor/layers/fused_moe/layer.py | 142 +++++++++++++++++- 1 file changed, 140 insertions(+), 2 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 6b8db5ad2..1d1eac541 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -26,11 +26,13 @@ from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform from vllm.platforms.interface import CpuArchEnum from vllm.utils import direct_register_custom_op -from vllm.model_executor.layers.fused_moe.layer import 
FusedMoEParallelConfig
+# from vllm.model_executor.layers.fused_moe.layer import FusedMoEParallelConfig
 
 from vllm.model_executor.layers.fused_moe.layer import (determine_expert_map,
                                                         FusedMoeWeightScaleSupported,
-                                                        FusedMoEMethodBase)
+                                                        FusedMoEMethodBase,
+                                                        #MoEConfig,
+                                                        )
 
 
 from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk,
@@ -43,6 +45,142 @@ from mindspore import nn, Tensor, Parameter, mint
 
 logger = init_logger(__name__)
 
 
+@dataclass
+class FusedMoEParallelConfig:
+    tp_size: int
+    dp_size: int
+    ep_size: int
+    tp_rank: int
+    dp_rank: int
+    ep_rank: int
+
+    use_ep: bool  # whether to use EP or not
+
+    @property
+    def use_all2all_kernels(self):
+        return self.dp_size > 1 and self.use_ep
+
+    @property
+    def use_pplx_kernels(self):
+        return (self.use_all2all_kernels
+                and envs.VLLM_ALL2ALL_BACKEND == "pplx")
+
+    @property
+    def use_deepep_ht_kernels(self):
+        return (self.use_all2all_kernels
+                and envs.VLLM_ALL2ALL_BACKEND == "deepep_high_throughput")
+
+    @property
+    def use_deepep_ll_kernels(self):
+        return (self.use_all2all_kernels
+                and envs.VLLM_ALL2ALL_BACKEND == "deepep_low_latency")
+
+    @staticmethod
+    def make(tp_size_: int, dp_size_: int,
+             vllm_parallel_config: ParallelConfig) -> "FusedMoEParallelConfig":
+        """
+        Determine MoE parallel configuration. Based on the input tp_size_,
+        dp_size_, ep_size_ and vllm's parallel config, determine what
+        levels of parallelism to use in the fused moe layer.
+
+        Args:
+            tp_size_ (int): tp_size passed into the FusedMoE constructor.
+            dp_size_ (int): dp_size passed into the FusedMoE constructor.
+            ep_size_ (int): ep_size passed into the FusedMoE constructor.
+            vllm_parallel_config (ParallelConfig): vllm's parallel config
+            object.
+
+        Examples:
+            When there is no parallelism requested, i.e. tp_size_ = dp_size_ = 1,
+            we simply return the sizes unaltered and the ranks set to 0.
+
+            Expert Parallelism is considered only when either dp_size_ or tp_size_
+            is non-trivial.
+
+            When TP = 2, DP = 1 and EP = False, the configuration on different
+            devices,
+                - device 0 : TP = {2, 0} DP = {1, 0} EP = {1, 0} //
+                    legend : {size, rank}
+                - device 1 : TP = {2, 1} DP = {1, 0} EP = {1, 0}
+                - Comment : Tensors are sharded across 2 devices.
+
+            When TP = 1, DP = 2 and EP = False, the configuration on different
+            devices,
+                - device 0 : TP = {2, 0} DP = {2, 0} EP = {1, 0}
+                - device 1 : TP = {2, 1} DP = {2, 1} EP = {1, 0}
+                - Comment: There are 2 engine instances and the tensors are sharded
+                  across 2 devices.
+
+            When TP = 2, DP = 2 and EP = False, the configuration on different
+            devices,
+                - device 0: TP = {4, 0} DP = {2, 0} EP = {1, 0}
+                - device 1: TP = {4, 1} DP = {2, 0} EP = {1, 0}
+                - device 2: TP = {4, 2} DP = {2, 1} EP = {1, 0}
+                - device 3: TP = {4, 3} DP = {2, 1} EP = {1, 0}
+                - Comment: There are 2 engine instances and the tensors are sharded
+                  across 4 devices.
+
+            When TP = 2, DP = 1 and EP = True, the configuration on different
+            devices,
+                - device 0: TP = {1, 0} DP = {1, 0} EP = {2, 0}
+                - device 1: TP = {1, 0} DP = {1, 0} EP = {2, 1}
+                - Comment: The experts are split between the 2 devices.
+
+            When TP = 1, DP = 2 and EP = True, the configuration on different
+            devices,
+                - device 0: TP = {1, 0} DP = {2, 0} EP = {2, 0}
+                - device 1: TP = {1, 0} DP = {2, 1} EP = {2, 1}
+                - Comment: There are 2 engine instances and the experts are split
+                  between the 2 devices.
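(The EP-enabled TP = DP = 2 case continues below.) A quick numeric check of these {size, rank} tables, mirroring the flatten_tp_across_dp helper further down:

    # With TP = 2, DP = 2 and EP off: tp_size flattens to dp_size_ * tp_size_ = 4
    # and tp_rank = dp_rank * tp_size_ + local_tp_rank, so device 2
    # (dp_rank = 1, local tp rank 0) gets TP = {4, 2}, matching the table.
    def flatten(tp_size_, dp_size_, dp_rank, local_tp_rank):
        return dp_size_ * tp_size_, dp_rank * tp_size_ + local_tp_rank

    assert flatten(2, 2, dp_rank=1, local_tp_rank=0) == (4, 2)
    # With EP on, the same numbers become the expert layout: EP = {4, 2},
    # while TP collapses to {1, 0}.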
+
+        When TP = 2, DP = 2 and EP = True, the configuration on different
+        devices,
+            - device 0: TP = {1, 0} DP = {2, 0} EP = {4, 0}
+            - device 1: TP = {1, 0} DP = {2, 0} EP = {4, 1}
+            - device 2: TP = {1, 0} DP = {2, 1} EP = {4, 2}
+            - device 3: TP = {1, 0} DP = {2, 1} EP = {4, 3}
+            - Comment: There are 2 engine instances and the experts are split
+              between the 4 devices.
+        """
+
+        def flatten_tp_across_dp(dp_rank: int):
+            tp_rank = 0 if tp_size_ == 1 else get_tensor_model_parallel_rank()
+            # There are actually dp_size_ * tp_size_ devices. Update tp_size
+            # and tp_rank so we shard across all devices.
+            tp_size = dp_size_ * tp_size_
+            tp_rank = dp_rank * tp_size_ + tp_rank
+            return tp_size, tp_rank
+
+        use_ep = (dp_size_ * tp_size_ > 1
+                  and vllm_parallel_config.enable_expert_parallel)
+
+        dp_size = dp_size_
+        dp_rank = get_dp_group().rank_in_group if dp_size > 1 else 0
+        tp_size, tp_rank = flatten_tp_across_dp(dp_rank)
+
+        if not use_ep:
+            return FusedMoEParallelConfig(tp_size=tp_size,
+                                          tp_rank=tp_rank,
+                                          dp_size=dp_size,
+                                          dp_rank=dp_rank,
+                                          ep_size=1,
+                                          ep_rank=0,
+                                          use_ep=False)
+        # DP + EP / TP + EP / DP + TP + EP
+        assert use_ep
+        # In EP, each device owns a set of experts fully. There is no tensor
+        # parallelism; update tp_size, tp_rank, ep_size and ep_rank to
+        # reflect that.
+        ep_size = tp_size
+        ep_rank = tp_rank
+        return FusedMoEParallelConfig(tp_size=1,
+                                      tp_rank=0,
+                                      dp_size=dp_size,
+                                      dp_rank=dp_rank,
+                                      ep_size=ep_size,
+                                      ep_rank=ep_rank,
+                                      use_ep=True)
+
+
 @dataclass
 class MoEConfig:
     num_experts: int
-- Gitee

From b1462eea9823cc2dbd1936bf4755a4a81203d82d Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Mon, 23 Jun 2025 19:35:35 +0800
Subject: [PATCH 31/76] update

---
 .../model_executor/layers/fused_moe/layer.py  | 43 ++++++++++++++---
 .../model_executor/models/qwen3_moe.py        | 13 +++---
 vllm_mindspore/model_executor/models/utils.py |  2 +-
 3 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index 1d1eac541..a4a164fec 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -20,8 +20,7 @@ from vllm.logger import init_logger
 from vllm.model_executor.custom_op import CustomOp
 from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
     is_rocm_aiter_moe_enabled)
-from vllm.model_executor.layers.quantization.base_config import (
-    QuantizationConfig, QuantizeMethodBase)
+from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
 from vllm.model_executor.utils import set_weight_attrs
 from vllm.platforms import current_platform
 from vllm.platforms.interface import CpuArchEnum
@@ -37,8 +36,8 @@ from vllm.model_executor.layers.fused_moe.layer import (determine_expert_map,

 from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk,
                                                grouped_topk,
-                                               MOE_DP_CHUNK_SIZE,
                                                fused_experts)
+from vllm_mindspore.model_executor.layers.quantization.base_config import QuantizeMethodBase

 from mindspore import nn, Tensor, Parameter, mint

@@ -244,6 +243,35 @@ class MoEConfig:
         return self.moe_parallel_config.use_deepep_ll_kernels


+class FusedMoEMethodBase(QuantizeMethodBase):
+
+    @abstractmethod
+    def create_weights(self, layer: torch.nn.Module, num_experts: int,
+                       hidden_size: int, intermediate_size_per_partition: int,
+                       params_dtype: torch.dtype, **extra_weight_attrs):
+        raise NotImplementedError
+
+    @abstractmethod
+    def apply(
+        self,
+        layer: 
torch.nn.Module, + x: torch.Tensor, + router_logits: torch.Tensor, + top_k: int, + renormalize: bool, + use_grouped_topk: bool = False, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + apply_router_weight_on_input: bool = False, + activation: str = "silu", + ) -> torch.Tensor: + raise NotImplementedError + class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): """MoE method without quantization.""" @@ -465,7 +493,7 @@ class FusedMoE(nn.Cell): moe_parallel_config=self.moe_parallel_config, # TODO (bnell): this needs to be fixed for quantized types. in_dtype=params_dtype, - max_num_tokens=MOE_DP_CHUNK_SIZE, + max_num_tokens=envs.VLLM_MOE_DP_CHUNK_SIZE, ) self.moe_config = moe self.quant_config = quant_config @@ -587,15 +615,15 @@ class FusedMoE(nn.Cell): # Index the loaded weight for tp sharding. # down_proj: "RowParallel" so tp sharding on input_dim # Narrow parameter and load. - shard_size = param.shape[shard_dim] if not load_full: + shard_size = param.shape[shard_dim + 1] loaded_weight = loaded_weight.narrow(shard_dim, shard_size * tp_rank, shard_size) - param.set_data(loaded_weight) + param[expert_id] = loaded_weight # w2, down_proj: Load into only logical weight of w2. else: - param[expert_id] = loaded_weight + param.set_data(loaded_weight) def _load_single_value(self, param: Parameter, loaded_weight: Tensor, expert_id: int): @@ -713,6 +741,7 @@ class FusedMoE(nn.Cell): shard_dim=shard_dim, loaded_weight=loaded_weight, param=param, + expert_id=expert_id, tp_rank=self.tp_rank) return diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index 5e25a3729..a31b5c3ee 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -22,9 +22,9 @@ # limitations under the License. 
"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" from collections.abc import Iterable -from typing import Any, Optional, Union +from typing import Any, Optional, Union, Dict, Tuple -from mindspore import Tensor, nn +from mindspore import Tensor, nn, Parameter from transformers import PretrainedConfig from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size @@ -371,8 +371,8 @@ class Qwen3MoeModel(nn.Cell): hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[tuple[str, - Tensor]]) -> set[str]: + def load_weights(self, weights: Iterable[Tuple[str, Tensor]], + params_dict: Dict[str, Parameter]): stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -390,7 +390,6 @@ class Qwen3MoeModel(nn.Cell): ckpt_up_proj_name="up_proj", num_experts=self.config.num_experts) - params_dict = dict(self.named_parameters()) loaded_params: set[str] = set() for name, loaded_weight in weights: for (param_name, weight_name, shard_id) in stacked_params_mapping: @@ -502,6 +501,8 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + self.common_preprocess(vllm_config, prefix) + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.model.get_input_embeddings(input_ids) @@ -528,4 +529,4 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): def load_weights(self, weights: Iterable[tuple[str, Tensor]]) -> set[str]: params_dict = self.get_params_dict() - self.model.load_weights(weights, params_dict) + return self.model.load_weights(weights, params_dict) diff --git a/vllm_mindspore/model_executor/models/utils.py b/vllm_mindspore/model_executor/models/utils.py index 56e7b623a..26b5c2680 100644 --- a/vllm_mindspore/model_executor/models/utils.py +++ b/vllm_mindspore/model_executor/models/utils.py @@ -273,7 +273,7 @@ def get_pp_missing_layer_names(model: nn.Cell) -> list[str]: return _model_to_pp_missing_layer_names[model_id] missing_layer_names = [] - for name, cell in model.name_cells(): + for cell, name in model.cells_and_names(): if isinstance(cell, PPMissingLayer): # NOTE: the trailing dot is used to match the prefix of the layer. # without the dot, we could match a layer that is not missing, -- Gitee From c0995996ddd91081b5431e0d18a0ae374445228f Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 19:48:16 +0800 Subject: [PATCH 32/76] update --- vllm_mindspore/config.py | 13 ++++++++++++- vllm_mindspore/v1/worker/gpu_model_runner.py | 5 ++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/vllm_mindspore/config.py b/vllm_mindspore/config.py index 0fd6ca239..5cdb67362 100644 --- a/vllm_mindspore/config.py +++ b/vllm_mindspore/config.py @@ -16,7 +16,7 @@ # limitations under the License. 
# ============================================================================
 from collections import Counter
-from typing import Union
+from typing import Union, TypeVar
 import sys
 import socket
 import threading
@@ -409,3 +409,14 @@ def stateless_destroy_socket_process_group(dp_group: "SocketProcessGroup") -> No
     if dp_group:
         dp_group.close()
         logger.info(f"Socket process group for rank {dp_group.rank} destroyed.")
+
+T = TypeVar("T")
+
+def get_layers_from_vllm_config(vllm_config: VllmConfig,
+                                layer_type: type[T]) -> dict[str, T]:
+    return {
+        layer_name: layer
+        for layer_name, layer in
+        vllm_config.compilation_config.static_forward_context.items()
+        if isinstance(layer, layer_type)
+    }
diff --git a/vllm_mindspore/v1/worker/gpu_model_runner.py b/vllm_mindspore/v1/worker/gpu_model_runner.py
index 7f4e3fe16..1edc077b2 100644
--- a/vllm_mindspore/v1/worker/gpu_model_runner.py
+++ b/vllm_mindspore/v1/worker/gpu_model_runner.py
@@ -25,6 +25,8 @@ from mindspore import mutable
 from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata
 from vllm_mindspore.utils import get_valid_dtype
 from vllm_mindspore.model_executor.layers.rotary_embedding import InferMRotaryEmbedding as MRotaryEmbedding  # type: ignore[attr-defined]
+from vllm_mindspore.config import get_layers_from_vllm_config
+from vllm_mindspore.model_executor.models.model_base import AttentionWrapper

 from vllm.v1.outputs import ModelRunnerOutput
 from vllm.attention import AttentionType
@@ -444,7 +446,8 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
     block_size = self.vllm_config.cache_config.block_size
     use_mla = self.vllm_config.model_config.use_mla
     kv_cache_spec: dict[str, KVCacheSpec] = {}
-    for layer_name, attn_module in forward_ctx.items():
+    attn_layers = get_layers_from_vllm_config(self.vllm_config, AttentionWrapper)
+    for layer_name, attn_module in attn_layers.items():
         # vllm-mindspore AttentionWrapper is not an Attention instance
         # assert isinstance(attn_module, Attention)
         if attn_module.attn_type == AttentionType.DECODER:
-- Gitee

From 1c7f00087e8bbe005d1fcbd68c8bf05f371a40bb Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Mon, 23 Jun 2025 20:13:51 +0800
Subject: [PATCH 33/76] update

---
 .../model_executor/models/qwen3_moe.py        | 58 ++++++++++++++-----
 1 file changed, 44 insertions(+), 14 deletions(-)

diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py
index a31b5c3ee..59c92a63c 100644
--- a/vllm_mindspore/model_executor/models/qwen3_moe.py
+++ b/vllm_mindspore/model_executor/models/qwen3_moe.py
@@ -22,7 +22,7 @@
 # limitations under the License.
 """Inference-only Qwen3MoE model compatible with HuggingFace weights."""
 from collections.abc import Iterable
-from typing import Any, Optional, Union, Dict, Tuple
+from typing import Any, Optional, Union, Dict, Tuple, List

 from mindspore import Tensor, nn, Parameter
 from transformers import PretrainedConfig
 from vllm.config import CacheConfig, VllmConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
@@ -84,7 +84,7 @@ class Qwen3MoeMLP(nn.Cell):
                 "Only silu is supported for now.")
         self.act_fn = SiluAndMul()

-    def forward(self, x):
+    def construct(self, x):
         gate_up, _ = self.gate_up_proj(x)
         x = self.act_fn(gate_up)
         x, _ = self.down_proj(x)
@@ -122,7 +122,7 @@ class Qwen3MoeSparseMoeBlock(nn.Cell):
                                      quant_config=None,
                                      prefix=f"{prefix}.gate")

-    def forward(self, hidden_states: Tensor) -> Tensor:
+    def construct(self, hidden_states: Tensor) -> Tensor:
         # NOTE: hidden_states can have either 1D or 2D shape.
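        # Editor's note (assumption, not part of the original patch): the
        # block records orig_shape, flattens the input to (-1, hidden_dim)
        # for per-token routing, and restores orig_shape on return, so both
        # layouts share one code path.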
orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] @@ -212,10 +212,18 @@ class Qwen3MoeAttention(nn.Cell): self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) - def forward( + def construct( self, positions: Tensor, hidden_states: Tensor, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, ) -> Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) @@ -229,8 +237,10 @@ class Qwen3MoeAttention(nn.Cell): self.head_dim) k_by_head = self.k_norm(k_by_head) k = k_by_head.view(k.shape) - q, k = self.rotary_emb(positions, q, k) - attn_output = self.attn(q, k, v) + q, k = self.rotary_emb(positions, q, k, batch_valid_length, is_prefill) + attn_output = self.attn(q, k, v, key_cache, value_cache, is_prefill, + slot_mapping, attn_mask, batch_valid_length, + q_seq_lens, block_tables) output, _ = self.o_proj(attn_output) return output @@ -286,10 +296,18 @@ class Qwen3MoeDecoderLayer(nn.Cell): self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) - def forward( + def construct( self, positions: Tensor, hidden_states: Tensor, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, residual: Optional[Tensor], ) -> Tensor: # Self Attention @@ -299,11 +317,10 @@ class Qwen3MoeDecoderLayer(nn.Cell): else: hidden_states, residual = self.input_layernorm( hidden_states, residual) - hidden_states = self.self_attn( - positions=positions, - hidden_states=hidden_states, - ) - + hidden_states = self.self_attn(positions, hidden_states, key_cache, + value_cache, is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables) # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) @@ -343,10 +360,18 @@ class Qwen3MoeModel(nn.Cell): def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.embed_tokens(input_ids) - def forward( + def construct( self, input_ids: Tensor, positions: Tensor, + key_caches: List[Tensor], + value_caches: List[Tensor], + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, ) -> Union[Tensor, IntermediateTensors]: @@ -362,7 +387,12 @@ class Qwen3MoeModel(nn.Cell): residual = intermediate_tensors["residual"] for i in range(self.start_layer, self.end_layer): layer = self.layers[i] - hidden_states, residual = layer(positions, hidden_states, residual) + hidden_states, residual = layer(positions, hidden_states, + key_caches[i - self.start_layer], + value_caches[i - self.start_layer], + is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables, residual) if not get_pp_group().is_last_rank: return IntermediateTensors({ "hidden_states": hidden_states, -- Gitee From a1df535dd87bcc23b8d4fd7eead5eef6cc8dcd55 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 20:15:01 +0800 Subject: [PATCH 34/76] update --- vllm_mindspore/model_executor/models/qwen3_moe.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py 
b/vllm_mindspore/model_executor/models/qwen3_moe.py index 59c92a63c..f0ffc6f87 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -542,6 +542,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): positions: Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, + **kwargs ) -> Union[Tensor, IntermediateTensors]: hidden_states = self.exec_model(input_ids, positions, intermediate_tensors, inputs_embeds) -- Gitee From 17a3aa1afe0382e82e8a12e8e8903dca96a53241 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 20:45:20 +0800 Subject: [PATCH 35/76] update --- vllm_mindspore/model_executor/layers/linear.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm_mindspore/model_executor/layers/linear.py b/vllm_mindspore/model_executor/layers/linear.py index 0dee09d6a..f2a883a71 100644 --- a/vllm_mindspore/model_executor/layers/linear.py +++ b/vllm_mindspore/model_executor/layers/linear.py @@ -248,7 +248,7 @@ class ReplicatedLinear(LinearBase): f"to a parameter of size {param.size()}") param.set_data(loaded_weight) - def forward( + def construct( self, x: Tensor ) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]: bias = self.bias if not self.skip_bias_add else None -- Gitee From 80d4d72a641349b3ccb300635a0f4e1299c6f071 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 20:49:42 +0800 Subject: [PATCH 36/76] update npucomm --- .../device_communicators/npu_communicator.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vllm_mindspore/distributed/device_communicators/npu_communicator.py b/vllm_mindspore/distributed/device_communicators/npu_communicator.py index 3885baa93..9cd0f2781 100644 --- a/vllm_mindspore/distributed/device_communicators/npu_communicator.py +++ b/vllm_mindspore/distributed/device_communicators/npu_communicator.py @@ -1,4 +1,17 @@ +from mindspore import Tensor + from vllm.distributed.device_communicators.cuda_communicator import CudaCommunicator class NPUCommunicator(CudaCommunicator): - ... 
+ def dispatch( + self, hidden_states: Tensor, + router_logits: Tensor) -> tuple[Tensor, Tensor]: + assert self.all2all_manager is not None + hidden_states, router_logits = self.all2all_manager.dispatch( + hidden_states, router_logits) + return hidden_states, router_logits + + def combine(self, hidden_states: Tensor) -> Tensor: + assert self.all2all_manager is not None + hidden_states = self.all2all_manager.combine(hidden_states) + return hidden_states -- Gitee From edc03deeaa9b8981714b4b528bea21d1dc0d969f Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Mon, 23 Jun 2025 21:10:53 +0800 Subject: [PATCH 37/76] update --- .../device_communicators/npu_communicator.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/vllm_mindspore/distributed/device_communicators/npu_communicator.py b/vllm_mindspore/distributed/device_communicators/npu_communicator.py index 9cd0f2781..cfb892948 100644 --- a/vllm_mindspore/distributed/device_communicators/npu_communicator.py +++ b/vllm_mindspore/distributed/device_communicators/npu_communicator.py @@ -1,8 +1,35 @@ from mindspore import Tensor +from mindspore.communication import get_rank, get_group_size +import torch.distributed as dist from vllm.distributed.device_communicators.cuda_communicator import CudaCommunicator +from vllm.distributed.parallel_state import (get_dp_group, + get_tp_group, + in_the_same_node_as) +from vllm.forward_context import get_forward_context + class NPUCommunicator(CudaCommunicator): + def __init__(self, + cpu_group, + device = None, + device_group = None, + unique_name: str = ""): + super().__init__(cpu_group, device, device_group, unique_name) + + # all2all lives in ep group, which is merged from dp and tp group + self.dp_group = get_dp_group() + self.tp_group = get_tp_group() + # no self.ep_group since self.ep_group is still in construction + # when we create this object + self.dp_rank = self.dp_group.rank_in_group + self.dp_world_size = self.dp_group.world_size + self.rank = dist.get_rank(cpu_group) + self.world_size = dist.get_world_size(cpu_group) + + # all2all communication often has separate implementations for + # intra-node and inter-node communication + self.internode = not all(in_the_same_node_as(cpu_group, source_rank=0)) def dispatch( self, hidden_states: Tensor, router_logits: Tensor) -> tuple[Tensor, Tensor]: @@ -15,3 +42,25 @@ class NPUCommunicator(CudaCommunicator): assert self.all2all_manager is not None hidden_states = self.all2all_manager.combine(hidden_states) return hidden_states + + def dispatch(self, hidden_states: Tensor, + router_logits: Tensor): + cu_tokens_across_dp_cpu = get_forward_context( + ).dp_metadata.cu_tokens_across_dp_cpu + + hidden_states = self.naive_multicast(hidden_states, + cu_tokens_across_dp_cpu) + router_logits = self.naive_multicast(router_logits, + cu_tokens_across_dp_cpu) + return hidden_states, router_logits + + def combine(self, hidden_states: Tensor) -> Tensor: + cu_tokens_across_dp_cpu = get_forward_context( + ).dp_metadata.cu_tokens_across_dp_cpu + start = 0 if self.dp_rank == 0 else cu_tokens_across_dp_cpu[ + self.dp_rank - 1] + end = cu_tokens_across_dp_cpu[self.dp_rank] + + all_hidden_states = self.dp_group.all_reduce(hidden_states) + hidden_states = all_hidden_states[start:end, :] + return hidden_states \ No newline at end of file -- Gitee From 926d26935acfd13a5e87ae43a0eda1cb4944d82e Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Tue, 24 Jun 2025 11:18:35 +0800 Subject: [PATCH 38/76] update --- .../layers/fused_moe/fused_moe.py | 143 ++++++++++++------ 
.../model_executor/layers/fused_moe/layer.py | 18 ++- vllm_mindspore/platforms/ascend.py | 3 +- 3 files changed, 107 insertions(+), 57 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index 1f6f4b7e2..aa2e32ca1 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -1,9 +1,12 @@ from typing import Optional from mindspore import Tensor, mint, ops -from mindspore.ops.auto_generate import FusedAddTopKDiv +from mindspore.ops.auto_generate import (GroupedMatmulV4, + FusedAddTopKDiv, + MoeInitRoutingV2, + MoeTokenUnpermute) import mindspore as ms -from vllm.distributed.parallel_state import get_ep_group +from vllm.distributed.parallel_state import get_ep_group, get_dp_group def fused_topk( hidden_states: Tensor, @@ -25,8 +28,8 @@ def fused_topk( if indices_type is not None: topk_ids = topk_ids.to(indices_type) - return topk_weights.to(ms.float32), topk_ids.to(ms.int32) - + return topk_weights, topk_ids + def grouped_topk( hidden_states: Tensor, @@ -53,14 +56,8 @@ def grouped_topk( scoring_type, renormalize) - return topk_weights.to(ms.float32), topk_ids.to(ms.int32) - - -def _ep_dispatch(x, topk_ids): - return mint.distributed.all_to_all(x, topk_ids) + return topk_weights, topk_ids -def _ep_combine(x, topk_ids): - return mint.distributed.all_to_all(x, topk_ids) def fused_experts(hidden_states: Tensor, w1: Tensor, @@ -70,38 +67,47 @@ def fused_experts(hidden_states: Tensor, activation: str = "silu", global_num_experts: int = -1, apply_router_weight_on_input: bool = False, - expert_map: Optional[Tensor] = None) -> Tensor: - - use_ep = False - if expert_map is not None: - use_ep = True - - if use_ep: - hidden_states = _ep_dispatch(hidden_states, topk_ids) - hidden_states = _run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation) - hidden_states = _ep_combine(hidden_states, topk_ids) - if apply_router_weight_on_input: - hidden_states = mint.mul(hidden_states, topk_weights) - hidden_states = hidden_states.sum(-1) - else: - hidden_states =_run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation) - if apply_router_weight_on_input: - hidden_states = mint.mul(hidden_states, topk_weights) + expert_map: Optional[Tensor] = None, + tp_size: int = 1, + ep_size: int = 0) -> Tensor: + + if tp_size >= 1: + # no ep, pure tp + if ep_size == 1: + hidden_states = _run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + # ep_size > 1 : pure ep or tp + ep + else: + # pure ep + if tp_size == 1: + hidden_states = _run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + # tp_size > 1 : tp + ep + else: + hidden_states = _run_tp_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) return hidden_states -def _run_activation(x, activation): +def _gate_activation(gate, activation): if activation == "silu": - return mint.silu(x) + return mint.silu(gate) elif activation == "gelu": - return mint.gelu(x) + return mint.gelu(gate) else: raise ValueError(f"Unsupported activation function: {activation}") -group_matmul_ops = ops.auto_generate.GroupedMatmulV4() +group_matmul_ops = GroupedMatmulV4() +moe_init_routing_op = MoeInitRoutingV2() +moe_token_unpermute = MoeTokenUnpermute() +all_gather_dp = 
ops.AllGather(get_dp_group()) +all_reduce_ep = ops.AllReduce(get_ep_group()) -def _run_group_matmul(hidden_states, weight, group_list): +def _group_matmul(hidden_states, weight, group_list): return group_matmul_ops([hidden_states], [weight], group_list, None, None, None, None, None, None, group_list, split_item=3, group_type=0, group_list_type=1) @@ -109,24 +115,65 @@ def _run_group_matmul(hidden_states, weight, group_list): def _run_ep_moe(hidden_states, w1, w2, - group_list, - group_logits, - activation): - hidden_states = _run_group_matmul(hidden_states, w1, group_list) - hidden_states = _run_activation(hidden_states, activation) - hidden_states = _run_group_matmul(hidden_states, w2, group_list) + topk_ids, + topk_weights, + activation, + global_num_experts, + apply_router_weight_on_input): + hidden_states = _group_matmul(hidden_states, w1, topk_ids) + hidden_states = _gate_activation(hidden_states, activation) + hidden_states = _group_matmul(hidden_states, w2, topk_ids) return hidden_states def _run_tp_moe(hidden_states, w1, w2, - group_list, - group_logits, - activation): - # hidden_states = mint.group_matmul(hidden_states, w1, group_list) - hidden_states = _run_group_matmul([hidden_states], [w1], group_list) - hidden_states = _run_activation(hidden_states, activation) - hidden_states = _run_group_matmul(hidden_states, w2, group_list) - hidden_states = mint.distributed.all_reduce(hidden_states, get_ep_group()) - return hidden_states + topk_ids, + topk_weights, + activation, + global_num_experts, + apply_router_weight_on_input): + hidden_states = all_gather_dp(hidden_states) + topk_ids = all_gather_dp(topk_ids) + topk_weights = all_gather_dp(topk_weights) + sorted_input_tensor, unsort_map, group_list, _ = \ + moe_init_routing_op( + hidden_states, + topk_ids, + active_num=0, + expert_capacity=0, + expert_num=global_num_experts, + drop_pad_mode=0, + expert_tokens_count_or_cumsum_flag=2, + expert_tokens_before_capacity_flag=True) + + group_list = group_list.astype(ms.int64) + + gate_hidden_out = _group_matmul(sorted_input_tensor, w1, group_list) + gate, hidden = mint.split(gate_hidden_out, + (w1.shape[0] // 2, w1.shape[0] // 2), -1) + gate = _gate_activation(gate, activation) + hidden = mint.mul(hidden, gate) + hidden = _group_matmul(hidden, w2, group_list) + expert_output = all_reduce_ep(hidden) + if apply_router_weight_on_input: + moe_output = moe_token_unpermute(permuted_tokens=expert_output, + sorted_indices=unsort_map, + probs=topk_weights, + padded_mode=False, + restore_shape=None) + moe_output = moe_output[:].sum(-1) + return moe_output + + +def _run_tp_ep_moe(hidden_states, + w1, + w2, + group_list, + group_logits, + activation, + global_num_experts, + apply_router_weight_on_input): + raise NotImplementedError( + "TP + EP MoE is not implemented yet. 
Please use pure TP or pure EP MoE instead.") diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index a4a164fec..a774e17ff 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -379,6 +379,8 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): global_num_experts=global_num_experts, apply_router_weight_on_input=apply_router_weight_on_input, expert_map=expert_map, + tp_size=self.moe.tp_size, + ep_size=self.moe.ep_size, ) @@ -822,12 +824,12 @@ class FusedMoE(nn.Cell): router_logits: Tensor): assert self.quant_method is not None - do_naive_dispatch_combine: bool = ( - self.dp_size > 1 - and not self.ep_size > 1) - if do_naive_dispatch_combine: - hidden_states, router_logits = get_ep_group().dispatch( - hidden_states, router_logits) + # do_naive_dispatch_combine: bool = ( + # self.dp_size > 1 + # and not self.ep_size > 1) + # if do_naive_dispatch_combine: + # hidden_states, router_logits = get_ep_group().dispatch( + # hidden_states, router_logits) # Matrix multiply. final_hidden_states = self.quant_method.apply( @@ -848,8 +850,8 @@ class FusedMoE(nn.Cell): apply_router_weight_on_input=self.apply_router_weight_on_input, ) - if do_naive_dispatch_combine: - final_hidden_states = get_ep_group().combine(final_hidden_states) + # if do_naive_dispatch_combine: + # final_hidden_states = get_ep_group().combine(final_hidden_states) if self.reduce_results and (self.tp_size > 1 or self.ep_size > 1): # Default set to False. (May have to add shared expert outputs.) diff --git a/vllm_mindspore/platforms/ascend.py b/vllm_mindspore/platforms/ascend.py index 7a31885cb..c61e3978a 100644 --- a/vllm_mindspore/platforms/ascend.py +++ b/vllm_mindspore/platforms/ascend.py @@ -136,7 +136,8 @@ class AscendPlatform(Platform): def get_device_communicator_cls(cls) -> str: """Get device specific communicator class for distributed communication.""" if envs.VLLM_USE_V1: - return "vllm_mindspore.distributed.device_communicators.npu_communicator.NPUCommunicator" + return "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator" + # return "vllm_mindspore.distributed.device_communicators.npu_communicator.NPUCommunicator" return "vllm.distributed.device_communicators.base_device_communicator.DeviceCommunicatorBase" @classmethod -- Gitee From c9b2258018a194a26e924d0ed0cc235cd18bce7c Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Tue, 24 Jun 2025 16:42:36 +0800 Subject: [PATCH 39/76] update --- .../layers/fused_moe/fused_moe.py | 35 ++++++++----------- .../model_executor/layers/fused_moe/layer.py | 21 ++++++++++- 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index aa2e32ca1..994988777 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -94,9 +94,9 @@ def fused_experts(hidden_states: Tensor, def _gate_activation(gate, activation): if activation == "silu": - return mint.silu(gate) + return mint.nn.functional.silu(gate) elif activation == "gelu": - return mint.gelu(gate) + return mint.nn.functional.gelu(gate) else: raise ValueError(f"Unsupported activation function: {activation}") @@ -104,13 +104,11 @@ def _gate_activation(gate, activation): group_matmul_ops = GroupedMatmulV4() moe_init_routing_op = MoeInitRoutingV2() 
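# Editor's note (hedged sketch, not part of the original patch):
# MoeInitRoutingV2 permutes tokens so rows bound for the same expert are
# contiguous. It returns the permuted tokens, an unsort map (later consumed
# by MoeTokenUnpermute) and per-expert token counts used as the group_list
# of GroupedMatmulV4. A toy invocation, assuming 8 experts and top-2
# routing, mirroring the call made in _run_tp_moe below:
#   sorted_x, unsort_map, group_list, _ = moe_init_routing_op(
#       x, topk_ids, active_num=0, expert_capacity=0, expert_num=8,
#       drop_pad_mode=0, expert_tokens_count_or_cumsum_flag=2,
#       expert_tokens_before_capacity_flag=True)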
moe_token_unpermute = MoeTokenUnpermute() -all_gather_dp = ops.AllGather(get_dp_group()) -all_reduce_ep = ops.AllReduce(get_ep_group()) def _group_matmul(hidden_states, weight, group_list): - return group_matmul_ops([hidden_states], [weight], group_list, + return group_matmul_ops([hidden_states], [weight], None, None, None, None, None, None, - group_list, split_item=3, group_type=0, group_list_type=1) + group_list, split_item=3, group_type=0, group_list_type=1)[0] def _run_ep_moe(hidden_states, w1, @@ -134,9 +132,9 @@ def _run_tp_moe(hidden_states, activation, global_num_experts, apply_router_weight_on_input): - hidden_states = all_gather_dp(hidden_states) - topk_ids = all_gather_dp(topk_ids) - topk_weights = all_gather_dp(topk_weights) + topk_weights = mint.cast(topk_weights, hidden_states.dtype) + topk_ids = mint.cast(topk_ids, ms.int32) + sorted_input_tensor, unsort_map, group_list, _ = \ moe_init_routing_op( hidden_states, @@ -150,20 +148,17 @@ def _run_tp_moe(hidden_states, group_list = group_list.astype(ms.int64) - gate_hidden_out = _group_matmul(sorted_input_tensor, w1, group_list) + gate_hidden_out = _group_matmul(sorted_input_tensor, mint.transpose(w1, -1, -2), group_list) gate, hidden = mint.split(gate_hidden_out, - (w1.shape[0] // 2, w1.shape[0] // 2), -1) + (w1.shape[1] // 2, w1.shape[1] // 2), -1) gate = _gate_activation(gate, activation) hidden = mint.mul(hidden, gate) - hidden = _group_matmul(hidden, w2, group_list) - expert_output = all_reduce_ep(hidden) - if apply_router_weight_on_input: - moe_output = moe_token_unpermute(permuted_tokens=expert_output, - sorted_indices=unsort_map, - probs=topk_weights, - padded_mode=False, - restore_shape=None) - moe_output = moe_output[:].sum(-1) + expert_output = _group_matmul(hidden, mint.transpose(w2, -1, -2), group_list) + moe_output = moe_token_unpermute(permuted_tokens=expert_output, + sorted_indices=unsort_map, + probs=topk_weights, + padded_mode=False, + restore_shape=None) return moe_output diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index a774e17ff..5069e94ce 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -39,7 +39,8 @@ from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk fused_experts) from vllm_mindspore.model_executor.layers.quantization.base_config import QuantizeMethodBase -from mindspore import nn, Tensor, Parameter, mint +from mindspore import nn, Tensor, Parameter, mint, ops +import mindspore as ms logger = init_logger(__name__) @@ -530,6 +531,11 @@ class FusedMoE(nn.Cell): self.quant_method.create_weights(layer=self, **moe_quant_params) + if self.dp_size > 1 and self.ep_size == 1: + self.pure_tp = True + self.all_gather_from_dp_group = ops.Gather(get_dp_group()) + self.all_reduce_from_world_group = ops.AllReduce(get_ep_group) + @property def tp_size(self): return self.moe_parallel_config.tp_size @@ -831,6 +837,13 @@ class FusedMoE(nn.Cell): # hidden_states, router_logits = get_ep_group().dispatch( # hidden_states, router_logits) + if self.pure_tp: + hidden_states = self.all_gather_from_dp_group(hidden_states) + router_logits = self.all_gather_from_dp_group(router_logits) + tokens_num = Tensor(hidden_states.shape[0], ms.int32) + tokens_num_total = self.all_gather_from_dp_group(tokens_num, 0) + tokens_cumulative = mint.cumsum(tokens_num_total) + # Matrix multiply. 
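        # Editor's note (assumption, not from the original patch):
        # quant_method.apply performs the top-k routing over router_logits
        # and the grouped expert matmuls, returning one output row per row
        # of the (possibly DP-gathered) hidden_states.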
final_hidden_states = self.quant_method.apply( layer=self, @@ -850,6 +863,12 @@ class FusedMoE(nn.Cell): apply_router_weight_on_input=self.apply_router_weight_on_input, ) + if self.pure_tp: + final_hidden_states = self.all_reduce(final_hidden_states) + start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] + end = tokens_cumulative[self.dp_rank] + final_hidden_states = final_hidden_states[start:end] + # if do_naive_dispatch_combine: # final_hidden_states = get_ep_group().combine(final_hidden_states) -- Gitee From 4813e87b13c71cc21991bda077315148c484534c Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Tue, 24 Jun 2025 19:10:13 +0800 Subject: [PATCH 40/76] update v1 --- .../model_executor/layers/fused_moe/fused_moe.py | 4 ++-- .../model_executor/layers/fused_moe/layer.py | 14 ++++++++------ vllm_mindspore/model_executor/models/qwen3_moe.py | 9 +++++++++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index 994988777..a3d9a1734 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -132,8 +132,8 @@ def _run_tp_moe(hidden_states, activation, global_num_experts, apply_router_weight_on_input): - topk_weights = mint.cast(topk_weights, hidden_states.dtype) - topk_ids = mint.cast(topk_ids, ms.int32) + topk_weights = topk_weights.astype(hidden_states.dtype) + topk_ids = topk_ids.astype(ms.int32) sorted_input_tensor, unsort_map, group_list, _ = \ moe_init_routing_op( diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 5069e94ce..9415ecab6 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -533,8 +533,8 @@ class FusedMoE(nn.Cell): if self.dp_size > 1 and self.ep_size == 1: self.pure_tp = True - self.all_gather_from_dp_group = ops.Gather(get_dp_group()) - self.all_reduce_from_world_group = ops.AllReduce(get_ep_group) + self.all_gather_from_dp_group = ops.AllGather(get_dp_group().device_group._name) + self.all_reduce_from_world_group = ops.AllReduce(get_ep_group().device_group._name) @property def tp_size(self): @@ -838,11 +838,12 @@ class FusedMoE(nn.Cell): # hidden_states, router_logits) if self.pure_tp: + tokens_num = Tensor([[hidden_states.shape[0]]], dtype=ms.int32) + tokens_num_total = self.all_gather_from_dp_group(tokens_num) + tokens_num_total = tokens_num_total.reshape(-1) + tokens_cumulative = mint.cumsum(tokens_num_total, 0) hidden_states = self.all_gather_from_dp_group(hidden_states) router_logits = self.all_gather_from_dp_group(router_logits) - tokens_num = Tensor(hidden_states.shape[0], ms.int32) - tokens_num_total = self.all_gather_from_dp_group(tokens_num, 0) - tokens_cumulative = mint.cumsum(tokens_num_total) # Matrix multiply. 
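        # Editor's note (assumption, not from the original patch): the
        # cumsum above gives each DP rank the offsets of its own tokens in
        # the gathered batch; after the experts run, each rank slices its
        # [start:end] segment back out of the reduced result.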
final_hidden_states = self.quant_method.apply( @@ -864,7 +865,8 @@ class FusedMoE(nn.Cell): ) if self.pure_tp: - final_hidden_states = self.all_reduce(final_hidden_states) + # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) + mint.distributed.all_reduce(final_hidden_states) start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] end = tokens_cumulative[self.dp_rank] final_hidden_states = final_hidden_states[start:end] diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index f0ffc6f87..adb710b40 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -52,6 +52,8 @@ from vllm_mindspore.model_executor.models.utils import ( extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) from vllm_mindspore.model_executor.models.model_base import NativeModel +from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, + get_sampler) logger = init_logger(__name__) @@ -531,6 +533,8 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + self.sampler = get_sampler() + self.common_preprocess(vllm_config, prefix) def get_input_embeddings(self, input_ids: Tensor) -> Tensor: @@ -548,6 +552,11 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): inputs_embeds) return hidden_states + def sample(self, logits: Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + def compute_logits( self, hidden_states: Tensor, -- Gitee From e5956279373a1bd719c6979b7fb6e53bec20c784 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Wed, 25 Jun 2025 09:10:38 +0800 Subject: [PATCH 41/76] update --- .../model_executor/layers/fused_moe/layer.py | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 9415ecab6..bc2ab61bd 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -531,10 +531,14 @@ class FusedMoE(nn.Cell): self.quant_method.create_weights(layer=self, **moe_quant_params) - if self.dp_size > 1 and self.ep_size == 1: + self.dp_group = get_dp_group().device_group._name + self.ep_group = get_ep_group().device_group._name + + if self.dp_size > 1 and self.ep_size == 1 or self.dp_size == 1: self.pure_tp = True - self.all_gather_from_dp_group = ops.AllGather(get_dp_group().device_group._name) - self.all_reduce_from_world_group = ops.AllReduce(get_ep_group().device_group._name) + if self.dp_size > 1: + self.all_gather_from_dp_group = ops.AllGather(self.dp_group) + self.all_reduce_from_world_group = ops.AllReduce(self.ep_group) @property def tp_size(self): @@ -837,7 +841,7 @@ class FusedMoE(nn.Cell): # hidden_states, router_logits = get_ep_group().dispatch( # hidden_states, router_logits) - if self.pure_tp: + if self.dp_size > 1 and self.pure_tp: tokens_num = Tensor([[hidden_states.shape[0]]], dtype=ms.int32) tokens_num_total = self.all_gather_from_dp_group(tokens_num) tokens_num_total = tokens_num_total.reshape(-1) @@ -866,10 +870,11 @@ class FusedMoE(nn.Cell): if self.pure_tp: # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) - 
mint.distributed.all_reduce(final_hidden_states)
-            start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1]
-            end = tokens_cumulative[self.dp_rank]
-            final_hidden_states = final_hidden_states[start:end]
+            mint.distributed.all_reduce(final_hidden_states, self.ep_group)
+            if self.dp_size > 1:
+                start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1]
+                end = tokens_cumulative[self.dp_rank]
+                final_hidden_states = final_hidden_states[start:end]

         # if do_naive_dispatch_combine:
         #     final_hidden_states = get_ep_group().combine(final_hidden_states)
-- Gitee

From a7b5ed465a6d93b32a8992a69347eecbde923906 Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Wed, 25 Jun 2025 10:31:53 +0800
Subject: [PATCH 42/76] fix

---
 vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py | 1 +
 vllm_mindspore/model_executor/layers/fused_moe/layer.py     | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
index a3d9a1734..6beb2aa5b 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py
@@ -154,6 +154,7 @@ def _run_tp_moe(hidden_states,
     gate = _gate_activation(gate, activation)
     hidden = mint.mul(hidden, gate)
     expert_output = _group_matmul(hidden, mint.transpose(w2, -1, -2), group_list)
+    expert_output = mint.nan_to_num(expert_output, 0, 0, 0)
     moe_output = moe_token_unpermute(permuted_tokens=expert_output,
                                      sorted_indices=unsort_map,
                                      probs=topk_weights,
diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index bc2ab61bd..d83347c4a 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -578,7 +578,7 @@ class FusedMoE(nn.Cell):

         # Index the loaded weight for tp sharding.
         # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim
-        shard_size = param.shape[shard_dim] // 2
+        shard_size = param.shape[shard_dim + 1] // 2
         loaded_weight = loaded_weight.narrow(shard_dim,
                                              shard_size * tp_rank,
                                              shard_size)
         # Narrow parameter and load.
-- Gitee

From 07f031f16a81b5c6f382bac6ac00d9725ede8f5e Mon Sep 17 00:00:00 2001
From: YiYang <15594999221@163.com>
Date: Tue, 24 Jun 2025 11:04:46 +0800
Subject: [PATCH 43/76] feat: optimize expert weight loading
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .gitmodules                                   |    2 +-
 install_depend_pkgs.sh                        |    6 +-
 tests/mindformers                             |    2 +-
 .../mf_models/deepseekv3_weight_processor.py  | 1932 +++++++++++------
 4 files changed, 1240 insertions(+), 702 deletions(-)

diff --git a/.gitmodules b/.gitmodules
index ec95f133e..d057201a7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
 [submodule "tests/mindformers"]
     path = tests/mindformers
     url = https://gitee.com/mindspore/mindformers.git
-    branch = dev
+    branch = br_infer_deepseek_os
diff --git a/install_depend_pkgs.sh b/install_depend_pkgs.sh
index 97da181da..01acabfca 100644
--- a/install_depend_pkgs.sh
+++ b/install_depend_pkgs.sh
@@ -64,10 +64,10 @@ pip uninstall mindspore -y && pip install "$mindspore_name" || { echo "Failed to

 echo "========= Installing mindformers"
-mf_dir=mindformers-dev
+mf_dir=br_infer_deepseek_os
 if [ ! 
-d "$mf_dir" ]; then - git clone https://gitee.com/mindspore/mindformers.git -b dev "$mf_dir" - git checkout 13adb2201abe8979b679a98566495a8642d7ec0d + git clone https://gitee.com/mindspore/mindformers.git -b br_infer_deepseek_os "$mf_dir" + git checkout 1e6aad8700aff37c0f9e7ddc0bfd62bf7123ad51 else echo "The $mf_dir folder already exists and will not be re-downloaded." fi diff --git a/tests/mindformers b/tests/mindformers index 13adb2201..1e6aad870 160000 --- a/tests/mindformers +++ b/tests/mindformers @@ -1 +1 @@ -Subproject commit 13adb2201abe8979b679a98566495a8642d7ec0d +Subproject commit 1e6aad8700aff37c0f9e7ddc0bfd62bf7123ad51 diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py b/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py index 2ee13c238..c63abe694 100644 --- a/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py +++ b/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py @@ -12,23 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ - """ transform huggingface model to mindspore safetensor. """ -import os -import json import gc -import numpy as np -from tqdm import tqdm +import json +import os import mindspore as ms +import numpy as np +from mindformers.parallel_core.inference.parallel_state import ( + get_tensor_model_parallel_rank) from mindspore import dtype from mindspore.communication.management import get_rank -from mindformers.parallel_core.inference.parallel_state import get_tensor_model_parallel_rank -from vllm_mindspore.model_executor.models.mf_models.weight_processor import BaseWeightProcessor, EPMethod +from tqdm import tqdm from vllm.logger import init_logger +from vllm_mindspore.model_executor.models.mf_models.weight_processor import ( + BaseWeightProcessor, EPMethod) + logger = init_logger(__name__) @@ -64,79 +66,131 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): self.expert_num = self.config.moe_config.expert_num self.moe_split_tp = self.moe_tp_size > 1 self.moe_split_ep = self.moe_ep_size > 1 - logger.debug(f"Deepseekv3 weight split info:" - f"global_rank_id: {self.global_rank_id} \n" - f"tp_group_size: {self.tp_group_size} \n" - f"dp_group_size: {self.dp_group_size} \n" - f"tp_rank_id: {self.tp_rank_id} \n" - f"ep_method: {self.ep_method.name} \n" - f"num_router_experts: {self.num_router_experts} \n" - f"ep_group_nums: {self.ep_group_nums} \n" - f"moe_ep_rank_id: {self.moe_ep_rank_id} \n" - f"moe_tp_rank_id: {self.moe_tp_rank_id} \n" - f"moe_ep_size: {self.moe_ep_size} \n" - f"moe_tp_size: {self.moe_tp_size}") + logger.debug("Deepseekv3 weight split info:") + logger.debug("global_rank_id: %s", self.global_rank_id) + logger.debug("tp_group_size: %s", self.tp_group_size) + logger.debug("dp_group_size: %s", self.dp_group_size) + logger.debug("tp_rank_id: %s", self.tp_rank_id) + logger.debug("ep_method: %s", self.ep_method.name) + logger.debug("num_router_experts: %s", self.num_router_experts) + logger.debug("ep_group_nums: %s", self.ep_group_nums) + logger.debug("moe_ep_rank_id: %s", self.moe_ep_rank_id) + logger.debug("moe_tp_rank_id: %s", self.moe_tp_rank_id) + logger.debug("moe_ep_size: %s", self.moe_ep_size) + logger.debug("moe_tp_size: %s", self.moe_tp_size) def quant_convert_weight_name(self, weight_name: str): """replace quant net weight name""" - weight_name = 
weight_name.replace('embed_tokens.weight', 'tok_embeddings.embedding_weight') - - weight_name = weight_name.replace('.self_attn.q_a_proj.weight', '.attention.q2l_proj._layer.weight') - weight_name = weight_name.replace('.self_attn.q_a_proj.input_scale', '.attention.q2l_proj.quant_op.input_scale') - weight_name = weight_name.replace('.self_attn.q_a_proj.input_offset', '.attention.q2l_proj.quant_op.input_zp') - weight_name = weight_name.replace('.self_attn.q_a_proj.quant_bias', - '.attention.q2l_proj._layer.matmul.quant_bias') - weight_name = weight_name.replace('.self_attn.q_a_proj.deq_scale', - '.attention.q2l_proj._layer.matmul.dequant_scale') - - weight_name = weight_name.replace('.self_attn.q_a_layernorm.weight', '.attention.lq_norm.weight') - weight_name = weight_name.replace('.self_attn.kv_a_layernorm.weight', '.attention.lkv_norm.weight') - weight_name = weight_name.replace('.self_attn.kv_b_proj.', '.attention.lkv2kv.') - - weight_name = weight_name.replace('.self_attn.q_b_proj.weight', '.attention.l2q_proj._layer.weight') - weight_name = weight_name.replace('.self_attn.q_b_proj.input_scale', '.attention.l2q_proj.quant_op.input_scale') - weight_name = weight_name.replace('.self_attn.q_b_proj.input_offset', '.attention.l2q_proj.quant_op.input_zp') - weight_name = weight_name.replace('.self_attn.q_b_proj.quant_bias', - '.attention.l2q_proj._layer.matmul.quant_bias') - weight_name = weight_name.replace('.self_attn.q_b_proj.deq_scale', - '.attention.l2q_proj._layer.matmul.dequant_scale') - - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.weight', '.attention.kv2l._layer.weight') - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.input_scale', - '.attention.kv2l.quant_op.input_scale') - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.input_offset', - '.attention.kv2l.quant_op.input_zp') - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.quant_bias', - '.attention.kv2l._layer.matmul.quant_bias') - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.deq_scale', - '.attention.kv2l._layer.matmul.dequant_scale') - - weight_name = weight_name.replace('.self_attn.o_proj.weight', '.attention.wo._layer.weight') - weight_name = weight_name.replace('.self_attn.o_proj.input_scale', '.attention.wo.quant_op.input_scale') - weight_name = weight_name.replace('.self_attn.o_proj.input_offset', '.attention.wo.quant_op.input_zp') - weight_name = weight_name.replace('.self_attn.o_proj.quant_bias', '.attention.wo._layer.matmul.quant_bias') - weight_name = weight_name.replace('.self_attn.o_proj.deq_scale', '.attention.wo._layer.matmul.dequant_scale') - - weight_name = weight_name.replace('.self_attn.q_a_layernorm.bias', '.attention.l2q_proj.quant_op.beta') - weight_name = weight_name.replace('.input_layernorm.bias', '.attention.q2l_proj.quant_op.beta') + weight_name = weight_name.replace('embed_tokens.weight', + 'tok_embeddings.embedding_weight') + + weight_name = weight_name.replace('.self_attn.q_a_proj.weight', + '.attention.q2l_proj._layer.weight') + weight_name = weight_name.replace( + '.self_attn.q_a_proj.input_scale', + '.attention.q2l_proj.quant_op.input_scale') + weight_name = weight_name.replace( + '.self_attn.q_a_proj.input_offset', + '.attention.q2l_proj.quant_op.input_zp') + weight_name = weight_name.replace( + '.self_attn.q_a_proj.quant_bias', + '.attention.q2l_proj._layer.matmul.quant_bias') + weight_name = weight_name.replace( + '.self_attn.q_a_proj.deq_scale', + '.attention.q2l_proj._layer.matmul.dequant_scale') + + 
weight_name = weight_name.replace('.self_attn.q_a_layernorm.weight', + '.attention.lq_norm.weight') + weight_name = weight_name.replace('.self_attn.kv_a_layernorm.weight', + '.attention.lkv_norm.weight') + weight_name = weight_name.replace('.self_attn.kv_b_proj.', + '.attention.lkv2kv.') + + weight_name = weight_name.replace('.self_attn.q_b_proj.weight', + '.attention.l2q_proj._layer.weight') + weight_name = weight_name.replace( + '.self_attn.q_b_proj.input_scale', + '.attention.l2q_proj.quant_op.input_scale') + weight_name = weight_name.replace( + '.self_attn.q_b_proj.input_offset', + '.attention.l2q_proj.quant_op.input_zp') + weight_name = weight_name.replace( + '.self_attn.q_b_proj.quant_bias', + '.attention.l2q_proj._layer.matmul.quant_bias') + weight_name = weight_name.replace( + '.self_attn.q_b_proj.deq_scale', + '.attention.l2q_proj._layer.matmul.dequant_scale') + + weight_name = weight_name.replace( + '.self_attn.kv_a_proj_with_mqa.weight', + '.attention.kv2l._layer.weight') + weight_name = weight_name.replace( + '.self_attn.kv_a_proj_with_mqa.input_scale', + '.attention.kv2l.quant_op.input_scale') + weight_name = weight_name.replace( + '.self_attn.kv_a_proj_with_mqa.input_offset', + '.attention.kv2l.quant_op.input_zp') + weight_name = weight_name.replace( + '.self_attn.kv_a_proj_with_mqa.quant_bias', + '.attention.kv2l._layer.matmul.quant_bias') + weight_name = weight_name.replace( + '.self_attn.kv_a_proj_with_mqa.deq_scale', + '.attention.kv2l._layer.matmul.dequant_scale') + + weight_name = weight_name.replace('.self_attn.o_proj.weight', + '.attention.wo._layer.weight') + weight_name = weight_name.replace( + '.self_attn.o_proj.input_scale', + '.attention.wo.quant_op.input_scale') + weight_name = weight_name.replace('.self_attn.o_proj.input_offset', + '.attention.wo.quant_op.input_zp') + weight_name = weight_name.replace( + '.self_attn.o_proj.quant_bias', + '.attention.wo._layer.matmul.quant_bias') + weight_name = weight_name.replace( + '.self_attn.o_proj.deq_scale', + '.attention.wo._layer.matmul.dequant_scale') + + weight_name = weight_name.replace('.self_attn.q_a_layernorm.bias', + '.attention.l2q_proj.quant_op.beta') + weight_name = weight_name.replace('.input_layernorm.bias', + '.attention.q2l_proj.quant_op.beta') # mlp is pertoken quant - weight_name = weight_name.replace('.weight_scale', '.matmul.weight_scale') - weight_name = weight_name.replace('.weight_offset', '.matmul.weight_offset') - - weight_name = weight_name.replace('mlp.gate_proj.', 'feed_forward.w1._layer.') - weight_name = weight_name.replace('mlp.down_proj.', 'feed_forward.w2._layer.') - weight_name = weight_name.replace('mlp.up_proj.', 'feed_forward.w3._layer.') - weight_name = weight_name.replace('mlp.experts.', 'feed_forward.routed_experts.ffn.') - weight_name = weight_name.replace('mlp.shared_experts.gate_proj.', 'feed_forward.shared_experts.w1._layer.') - weight_name = weight_name.replace('mlp.shared_experts.down_proj.', 'feed_forward.shared_experts.w2._layer.') - weight_name = weight_name.replace('mlp.shared_experts.up_proj.', 'feed_forward.shared_experts.w3._layer.') - weight_name = weight_name.replace('mlp.gate.weight', 'feed_forward.routed_experts.router.dense.weight') - weight_name = weight_name.replace('mlp.gate.e_score_correction_bias', - 'feed_forward.routed_experts.router.e_score_correction_bias') - weight_name = weight_name.replace('.input_layernorm.weight', '.attention_norm.weight') - weight_name = weight_name.replace('.post_attention_layernorm.', '.ffn_norm.') - weight_name = 
weight_name.replace('model.norm.weight', 'model.norm_out.weight') + weight_name = weight_name.replace('.weight_scale', + '.matmul.weight_scale') + weight_name = weight_name.replace('.weight_offset', + '.matmul.weight_offset') + + weight_name = weight_name.replace('mlp.gate_proj.', + 'feed_forward.w1._layer.') + weight_name = weight_name.replace('mlp.down_proj.', + 'feed_forward.w2._layer.') + weight_name = weight_name.replace('mlp.up_proj.', + 'feed_forward.w3._layer.') + weight_name = weight_name.replace('mlp.experts.', + 'feed_forward.routed_experts.ffn.') + weight_name = weight_name.replace( + 'mlp.shared_experts.gate_proj.', + 'feed_forward.shared_experts.w1._layer.') + weight_name = weight_name.replace( + 'mlp.shared_experts.down_proj.', + 'feed_forward.shared_experts.w2._layer.') + weight_name = weight_name.replace( + 'mlp.shared_experts.up_proj.', + 'feed_forward.shared_experts.w3._layer.') + weight_name = weight_name.replace( + 'mlp.gate.weight', + 'feed_forward.routed_experts.router.dense.weight') + weight_name = weight_name.replace( + 'mlp.gate.e_score_correction_bias', + 'feed_forward.routed_experts.router.e_score_correction_bias') + weight_name = weight_name.replace('.input_layernorm.weight', + '.attention_norm.weight') + weight_name = weight_name.replace('.post_attention_layernorm.', + '.ffn_norm.') + weight_name = weight_name.replace('model.norm.weight', + 'model.norm_out.weight') return weight_name def infer_trans_rope_weight(self, weight, qk_rope_head_dim): @@ -146,7 +200,8 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): weight[..., -qk_rope_head_dim:, :] = np.concatenate([w1, w2], axis=-2) return weight - def infer_quant_process_moe_with_ep(self, src_hf_dir, hf_weight_map, layer_id): + def infer_quant_process_moe_with_ep(self, src_hf_dir, hf_weight_map, + layer_id): w1_list = [] w2_list = [] w3_list = [] @@ -160,9 +215,12 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w2_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.down_proj.weight" w3_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.up_proj.weight" - w1_ms_param, _ = self.get_safetensor_from_file(w1_hf_name, src_hf_dir, hf_weight_map) - w2_ms_param, _ = self.get_safetensor_from_file(w2_hf_name, src_hf_dir, hf_weight_map) - w3_ms_param, _ = self.get_safetensor_from_file(w3_hf_name, src_hf_dir, hf_weight_map) + w1_ms_param, _ = self.get_safetensor_from_file( + w1_hf_name, src_hf_dir, hf_weight_map) + w2_ms_param, _ = self.get_safetensor_from_file( + w2_hf_name, src_hf_dir, hf_weight_map) + w3_ms_param, _ = self.get_safetensor_from_file( + w3_hf_name, src_hf_dir, hf_weight_map) w1_list.append(w1_ms_param) w2_list.append(w2_ms_param) @@ -172,9 +230,12 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w2_scale_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.down_proj.weight_scale" w3_scale_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.up_proj.weight_scale" - w1_scale_ms_param, _ = self.get_safetensor_from_file(w1_scale_hf_name, src_hf_dir, hf_weight_map) - w2_scale_ms_param, _ = self.get_safetensor_from_file(w2_scale_hf_name, src_hf_dir, hf_weight_map) - w3_scale_ms_param, _ = self.get_safetensor_from_file(w3_scale_hf_name, src_hf_dir, hf_weight_map) + w1_scale_ms_param, _ = self.get_safetensor_from_file( + w1_scale_hf_name, src_hf_dir, hf_weight_map) + w2_scale_ms_param, _ = self.get_safetensor_from_file( + w2_scale_hf_name, src_hf_dir, hf_weight_map) + w3_scale_ms_param, _ = self.get_safetensor_from_file( + w3_scale_hf_name, src_hf_dir, hf_weight_map) 
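            # Editor's note (assumption, not from the original patch):
            # per-channel weight scales are stored with a trailing singleton
            # axis, e.g. (out_channels, 1); the squeeze below drops it so
            # the per-expert scales can be stacked into a 2-D table.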
w1_scale_ms_param = w1_scale_ms_param.squeeze(axis=-1) w2_scale_ms_param = w2_scale_ms_param.squeeze(axis=-1) @@ -185,7 +246,8 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): return w1_list, w2_list, w3_list, w1_scale_list, w2_scale_list, w3_scale_list - def infer_quant_process_moe_with_ep_tp(self, src_hf_dir, hf_weight_map, layer_id): + def infer_quant_process_moe_with_ep_tp(self, src_hf_dir, hf_weight_map, + layer_id): w1_list = [] w2_list = [] w3_list = [] @@ -199,12 +261,12 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w2_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.down_proj.weight" w3_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.up_proj.weight" - w1_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) - w2_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) - w3_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w2_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) + w3_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w1_list.append(w1_ms_param) w2_list.append(w2_ms_param) @@ -214,14 +276,12 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w2_scale_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.down_proj.weight_scale" w3_scale_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.up_proj.weight_scale" - w1_scale_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w1_scale_hf_name, src_hf_dir, - hf_weight_map, - split_axis=0) - w2_scale_ms_param, _ = self.get_safetensor_from_file(w2_scale_hf_name, src_hf_dir, - hf_weight_map) - w3_scale_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w3_scale_hf_name, src_hf_dir, - hf_weight_map, - split_axis=0) + w1_scale_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w1_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w2_scale_ms_param, _ = self.get_safetensor_from_file( + w2_scale_hf_name, src_hf_dir, hf_weight_map) + w3_scale_ms_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w3_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w1_scale_ms_param = w1_scale_ms_param.squeeze(axis=-1) w2_scale_ms_param = w2_scale_ms_param.squeeze(axis=-1) @@ -234,30 +294,46 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): def infer_quant_process_moe(self, src_hf_dir, hf_weight_map, layer_id): if self.moe_tp_size > 1: - return self.infer_quant_process_moe_with_ep_tp(src_hf_dir, hf_weight_map, layer_id) + return self.infer_quant_process_moe_with_ep_tp( + src_hf_dir, hf_weight_map, layer_id) else: - return self.infer_quant_process_moe_with_ep(src_hf_dir, hf_weight_map, layer_id) + return self.infer_quant_process_moe_with_ep( + src_hf_dir, hf_weight_map, layer_id) - def infer_quant_process_moe_routed_expert_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_quant_process_moe_routed_expert_ffn_weight( + self, src_hf_dir, layer_id, hf_weight_map): """process moe router expert weight""" ffn_concat = self.config.model.model_config.ffn_concat # router expert dense router_dense_hf_name = f"model.layers.{layer_id}.mlp.gate.weight" - router_dense_ms_name = 
self.quant_convert_weight_name(router_dense_hf_name) - router_dense_ms_param, _ = self.get_safetensor_from_file(router_dense_hf_name, src_hf_dir, hf_weight_map) + router_dense_ms_name = self.quant_convert_weight_name( + router_dense_hf_name) + router_dense_ms_param, _ = self.get_safetensor_from_file( + router_dense_hf_name, src_hf_dir, hf_weight_map) + + if self.moe_split_ep and self.ep_method != EPMethod.ALLTOALL: + expert_idx = [idx for idx in range(router_dense_ms_param.shape[0])] + in_start_expert_idx = self.ep_group_nums * self.moe_ep_rank_id + expert_idx = expert_idx[ + in_start_expert_idx:] + expert_idx[:in_start_expert_idx] + router_dense_ms_param = np.array(router_dense_ms_param)[expert_idx] + self.parameter_dict[router_dense_ms_name] = ms.Parameter( ms.from_numpy(router_dense_ms_param).astype(ms.bfloat16), - name=router_dense_ms_name, requires_grad=False) + name=router_dense_ms_name, + requires_grad=False) # e_score_correction_bias e_score_correction_bias_hf_name = f"model.layers.{layer_id}.mlp.gate.e_score_correction_bias" - e_score_correction_bias_ms_name = self.quant_convert_weight_name(e_score_correction_bias_hf_name) - e_score_correction_bias_ms_param, _ = self.get_safetensor_from_file(e_score_correction_bias_hf_name, src_hf_dir, - hf_weight_map) + e_score_correction_bias_ms_name = self.quant_convert_weight_name( + e_score_correction_bias_hf_name) + e_score_correction_bias_ms_param, _ = self.get_safetensor_from_file( + e_score_correction_bias_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[e_score_correction_bias_ms_name] = ms.Parameter( ms.from_numpy(e_score_correction_bias_ms_param).astype(ms.float32), - name=e_score_correction_bias_ms_name, requires_grad=False) + name=e_score_correction_bias_ms_name, + requires_grad=False) w1_ms_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w1._layer.weight" w2_ms_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w2._layer.weight" @@ -281,27 +357,36 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): if ffn_concat: # w_gate_hidden w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w_gate_hidden._layer.weight" - w_gate_hidden_np = np.concatenate([w1_ms_stack_param, w3_ms_stack_param], axis=1) - w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).permute(0, 2, 1).astype(ms.int8) - self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param, name=w_gate_hidden_name, - requires_grad=False) + w_gate_hidden_np = np.concatenate( + [w1_ms_stack_param, w3_ms_stack_param], axis=1) + w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).permute( + 0, 2, 1).astype(ms.int8) + self.parameter_dict[w_gate_hidden_name] = ms.Parameter( + w_gate_hidden_param, + name=w_gate_hidden_name, + requires_grad=False) # w_scale_gate_hidden w_scale_gate_hidden_name = \ f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w_gate_hidden._layer.matmul.weight_scale" - w_scale_gate_hidden_np = np.concatenate([w1_scale_ms_stack_param, w3_scale_ms_stack_param], axis=1) - w_scale_gate_hidden_param = ms.from_numpy(w_scale_gate_hidden_np).astype(ms.bfloat16) - self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter(w_scale_gate_hidden_param, - name=w_scale_gate_hidden_name, - requires_grad=False) + w_scale_gate_hidden_np = np.concatenate( + [w1_scale_ms_stack_param, w3_scale_ms_stack_param], axis=1) + w_scale_gate_hidden_param = ms.from_numpy( + w_scale_gate_hidden_np).astype(ms.bfloat16) + self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter( + 
w_scale_gate_hidden_param, + name=w_scale_gate_hidden_name, + requires_grad=False) else: # w1 w3 self.parameter_dict[w1_ms_name] = ms.Parameter( - ms.from_numpy(w1_ms_stack_param).permute(0, 2, 1).astype(ms.int8), + ms.from_numpy(w1_ms_stack_param).permute(0, 2, + 1).astype(ms.int8), name=w1_ms_name, requires_grad=False) self.parameter_dict[w3_ms_name] = ms.Parameter( - ms.from_numpy(w3_ms_stack_param).permute(0, 2, 1).astype(ms.int8), + ms.from_numpy(w3_ms_stack_param).permute(0, 2, + 1).astype(ms.int8), name=w3_ms_name, requires_grad=False) @@ -325,35 +410,45 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): name=w2_scale_ms_name, requires_grad=False) - def get_quant_moe_shared_expert_weight(self, w1_hf_name, w2_hf_name, w3_hf_name, w1_scale_hf_name, w2_scale_hf_name, - w3_scale_hf_name, src_hf_dir, hf_weight_map): + def get_quant_moe_shared_expert_weight(self, w1_hf_name, w2_hf_name, + w3_hf_name, w1_scale_hf_name, + w2_scale_hf_name, w3_scale_hf_name, + src_hf_dir, hf_weight_map): if self.ep_method in [EPMethod.DEFAULT, EPMethod.ALLGATHER]: - w1_ms_param, _ = self.get_safetensor_from_file_split_global_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) - w2_ms_param, _ = self.get_safetensor_from_file_split_global_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) - w3_ms_param, _ = self.get_safetensor_from_file_split_global_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) - w1_scale_ms_param, _ = self.get_safetensor_from_file_split_global_group(w1_scale_hf_name, src_hf_dir, - hf_weight_map, split_axis=0) - w2_scale_ms_param, _ = self.get_safetensor_from_file(w2_scale_hf_name, src_hf_dir, hf_weight_map) - - w3_scale_ms_param, _ = self.get_safetensor_from_file_split_global_group(w3_scale_hf_name, src_hf_dir, - hf_weight_map, split_axis=0) + w1_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w2_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) + w3_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w1_scale_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w1_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w2_scale_ms_param, _ = self.get_safetensor_from_file( + w2_scale_hf_name, src_hf_dir, hf_weight_map) + + w3_scale_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w3_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) elif self.ep_method == EPMethod.ALLTOALL: - w1_ms_param, _ = self.get_safetensor_from_file(w1_hf_name, src_hf_dir, hf_weight_map) - w2_ms_param, _ = self.get_safetensor_from_file(w2_hf_name, src_hf_dir, hf_weight_map) - w3_ms_param, _ = self.get_safetensor_from_file(w3_hf_name, src_hf_dir, hf_weight_map) - - w1_scale_ms_param, _ = self.get_safetensor_from_file(w1_scale_hf_name, src_hf_dir, hf_weight_map) - w2_scale_ms_param, _ = self.get_safetensor_from_file(w2_scale_hf_name, src_hf_dir, hf_weight_map) - w3_scale_ms_param, _ = self.get_safetensor_from_file(w3_scale_hf_name, src_hf_dir, hf_weight_map) + w1_ms_param, _ = self.get_safetensor_from_file( + w1_hf_name, src_hf_dir, hf_weight_map) + w2_ms_param, _ = self.get_safetensor_from_file( + w2_hf_name, src_hf_dir, hf_weight_map) + w3_ms_param, _ = self.get_safetensor_from_file( + w3_hf_name, src_hf_dir, hf_weight_map) + + w1_scale_ms_param, _ = self.get_safetensor_from_file( + w1_scale_hf_name, src_hf_dir, hf_weight_map) + 
w2_scale_ms_param, _ = self.get_safetensor_from_file(
+                w2_scale_hf_name, src_hf_dir, hf_weight_map)
+            w3_scale_ms_param, _ = self.get_safetensor_from_file(
+                w3_scale_hf_name, src_hf_dir, hf_weight_map)
         else:
-            raise ValueError("Unsupported ep_method:{}".format(self.ep_method))
+            raise ValueError(f"Unsupported ep_method: {self.ep_method}")
 
         return w1_ms_param, w2_ms_param, w3_ms_param, w1_scale_ms_param, w2_scale_ms_param, w3_scale_ms_param
 
-    def infer_quant_process_moe_shared_expert_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map):
+    def infer_quant_process_moe_shared_expert_ffn_weight(
+            self, src_hf_dir, layer_id, hf_weight_map):
         """infer quant process moe shared expert ffn weight"""
         ffn_concat = self.config.model.model_config.ffn_concat
         w1_hf_name = f"model.layers.{layer_id}.mlp.shared_experts.gate_proj.weight"
@@ -383,26 +478,35 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor):
 
         if ffn_concat:
             w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.shared_experts.w_gate_hidden._layer.weight"
-            w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], axis=0)
-            w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(ms.int8)
-            self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param, name=w_gate_hidden_name,
-                                                                   requires_grad=False)
+            w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param],
+                                              axis=0)
+            w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(
+                ms.int8)
+            self.parameter_dict[w_gate_hidden_name] = ms.Parameter(
+                w_gate_hidden_param,
+                name=w_gate_hidden_name,
+                requires_grad=False)
             w_scale_gate_hidden_name = \
                 f"model.layers.{layer_id}.feed_forward.shared_experts.w_gate_hidden._layer.matmul.weight_scale"
-            w_scale_gate_hidden_np = np.concatenate([w1_scale_ms_param, w3_scale_ms_param], axis=0)
-            w_scale_gate_hidden_param = ms.from_numpy(w_scale_gate_hidden_np).astype(ms.bfloat16)
-            self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter(w_scale_gate_hidden_param,
-                                                                         name=w_scale_gate_hidden_name,
-                                                                         requires_grad=False)
+            w_scale_gate_hidden_np = np.concatenate(
+                [w1_scale_ms_param, w3_scale_ms_param], axis=0)
+            w_scale_gate_hidden_param = ms.from_numpy(
+                w_scale_gate_hidden_np).astype(ms.bfloat16)
+            self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter(
+                w_scale_gate_hidden_param,
+                name=w_scale_gate_hidden_name,
+                requires_grad=False)
         else:
-            self.parameter_dict[w1_ms_name] = ms.Parameter(ms.from_numpy(w1_ms_param).astype(ms.int8),
-                                                           name=w1_ms_name,
-                                                           requires_grad=False)
-            self.parameter_dict[w3_ms_name] = ms.Parameter(ms.from_numpy(w3_ms_param).astype(ms.int8),
-                                                           name=w3_ms_name,
-                                                           requires_grad=False)
+            self.parameter_dict[w1_ms_name] = ms.Parameter(
+                ms.from_numpy(w1_ms_param).astype(ms.int8),
+                name=w1_ms_name,
+                requires_grad=False)
+            self.parameter_dict[w3_ms_name] = ms.Parameter(
+                ms.from_numpy(w3_ms_param).astype(ms.int8),
+                name=w3_ms_name,
+                requires_grad=False)
 
         self.parameter_dict[w1_scale_ms_name] = ms.Parameter(
             ms.from_numpy(w1_scale_ms_param).astype(ms.bfloat16),
@@ -413,45 +517,48 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor):
             name=w3_ms_name,
             requires_grad=False)
 
-        self.parameter_dict[w2_ms_name] = ms.Parameter(ms.from_numpy(w2_ms_param).astype(ms.int8),
-                                                       name=w2_ms_name,
-                                                       requires_grad=False)
+        self.parameter_dict[w2_ms_name] = ms.Parameter(
+            ms.from_numpy(w2_ms_param).astype(ms.int8),
+            name=w2_ms_name,
+            requires_grad=False)
 
         self.parameter_dict[w2_scale_ms_name] = ms.Parameter(
             ms.from_numpy(w2_scale_ms_param).astype(ms.bfloat16),
             name=w2_ms_name,
             requires_grad=False)
 
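Why the ffn_concat branches above stack w1 (gate_proj) and w3 (up_proj) along the output axis: a single fused matmul then produces both halves of the SwiGLU input, which the runtime splits afterwards. A minimal numpy sketch with illustrative sizes (not the model's real shapes, and float instead of int8 for clarity):

import numpy as np

hidden, inter = 16, 32
w1 = np.random.randn(inter, hidden).astype(np.float32)  # stand-in for gate_proj
w3 = np.random.randn(inter, hidden).astype(np.float32)  # stand-in for up_proj

# mirrors np.concatenate([w1_ms_param, w3_ms_param], axis=0) above
w_gate_hidden = np.concatenate([w1, w3], axis=0)         # (2*inter, hidden)

x = np.random.randn(hidden).astype(np.float32)
gate, up = np.split(w_gate_hidden @ x, 2)                # one matmul, two outputs
assert np.allclose(gate, w1 @ x) and np.allclose(up, w3 @ x)

The per-channel weight scales are concatenated in the same order, so dequantization stays aligned with the fused rows.

-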
def infer_quant_process_dense_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_quant_process_dense_ffn_weight(self, src_hf_dir, layer_id, + hf_weight_map): """infer process dense ffn weight""" ffn_concat = self.config.model.model_config.ffn_concat w1_hf_name = f"model.layers.{layer_id}.mlp.gate_proj.weight" w1_ms_name = self.quant_convert_weight_name(w1_hf_name) - w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w1_scale_hf_name = f"model.layers.{layer_id}.mlp.gate_proj.weight_scale" w1_scale_ms_name = self.quant_convert_weight_name(w1_scale_hf_name) - w1_scale_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w1_scale_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_scale_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w2_hf_name = f"model.layers.{layer_id}.mlp.down_proj.weight" w2_ms_name = self.quant_convert_weight_name(w2_hf_name) - w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) + w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) w2_scale_hf_name = f"model.layers.{layer_id}.mlp.down_proj.weight_scale" w2_scale_ms_name = self.quant_convert_weight_name(w2_scale_hf_name) # shape:[7168,1] - w2_scale_ms_param, _ = self.get_safetensor_from_file(w2_scale_hf_name, src_hf_dir, hf_weight_map) + w2_scale_ms_param, _ = self.get_safetensor_from_file( + w2_scale_hf_name, src_hf_dir, hf_weight_map) w3_hf_name = f"model.layers.{layer_id}.mlp.up_proj.weight" w3_ms_name = self.quant_convert_weight_name(w3_hf_name) - w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w3_scale_hf_name = f"model.layers.{layer_id}.mlp.up_proj.weight_scale" w3_scale_ms_name = self.quant_convert_weight_name(w3_scale_hf_name) - w3_scale_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w3_scale_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_scale_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_scale_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w1_scale_ms_param = w1_scale_ms_param.squeeze(axis=-1) w2_scale_ms_param = w2_scale_ms_param.squeeze(axis=-1) @@ -459,25 +566,33 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): if ffn_concat: w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.w_gate_hidden._layer.weight" - w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], axis=0) - w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(dtype=ms.int8) - self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param, name=w_gate_hidden_name, - requires_grad=False) + w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], + axis=0) + w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype( + dtype=ms.int8) + self.parameter_dict[w_gate_hidden_name] = ms.Parameter( + w_gate_hidden_param, + name=w_gate_hidden_name, + requires_grad=False) w_scale_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.w_gate_hidden._layer.matmul.weight_scale" w_scale_gate_hidden_param = ms.from_numpy( - np.concatenate([w1_scale_ms_param, 
w3_scale_ms_param], axis=0)).astype(dtype=ms.bfloat16) - self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter(w_scale_gate_hidden_param, - name=w_scale_gate_hidden_name, - requires_grad=False) + np.concatenate([w1_scale_ms_param, w3_scale_ms_param], + axis=0)).astype(dtype=ms.bfloat16) + self.parameter_dict[w_scale_gate_hidden_name] = ms.Parameter( + w_scale_gate_hidden_param, + name=w_scale_gate_hidden_name, + requires_grad=False) else: - self.parameter_dict[w1_ms_name] = ms.Parameter(ms.from_numpy(w1_ms_param).astype(ms.int8), - name=w1_ms_name, - requires_grad=False) - self.parameter_dict[w3_ms_name] = ms.Parameter(ms.from_numpy(w3_ms_param).astype(ms.int8), - name=w3_ms_name, - requires_grad=False) + self.parameter_dict[w1_ms_name] = ms.Parameter( + ms.from_numpy(w1_ms_param).astype(ms.int8), + name=w1_ms_name, + requires_grad=False) + self.parameter_dict[w3_ms_name] = ms.Parameter( + ms.from_numpy(w3_ms_param).astype(ms.int8), + name=w3_ms_name, + requires_grad=False) self.parameter_dict[w1_scale_ms_name] = ms.Parameter( ms.from_numpy(w1_scale_ms_param).astype(ms.bfloat16), @@ -488,9 +603,10 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): name=w3_scale_ms_name, requires_grad=False) - self.parameter_dict[w2_ms_name] = ms.Parameter(ms.from_numpy(w2_ms_param).astype(ms.int8), - name=w2_ms_name, - requires_grad=False) + self.parameter_dict[w2_ms_name] = ms.Parameter( + ms.from_numpy(w2_ms_param).astype(ms.int8), + name=w2_ms_name, + requires_grad=False) self.parameter_dict[w2_scale_ms_name] = ms.Parameter( ms.from_numpy(w2_scale_ms_param).astype(ms.bfloat16), @@ -500,31 +616,44 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): def infer_convert_outer_weight(self, src_hf_dir, hf_weight_map): """convert weight not in model""" embed_tokens_hf_name = "model.embed_tokens.weight" - embed_tokens_ms_name = self.quant_convert_weight_name(embed_tokens_hf_name) - np_data, _ = self.get_safetensor_from_file(embed_tokens_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[embed_tokens_ms_name] = ms.Parameter(ms.from_numpy(np_data).astype(ms.bfloat16), - name=embed_tokens_ms_name, - requires_grad=False) + embed_tokens_ms_name = self.quant_convert_weight_name( + embed_tokens_hf_name) + np_data, _ = self.get_safetensor_from_file(embed_tokens_hf_name, + src_hf_dir, hf_weight_map) + self.parameter_dict[embed_tokens_ms_name] = ms.Parameter( + ms.from_numpy(np_data).astype(ms.bfloat16), + name=embed_tokens_ms_name, + requires_grad=False) norm_hf_name = "model.norm.weight" norm_ms_name = self.quant_convert_weight_name(norm_hf_name) - np_data, _ = self.get_safetensor_from_file(norm_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[norm_ms_name] = ms.Parameter(ms.from_numpy(np_data).astype(ms.bfloat16), - name=norm_ms_name, - requires_grad=False) + np_data, _ = self.get_safetensor_from_file(norm_hf_name, src_hf_dir, + hf_weight_map) + self.parameter_dict[norm_ms_name] = ms.Parameter( + ms.from_numpy(np_data).astype(ms.bfloat16), + name=norm_ms_name, + requires_grad=False) lm_head_hf_name = "lm_head.weight" lm_head_ms_name = self.quant_convert_weight_name(lm_head_hf_name) if not self.config.parallel_config.vocab_emb_dp: - np_data, _ = self.get_safetensor_from_file_split_tp_group(lm_head_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + np_data, _ = self.get_safetensor_from_file_split_tp_group( + lm_head_hf_name, src_hf_dir, hf_weight_map, split_axis=0) else: - np_data, _ = self.get_safetensor_from_file(lm_head_hf_name, src_hf_dir, hf_weight_map) - 
self.parameter_dict[lm_head_ms_name] = ms.Parameter(ms.from_numpy(np_data).astype(ms.bfloat16), - name=lm_head_ms_name, - requires_grad=False) + np_data, _ = self.get_safetensor_from_file(lm_head_hf_name, + src_hf_dir, + hf_weight_map) + self.parameter_dict[lm_head_ms_name] = ms.Parameter( + ms.from_numpy(np_data).astype(ms.bfloat16), + name=lm_head_ms_name, + requires_grad=False) - def quant_special_attention_weight(self, layer_id, src_hf_dir, hf_weight_map, name, is_trans_rope_weigh=False, + def quant_special_attention_weight(self, + layer_id, + src_hf_dir, + hf_weight_map, + name, + is_trans_rope_weigh=False, is_split_param=False): # q_a_proj->q2l_proj # kv_a_proj_with_mqa->kv2l @@ -533,29 +662,38 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # input_scale, input_zp no split input_scale_hf_name = f"model.layers.{layer_id}.self_attn." + name + ".input_scale" - input_scale_ms_name = self.quant_convert_weight_name(input_scale_hf_name) - input_scale_ms_param, _ = self.get_safetensor_from_file(input_scale_hf_name, src_hf_dir, hf_weight_map) + input_scale_ms_name = self.quant_convert_weight_name( + input_scale_hf_name) + input_scale_ms_param, _ = self.get_safetensor_from_file( + input_scale_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[input_scale_ms_name] = ms.Parameter( ms.from_numpy(input_scale_ms_param).astype(ms.bfloat16), - name=input_scale_ms_name, requires_grad=False) + name=input_scale_ms_name, + requires_grad=False) input_zp_hf_name = f"model.layers.{layer_id}.self_attn." + name + ".input_offset" input_zp_ms_name = self.quant_convert_weight_name(input_zp_hf_name) - input_zp_ms_param, _ = self.get_safetensor_from_file(input_zp_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[input_zp_ms_name] = ms.Parameter(ms.from_numpy(input_zp_ms_param).astype(ms.int8), - name=input_zp_ms_name, - requires_grad=False) + input_zp_ms_param, _ = self.get_safetensor_from_file( + input_zp_hf_name, src_hf_dir, hf_weight_map) + self.parameter_dict[input_zp_ms_name] = ms.Parameter( + ms.from_numpy(input_zp_ms_param).astype(ms.int8), + name=input_zp_ms_name, + requires_grad=False) if not is_trans_rope_weigh: quant_bias_hf_name = f"model.layers.{layer_id}.self_attn." + name + ".quant_bias" - quant_bias_ms_name = self.quant_convert_weight_name(quant_bias_hf_name) - quant_bias_ms_param, _ = self.get_safetensor_from_file(quant_bias_hf_name, src_hf_dir, hf_weight_map) + quant_bias_ms_name = self.quant_convert_weight_name( + quant_bias_hf_name) + quant_bias_ms_param, _ = self.get_safetensor_from_file( + quant_bias_hf_name, src_hf_dir, hf_weight_map) if name == "o_proj" and self.tp_rank_id != 0: quant_bias_ms_param.fill(0) dequant_scale_hf_name = f"model.layers.{layer_id}.self_attn." + name + ".deq_scale" - dequant_scale_ms_name = self.quant_convert_weight_name(dequant_scale_hf_name) - dequant_scale_ms_param, _ = self.get_safetensor_from_file(dequant_scale_hf_name, src_hf_dir, hf_weight_map) + dequant_scale_ms_name = self.quant_convert_weight_name( + dequant_scale_hf_name) + dequant_scale_ms_param, _ = self.get_safetensor_from_file( + dequant_scale_hf_name, src_hf_dir, hf_weight_map) else: kv_lora_rank = self.config.model.model_config.kv_lora_rank qk_rope_head_dim = self.config.model.model_config.qk_rope_head_dim @@ -566,37 +704,53 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): kv_head_dim = kv_lora_rank + qk_rope_head_dim quant_bias_hf_name = f"model.layers.{layer_id}.self_attn." 
+ name + ".quant_bias" - quant_bias_ms_name = self.quant_convert_weight_name(quant_bias_hf_name) - quant_bias_ms_param, _ = self.get_safetensor_from_file(quant_bias_hf_name, src_hf_dir, hf_weight_map) + quant_bias_ms_name = self.quant_convert_weight_name( + quant_bias_hf_name) + quant_bias_ms_param, _ = self.get_safetensor_from_file( + quant_bias_hf_name, src_hf_dir, hf_weight_map) dequant_scale_hf_name = f"model.layers.{layer_id}.self_attn." + name + ".deq_scale" - dequant_scale_ms_name = self.quant_convert_weight_name(dequant_scale_hf_name) - dequant_scale_ms_param, _ = self.get_safetensor_from_file(dequant_scale_hf_name, src_hf_dir, hf_weight_map) + dequant_scale_ms_name = self.quant_convert_weight_name( + dequant_scale_hf_name) + dequant_scale_ms_param, _ = self.get_safetensor_from_file( + dequant_scale_hf_name, src_hf_dir, hf_weight_map) if name == "q_b_proj": - quant_bias_ms_param = quant_bias_ms_param.reshape(num_heads, rope_dim, -1) - quant_bias_ms_param = self.infer_trans_rope_weight(quant_bias_ms_param, qk_rope_head_dim) - quant_bias_ms_param = quant_bias_ms_param.reshape(num_heads * rope_dim, -1).reshape(-1) - - dequant_scale_ms_param = dequant_scale_ms_param.reshape(num_heads, rope_dim, -1) - dequant_scale_ms_param = self.infer_trans_rope_weight(dequant_scale_ms_param, qk_rope_head_dim) - dequant_scale_ms_param = dequant_scale_ms_param.reshape(num_heads * rope_dim, -1).reshape(-1) + quant_bias_ms_param = quant_bias_ms_param.reshape( + num_heads, rope_dim, -1) + quant_bias_ms_param = self.infer_trans_rope_weight( + quant_bias_ms_param, qk_rope_head_dim) + quant_bias_ms_param = quant_bias_ms_param.reshape( + num_heads * rope_dim, -1).reshape(-1) + + dequant_scale_ms_param = dequant_scale_ms_param.reshape( + num_heads, rope_dim, -1) + dequant_scale_ms_param = self.infer_trans_rope_weight( + dequant_scale_ms_param, qk_rope_head_dim) + dequant_scale_ms_param = dequant_scale_ms_param.reshape( + num_heads * rope_dim, -1).reshape(-1) elif name == "kv_a_proj_with_mqa": - quant_bias_ms_param = quant_bias_ms_param.reshape(kv_head_dim, -1) - quant_bias_ms_param = self.infer_trans_rope_weight(quant_bias_ms_param, qk_rope_head_dim).reshape(-1) + quant_bias_ms_param = quant_bias_ms_param.reshape( + kv_head_dim, -1) + quant_bias_ms_param = self.infer_trans_rope_weight( + quant_bias_ms_param, qk_rope_head_dim).reshape(-1) - dequant_scale_ms_param = dequant_scale_ms_param.reshape(kv_head_dim, -1) - dequant_scale_ms_param = self.infer_trans_rope_weight(dequant_scale_ms_param, qk_rope_head_dim).reshape( - -1) + dequant_scale_ms_param = dequant_scale_ms_param.reshape( + kv_head_dim, -1) + dequant_scale_ms_param = self.infer_trans_rope_weight( + dequant_scale_ms_param, qk_rope_head_dim).reshape(-1) if is_split_param: - quant_bias_ms_param = self.split_weight_by_rank(quant_bias_ms_param, split_axis=0) - dequant_scale_ms_param = self.split_weight_by_rank(dequant_scale_ms_param, split_axis=0) + quant_bias_ms_param = self.split_weight_by_rank( + quant_bias_ms_param, split_axis=0) + dequant_scale_ms_param = self.split_weight_by_rank( + dequant_scale_ms_param, split_axis=0) self.parameter_dict[quant_bias_ms_name] = ms.Parameter( ms.from_numpy(quant_bias_ms_param).astype(ms.int32), - name=quant_bias_ms_name, requires_grad=False) + name=quant_bias_ms_name, + requires_grad=False) self.parameter_dict[dequant_scale_ms_name] = ms.Parameter( ms.from_numpy(dequant_scale_ms_param).astype(ms.float32), name=dequant_scale_ms_name, @@ -605,15 +759,19 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): def 
infer_quant_bias_weight(self, src_hf_dir, layer_id, hf_weight_map): # quant_op.beta q2l_proj_bias_hf_name = f"model.layers.{layer_id}.input_layernorm.bias" - q2l_proj_bias_ms_name = self.quant_convert_weight_name(q2l_proj_bias_hf_name) - q2l_proj_bias_ms_param, _ = self.get_safetensor_from_file(q2l_proj_bias_hf_name, src_hf_dir, hf_weight_map) + q2l_proj_bias_ms_name = self.quant_convert_weight_name( + q2l_proj_bias_hf_name) + q2l_proj_bias_ms_param, _ = self.get_safetensor_from_file( + q2l_proj_bias_hf_name, src_hf_dir, hf_weight_map) kv2l_bias_ms_name = f"model.layers.{layer_id}.attention.kv2l.quant_op.beta" kv2l_bias_ms_param = q2l_proj_bias_ms_param.copy() l2q_proj_bias_hf_name = f"model.layers.{layer_id}.self_attn.q_a_layernorm.bias" - l2q_proj_bias_ms_name = self.quant_convert_weight_name(l2q_proj_bias_hf_name) - l2q_proj_bias_ms_param, _ = self.get_safetensor_from_file(l2q_proj_bias_hf_name, src_hf_dir, hf_weight_map) + l2q_proj_bias_ms_name = self.quant_convert_weight_name( + l2q_proj_bias_hf_name) + l2q_proj_bias_ms_param, _ = self.get_safetensor_from_file( + l2q_proj_bias_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[q2l_proj_bias_ms_name] = ms.Parameter( ms.from_numpy(q2l_proj_bias_ms_param).astype(ms.bfloat16), @@ -628,7 +786,8 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): name=l2q_proj_bias_ms_name, requires_grad=False) - def infer_quant_process_attention_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_quant_process_attention_weight(self, src_hf_dir, layer_id, + hf_weight_map): """infer quant process attention weight""" num_heads = self.config.model.model_config.num_heads qk_rope_head_dim = self.config.model.model_config.qk_rope_head_dim @@ -640,30 +799,40 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # q_a_layernorm->lq_norm lq_norm_hf_name = f"model.layers.{layer_id}.self_attn.q_a_layernorm.weight" lq_norm_ms_name = self.quant_convert_weight_name(lq_norm_hf_name) - lq_norm_ms_param, _ = self.get_safetensor_from_file(lq_norm_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[lq_norm_ms_name] = ms.Parameter(ms.from_numpy(lq_norm_ms_param).astype(ms.bfloat16), - name=lq_norm_ms_name, - requires_grad=False) + lq_norm_ms_param, _ = self.get_safetensor_from_file( + lq_norm_hf_name, src_hf_dir, hf_weight_map) + self.parameter_dict[lq_norm_ms_name] = ms.Parameter( + ms.from_numpy(lq_norm_ms_param).astype(ms.bfloat16), + name=lq_norm_ms_name, + requires_grad=False) # q_b_proj->l2q_proj l2q_proj_hf_name = f"model.layers.{layer_id}.self_attn.q_b_proj.weight" l2q_proj_ms_name = self.quant_convert_weight_name(l2q_proj_hf_name) - l2q_proj_ms_param, _ = self.get_safetensor_from_file(l2q_proj_hf_name, src_hf_dir, hf_weight_map) + l2q_proj_ms_param, _ = self.get_safetensor_from_file( + l2q_proj_hf_name, src_hf_dir, hf_weight_map) l2q_proj_ms_param = l2q_proj_ms_param.reshape(num_heads, rope_dim, -1) - l2q_proj_ms_param = self.infer_trans_rope_weight(l2q_proj_ms_param, qk_rope_head_dim) + l2q_proj_ms_param = self.infer_trans_rope_weight( + l2q_proj_ms_param, qk_rope_head_dim) l2q_proj_ms_param = l2q_proj_ms_param.reshape(num_heads * rope_dim, -1) - l2q_proj_ms_param = self.split_weight_by_rank(l2q_proj_ms_param, split_axis=0) + l2q_proj_ms_param = self.split_weight_by_rank(l2q_proj_ms_param, + split_axis=0) self.parameter_dict[l2q_proj_ms_name] = ms.Parameter( ms.from_numpy(l2q_proj_ms_param).astype(ms.int8), name=l2q_proj_ms_name, requires_grad=False) - self.quant_special_attention_weight(layer_id, src_hf_dir, hf_weight_map, "q_b_proj", 
is_trans_rope_weigh=True, + self.quant_special_attention_weight(layer_id, + src_hf_dir, + hf_weight_map, + "q_b_proj", + is_trans_rope_weigh=True, is_split_param=True) # kv_a_layernorm->lkv_norm lkv_norm_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_layernorm.weight" lkv_norm_ms_name = self.quant_convert_weight_name(lkv_norm_hf_name) - lkv_norm_ms_param, _ = self.get_safetensor_from_file(lkv_norm_hf_name, src_hf_dir, hf_weight_map) + lkv_norm_ms_param, _ = self.get_safetensor_from_file( + lkv_norm_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[lkv_norm_ms_name] = ms.Parameter( ms.from_numpy(lkv_norm_ms_param).astype(ms.bfloat16), name=lkv_norm_ms_name, @@ -672,37 +841,48 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # kv_b_proj->lkv2kv lkv2kv_hf_name = f"model.layers.{layer_id}.self_attn.kv_b_proj.weight" lkv2kv_ms_name = self.quant_convert_weight_name(lkv2kv_hf_name) - lkv2kv_ms_param, _ = self.get_safetensor_from_file(lkv2kv_hf_name, src_hf_dir, hf_weight_map) + lkv2kv_ms_param, _ = self.get_safetensor_from_file( + lkv2kv_hf_name, src_hf_dir, hf_weight_map) lkv2kv_head = qk_nope_head_dim + v_head_dim lkv2kv_ms_param = lkv2kv_ms_param.reshape(num_heads, lkv2kv_head, -1) - value_k_nope, value_v = lkv2kv_ms_param[:, :qk_nope_head_dim, :], lkv2kv_ms_param[:, qk_nope_head_dim:, :] + value_k_nope, value_v = lkv2kv_ms_param[:, : + qk_nope_head_dim, :], lkv2kv_ms_param[:, + qk_nope_head_dim:, :] # value_k_nope value_k_nope = value_k_nope.reshape(-1, value_k_nope.shape[-1]) value_k_nope = self.split_weight_by_rank(value_k_nope, split_axis=0) - name_k_nope = lkv2kv_ms_name.replace(".attention.lkv2kv.", ".attention.lkv2kv_k_nope.") - self.parameter_dict[name_k_nope] = ms.Parameter(ms.from_numpy(value_k_nope).astype(ms.bfloat16), - name=name_k_nope, - requires_grad=False) + name_k_nope = lkv2kv_ms_name.replace(".attention.lkv2kv.", + ".attention.lkv2kv_k_nope.") + self.parameter_dict[name_k_nope] = ms.Parameter( + ms.from_numpy(value_k_nope).astype(ms.bfloat16), + name=name_k_nope, + requires_grad=False) # value_v value_v = value_v.reshape(-1, value_v.shape[-1]) value_v = self.split_weight_by_rank(value_v, split_axis=0) - name_v = lkv2kv_ms_name.replace(".attention.lkv2kv.", ".attention.lkv2kv_v.") - self.parameter_dict[name_v] = ms.Parameter(ms.from_numpy(value_v).astype(ms.bfloat16), - name=name_v, - requires_grad=False) + name_v = lkv2kv_ms_name.replace(".attention.lkv2kv.", + ".attention.lkv2kv_v.") + self.parameter_dict[name_v] = ms.Parameter( + ms.from_numpy(value_v).astype(ms.bfloat16), + name=name_v, + requires_grad=False) # o_proj->wo wo_hf_name = f"model.layers.{layer_id}.self_attn.o_proj.weight" wo_ms_name = self.quant_convert_weight_name(wo_hf_name) - wo_ms_param, _ = self.get_safetensor_from_file(wo_hf_name, src_hf_dir, hf_weight_map) + wo_ms_param, _ = self.get_safetensor_from_file(wo_hf_name, src_hf_dir, + hf_weight_map) wo_ms_param = self.split_weight_by_rank(wo_ms_param, split_axis=1) - self.parameter_dict[wo_ms_name] = ms.Parameter(ms.from_numpy(wo_ms_param).astype(ms.int8), - name=wo_ms_name, - requires_grad=False) - self.quant_special_attention_weight(layer_id, src_hf_dir, hf_weight_map, "o_proj") + self.parameter_dict[wo_ms_name] = ms.Parameter( + ms.from_numpy(wo_ms_param).astype(ms.int8), + name=wo_ms_name, + requires_grad=False) + self.quant_special_attention_weight(layer_id, src_hf_dir, + hf_weight_map, "o_proj") - def infer_quant_process_dense_qkv_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_quant_process_dense_qkv_weight(self, 
src_hf_dir, layer_id, + hf_weight_map): """infer_quant_process_dense_qkv_weight""" parameter_dict = {} kv_lora_rank = self.config.model.model_config.kv_lora_rank @@ -713,61 +893,78 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # q2l q2l_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.weight" q2l_ms_name = self.quant_convert_weight_name(q2l_hf_name) - q2l_ms_param, _ = self.get_safetensor_from_file(q2l_hf_name, src_hf_dir, hf_weight_map) + q2l_ms_param, _ = self.get_safetensor_from_file( + q2l_hf_name, src_hf_dir, hf_weight_map) q2l_input_scale_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.input_scale" - q2l_input_scale_ms_name = self.quant_convert_weight_name(q2l_input_scale_hf_name) - q2l_input_scale_ms_param, _ = self.get_safetensor_from_file(q2l_input_scale_hf_name, src_hf_dir, - hf_weight_map) + q2l_input_scale_ms_name = self.quant_convert_weight_name( + q2l_input_scale_hf_name) + q2l_input_scale_ms_param, _ = self.get_safetensor_from_file( + q2l_input_scale_hf_name, src_hf_dir, hf_weight_map) q2l_input_zp_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.input_offset" - q2l_input_zp_ms_name = self.quant_convert_weight_name(q2l_input_zp_hf_name) - q2l_input_zp_ms_param, _ = self.get_safetensor_from_file(q2l_input_zp_hf_name, src_hf_dir, hf_weight_map) + q2l_input_zp_ms_name = self.quant_convert_weight_name( + q2l_input_zp_hf_name) + q2l_input_zp_ms_param, _ = self.get_safetensor_from_file( + q2l_input_zp_hf_name, src_hf_dir, hf_weight_map) q2l_quant_bias_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.quant_bias" - q2l_quant_bias_ms_name = self.quant_convert_weight_name(q2l_quant_bias_hf_name) - q2l_quant_bias_ms_param, _ = self.get_safetensor_from_file(q2l_quant_bias_hf_name, src_hf_dir, - hf_weight_map) + q2l_quant_bias_ms_name = self.quant_convert_weight_name( + q2l_quant_bias_hf_name) + q2l_quant_bias_ms_param, _ = self.get_safetensor_from_file( + q2l_quant_bias_hf_name, src_hf_dir, hf_weight_map) q2l_dequant_scale_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.deq_scale" - q2l_dequant_scale_ms_name = self.quant_convert_weight_name(q2l_dequant_scale_hf_name) - q2l_dequant_scale_ms_param, _ = self.get_safetensor_from_file(q2l_dequant_scale_hf_name, src_hf_dir, - hf_weight_map) + q2l_dequant_scale_ms_name = self.quant_convert_weight_name( + q2l_dequant_scale_hf_name) + q2l_dequant_scale_ms_param, _ = self.get_safetensor_from_file( + q2l_dequant_scale_hf_name, src_hf_dir, hf_weight_map) # kv2l kv2l_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.weight" kv2l_ms_name = self.quant_convert_weight_name(kv2l_hf_name) - kv2l_ms_param, _ = self.get_safetensor_from_file(kv2l_hf_name, src_hf_dir, hf_weight_map) + kv2l_ms_param, _ = self.get_safetensor_from_file( + kv2l_hf_name, src_hf_dir, hf_weight_map) kv2l_ms_param = kv2l_ms_param.reshape(kv_head_dim, -1) - kv2l_ms_param = self.infer_trans_rope_weight(kv2l_ms_param, qk_rope_head_dim) + kv2l_ms_param = self.infer_trans_rope_weight(kv2l_ms_param, + qk_rope_head_dim) kv2l_input_scale_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.input_scale" - kv2l_input_scale_ms_name = self.quant_convert_weight_name(kv2l_input_scale_hf_name) - kv2l_input_scale_ms_param, _ = self.get_safetensor_from_file(kv2l_input_scale_hf_name, src_hf_dir, - hf_weight_map) + kv2l_input_scale_ms_name = self.quant_convert_weight_name( + kv2l_input_scale_hf_name) + kv2l_input_scale_ms_param, _ = self.get_safetensor_from_file( + kv2l_input_scale_hf_name, src_hf_dir, hf_weight_map) 
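A toy a8w8 round-trip showing how the tensors being loaded here typically fit together, under assumed semantics: input_scale/input_offset quantize the activation, quant_bias folds the zero-point term into the int32 accumulator, and deq_scale (input_scale * weight_scale) maps the accumulator back to float. Names mirror the parameters above; the arithmetic and values are illustrative only.

import numpy as np

np.random.seed(0)
x = np.random.randn(8).astype(np.float32)              # activation
w_q = np.random.randint(-128, 128, (4, 8)).astype(np.int8)  # quantized weight
input_scale, input_zp, weight_scale = 0.05, 3, 0.02

# quantize the activation with input_scale / input_zp
x_q = np.clip(np.round(x / input_scale) + input_zp, -128, 127).astype(np.int8)

acc = w_q.astype(np.int32) @ x_q.astype(np.int32)      # int32 accumulator
quant_bias = -input_zp * w_q.astype(np.int32).sum(axis=1)  # zero-point correction
deq_scale = input_scale * weight_scale
y = (acc + quant_bias) * deq_scale                     # dequantized output

# reference: dequantize both operands, then matmul in float
y_ref = (w_q * weight_scale) @ ((x_q.astype(np.float32) - input_zp) * input_scale)
assert np.allclose(y, y_ref)

This also suggests why the patch zeroes o_proj's quant_bias on non-zero tp ranks above: in a row-parallel linear the partial results are summed by an all-reduce, so the correction term should presumably be contributed exactly once.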
kv2l_input_zp_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.input_offset" - kv2l_input_zp_ms_name = self.quant_convert_weight_name(kv2l_input_zp_hf_name) - kv2l_input_zp_ms_param, _ = self.get_safetensor_from_file(kv2l_input_zp_hf_name, src_hf_dir, hf_weight_map) + kv2l_input_zp_ms_name = self.quant_convert_weight_name( + kv2l_input_zp_hf_name) + kv2l_input_zp_ms_param, _ = self.get_safetensor_from_file( + kv2l_input_zp_hf_name, src_hf_dir, hf_weight_map) kv2l_quant_bias_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.quant_bias" - kv2l_quant_bias_ms_name = self.quant_convert_weight_name(kv2l_quant_bias_hf_name) - kv2l_quant_bias_ms_param, _ = self.get_safetensor_from_file(kv2l_quant_bias_hf_name, src_hf_dir, - hf_weight_map) - kv2l_quant_bias_ms_param = kv2l_quant_bias_ms_param.reshape(kv_head_dim, -1) - kv2l_quant_bias_ms_param = self.infer_trans_rope_weight(kv2l_quant_bias_ms_param, - qk_rope_head_dim).reshape(-1) + kv2l_quant_bias_ms_name = self.quant_convert_weight_name( + kv2l_quant_bias_hf_name) + kv2l_quant_bias_ms_param, _ = self.get_safetensor_from_file( + kv2l_quant_bias_hf_name, src_hf_dir, hf_weight_map) + kv2l_quant_bias_ms_param = kv2l_quant_bias_ms_param.reshape( + kv_head_dim, -1) + kv2l_quant_bias_ms_param = self.infer_trans_rope_weight( + kv2l_quant_bias_ms_param, qk_rope_head_dim).reshape(-1) kv2l_dequant_scale_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.deq_scale" - kv2l_dequant_scale_ms_name = self.quant_convert_weight_name(kv2l_dequant_scale_hf_name) - kv2l_dequant_scale_ms_param, _ = self.get_safetensor_from_file(kv2l_dequant_scale_hf_name, src_hf_dir, - hf_weight_map) - kv2l_dequant_scale_ms_param = kv2l_dequant_scale_ms_param.reshape(kv_head_dim, -1) - kv2l_dequant_scale_ms_param = self.infer_trans_rope_weight(kv2l_dequant_scale_ms_param, - qk_rope_head_dim).reshape(-1) + kv2l_dequant_scale_ms_name = self.quant_convert_weight_name( + kv2l_dequant_scale_hf_name) + kv2l_dequant_scale_ms_param, _ = self.get_safetensor_from_file( + kv2l_dequant_scale_hf_name, src_hf_dir, hf_weight_map) + kv2l_dequant_scale_ms_param = kv2l_dequant_scale_ms_param.reshape( + kv_head_dim, -1) + kv2l_dequant_scale_ms_param = self.infer_trans_rope_weight( + kv2l_dequant_scale_ms_param, qk_rope_head_dim).reshape(-1) attn_rmsnorm_beta_hf_name = f"model.layers.{layer_id}.input_layernorm.bias" - attn_rmsnorm_beta_ms_name = self.quant_convert_weight_name(attn_rmsnorm_beta_hf_name) - attn_rmsnorm_beta_ms_param, _ = self.get_safetensor_from_file(attn_rmsnorm_beta_hf_name, src_hf_dir, hf_weight_map) + attn_rmsnorm_beta_ms_name = self.quant_convert_weight_name( + attn_rmsnorm_beta_hf_name) + attn_rmsnorm_beta_ms_param, _ = self.get_safetensor_from_file( + attn_rmsnorm_beta_hf_name, src_hf_dir, hf_weight_map) if qkv_concat: qkv2l_weight_name = f"model.layers.{layer_id}.attention.qkv2l._layer.weight" @@ -778,76 +975,149 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): qkv2l_rmsnorm_beta_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.beta" qkv2l_weight = np.concatenate((q2l_ms_param, kv2l_ms_param), 0) - parameter_dict[qkv2l_weight_name] = ms.Parameter(ms.Tensor(qkv2l_weight, ms.int8), name=qkv2l_weight_name, requires_grad=False) - qkv2l_bias = np.concatenate((q2l_quant_bias_ms_param, kv2l_quant_bias_ms_param), 0) - parameter_dict[qkv2l_bias_name] = ms.Parameter(ms.Tensor(qkv2l_bias, ms.int32), name=qkv2l_bias_name,requires_grad=False) - qkv2l_scale = np.concatenate((q2l_dequant_scale_ms_param, kv2l_dequant_scale_ms_param), 0) 
- parameter_dict[qkv2l_scale_name] = ms.Parameter(ms.Tensor(qkv2l_scale, ms.float32), name=qkv2l_scale_name, requires_grad=False) - parameter_dict[qkv2l_quant_zp_name] = ms.Parameter(ms.Tensor(q2l_input_zp_ms_param, ms.int8),requires_grad=False) - parameter_dict[qkv2l_quant_scale_name] = ms.Parameter(ms.Tensor(q2l_input_scale_ms_param, ms.bfloat16), requires_grad=False) - parameter_dict[qkv2l_rmsnorm_beta_name] = ms.Parameter(ms.Tensor(attn_rmsnorm_beta_ms_param, ms.float32), requires_grad=False) + parameter_dict[qkv2l_weight_name] = ms.Parameter( + ms.Tensor(qkv2l_weight, ms.int8), + name=qkv2l_weight_name, + requires_grad=False) + qkv2l_bias = np.concatenate( + (q2l_quant_bias_ms_param, kv2l_quant_bias_ms_param), 0) + parameter_dict[qkv2l_bias_name] = ms.Parameter( + ms.Tensor(qkv2l_bias, ms.int32), + name=qkv2l_bias_name, + requires_grad=False) + qkv2l_scale = np.concatenate( + (q2l_dequant_scale_ms_param, kv2l_dequant_scale_ms_param), 0) + parameter_dict[qkv2l_scale_name] = ms.Parameter( + ms.Tensor(qkv2l_scale, ms.float32), + name=qkv2l_scale_name, + requires_grad=False) + parameter_dict[qkv2l_quant_zp_name] = ms.Parameter( + ms.Tensor(q2l_input_zp_ms_param, ms.int8), requires_grad=False) + parameter_dict[qkv2l_quant_scale_name] = ms.Parameter( + ms.Tensor(q2l_input_scale_ms_param, ms.bfloat16), + requires_grad=False) + parameter_dict[qkv2l_rmsnorm_beta_name] = ms.Parameter( + ms.Tensor(attn_rmsnorm_beta_ms_param, ms.float32), + requires_grad=False) else: - parameter_dict[q2l_ms_name] = ms.Parameter(ms.Tensor(q2l_ms_param, ms.int8), name=q2l_ms_name,requires_grad=False) - parameter_dict[kv2l_ms_name] = ms.Parameter(ms.Tensor(kv2l_ms_param, ms.int8),requires_grad=False) - parameter_dict[q2l_quant_bias_ms_name] = ms.Parameter(ms.Tensor(q2l_quant_bias_ms_param, ms.int32),name=q2l_quant_bias_ms_name,requires_grad = False) - parameter_dict[kv2l_quant_bias_ms_name] = ms.Parameter(ms.Tensor(kv2l_quant_bias_ms_param, ms.int32),name=kv2l_quant_bias_ms_name,requires_grad = False) - parameter_dict[q2l_dequant_scale_ms_name] = ms.Parameter(ms.Tensor(q2l_dequant_scale_ms_param, ms.float32), name=q2l_dequant_scale_ms_name, requires_grad = False) - parameter_dict[kv2l_dequant_scale_ms_name] = ms.Parameter(ms.Tensor(kv2l_dequant_scale_ms_param, ms.float32),name = kv2l_dequant_scale_ms_name, requires_grad = False) - parameter_dict[q2l_input_zp_ms_name] = ms.Parameter(ms.Tensor(q2l_input_zp_ms_param, ms.int8),name=q2l_input_zp_ms_name, requires_grad = False) - parameter_dict[kv2l_input_zp_ms_name] = ms.Parameter(ms.Tensor(kv2l_input_zp_ms_param, ms.int8), name=kv2l_input_zp_ms_name, requires_grad = False) - parameter_dict[q2l_input_scale_ms_name] = ms.Parameter(ms.Tensor(q2l_input_scale_ms_param, ms.bfloat16), name = q2l_input_scale_ms_name, requires_grad = False) - parameter_dict[kv2l_input_scale_ms_name] = ms.Parameter(ms.Tensor(kv2l_input_scale_ms_param, ms.bfloat16), name = kv2l_input_scale_ms_name, requires_grad = False) - parameter_dict[attn_rmsnorm_beta_ms_name] = ms.Parameter(ms.Tensor(attn_rmsnorm_beta_ms_param, ms.float32), name=attn_rmsnorm_beta_ms_name, requires_grad=False) + parameter_dict[q2l_ms_name] = ms.Parameter(ms.Tensor( + q2l_ms_param, ms.int8), + name=q2l_ms_name, + requires_grad=False) + parameter_dict[kv2l_ms_name] = ms.Parameter(ms.Tensor( + kv2l_ms_param, ms.int8), + requires_grad=False) + parameter_dict[q2l_quant_bias_ms_name] = ms.Parameter( + ms.Tensor(q2l_quant_bias_ms_param, ms.int32), + name=q2l_quant_bias_ms_name, + requires_grad=False) + 
parameter_dict[kv2l_quant_bias_ms_name] = ms.Parameter( + ms.Tensor(kv2l_quant_bias_ms_param, ms.int32), + name=kv2l_quant_bias_ms_name, + requires_grad=False) + parameter_dict[q2l_dequant_scale_ms_name] = ms.Parameter( + ms.Tensor(q2l_dequant_scale_ms_param, ms.float32), + name=q2l_dequant_scale_ms_name, + requires_grad=False) + parameter_dict[kv2l_dequant_scale_ms_name] = ms.Parameter( + ms.Tensor(kv2l_dequant_scale_ms_param, ms.float32), + name=kv2l_dequant_scale_ms_name, + requires_grad=False) + parameter_dict[q2l_input_zp_ms_name] = ms.Parameter( + ms.Tensor(q2l_input_zp_ms_param, ms.int8), + name=q2l_input_zp_ms_name, + requires_grad=False) + parameter_dict[kv2l_input_zp_ms_name] = ms.Parameter( + ms.Tensor(kv2l_input_zp_ms_param, ms.int8), + name=kv2l_input_zp_ms_name, + requires_grad=False) + parameter_dict[q2l_input_scale_ms_name] = ms.Parameter( + ms.Tensor(q2l_input_scale_ms_param, ms.bfloat16), + name=q2l_input_scale_ms_name, + requires_grad=False) + parameter_dict[kv2l_input_scale_ms_name] = ms.Parameter( + ms.Tensor(kv2l_input_scale_ms_param, ms.bfloat16), + name=kv2l_input_scale_ms_name, + requires_grad=False) + parameter_dict[attn_rmsnorm_beta_ms_name] = ms.Parameter( + ms.Tensor(attn_rmsnorm_beta_ms_param, ms.float32), + name=attn_rmsnorm_beta_ms_name, + requires_grad=False) _, _ = ms.load_param_into_net(self.network, parameter_dict) del parameter_dict gc.collect() - def infer_quant_net_convert_layer_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_quant_net_convert_layer_weight(self, src_hf_dir, layer_id, + hf_weight_map): """infer quant net convert layer weight""" if layer_id >= 3: - self.infer_quant_process_moe_routed_expert_ffn_weight(src_hf_dir, layer_id, hf_weight_map) - self.infer_quant_process_moe_shared_expert_ffn_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_quant_process_moe_routed_expert_ffn_weight( + src_hf_dir, layer_id, hf_weight_map) + self.infer_quant_process_moe_shared_expert_ffn_weight( + src_hf_dir, layer_id, hf_weight_map) else: - self.infer_quant_process_dense_ffn_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_quant_process_dense_ffn_weight(src_hf_dir, layer_id, + hf_weight_map) - self.infer_quant_process_dense_qkv_weight(src_hf_dir, layer_id, hf_weight_map) - self.infer_quant_process_attention_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_quant_process_dense_qkv_weight(src_hf_dir, layer_id, + hf_weight_map) + self.infer_quant_process_attention_weight(src_hf_dir, layer_id, + hf_weight_map) self.infer_quant_bias_weight(src_hf_dir, layer_id, hf_weight_map) self.infer_process_norm_weight(src_hf_dir, layer_id, hf_weight_map) def convert_weight_name(self, weight_name: str): """replace weight name""" - weight_name = weight_name.replace('embed_tokens.weight', 'tok_embeddings.embedding_weight') - weight_name = weight_name.replace('.self_attn.q_a_proj.', '.attention.q2l_proj.') - weight_name = weight_name.replace('.self_attn.q_a_layernorm.', '.attention.lq_norm.') - weight_name = weight_name.replace('.self_attn.q_b_proj.', '.attention.l2q_proj.') - weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.', '.attention.kv2l.') - weight_name = weight_name.replace('.self_attn.kv_a_layernorm.', '.attention.lkv_norm.') - weight_name = weight_name.replace('.self_attn.kv_b_proj.', '.attention.lkv2kv.') - weight_name = weight_name.replace('.self_attn.o_proj.', '.attention.wo.') + weight_name = weight_name.replace('embed_tokens.weight', + 'tok_embeddings.embedding_weight') + weight_name = 
weight_name.replace('.self_attn.q_a_proj.', + '.attention.q2l_proj.') + weight_name = weight_name.replace('.self_attn.q_a_layernorm.', + '.attention.lq_norm.') + weight_name = weight_name.replace('.self_attn.q_b_proj.', + '.attention.l2q_proj.') + weight_name = weight_name.replace('.self_attn.kv_a_proj_with_mqa.', + '.attention.kv2l.') + weight_name = weight_name.replace('.self_attn.kv_a_layernorm.', + '.attention.lkv_norm.') + weight_name = weight_name.replace('.self_attn.kv_b_proj.', + '.attention.lkv2kv.') + weight_name = weight_name.replace('.self_attn.o_proj.', + '.attention.wo.') weight_name = weight_name.replace('mlp.gate_proj.', 'feed_forward.w1.') weight_name = weight_name.replace('mlp.down_proj.', 'feed_forward.w2.') weight_name = weight_name.replace('mlp.up_proj.', 'feed_forward.w3.') - weight_name = weight_name.replace('mlp.experts.', 'feed_forward.routed_experts.ffn.') - weight_name = weight_name.replace('mlp.shared_experts.gate_proj.', 'feed_forward.shared_experts.w1.') - weight_name = weight_name.replace('mlp.shared_experts.down_proj.', 'feed_forward.shared_experts.w2.') - weight_name = weight_name.replace('mlp.shared_experts.up_proj.', 'feed_forward.shared_experts.w3.') - weight_name = weight_name.replace('mlp.gate.weight', 'feed_forward.routed_experts.router.dense.weight') - weight_name = weight_name.replace('mlp.gate.e_score_correction_bias', - 'feed_forward.routed_experts.router.e_score_correction_bias') - weight_name = weight_name.replace('.input_layernorm.', '.attention_norm.') - weight_name = weight_name.replace('.post_attention_layernorm.', '.ffn_norm.') - weight_name = weight_name.replace('model.norm.weight', 'model.norm_out.weight') + weight_name = weight_name.replace('mlp.experts.', + 'feed_forward.routed_experts.ffn.') + weight_name = weight_name.replace('mlp.shared_experts.gate_proj.', + 'feed_forward.shared_experts.w1.') + weight_name = weight_name.replace('mlp.shared_experts.down_proj.', + 'feed_forward.shared_experts.w2.') + weight_name = weight_name.replace('mlp.shared_experts.up_proj.', + 'feed_forward.shared_experts.w3.') + weight_name = weight_name.replace( + 'mlp.gate.weight', + 'feed_forward.routed_experts.router.dense.weight') + weight_name = weight_name.replace( + 'mlp.gate.e_score_correction_bias', + 'feed_forward.routed_experts.router.e_score_correction_bias') + weight_name = weight_name.replace('.input_layernorm.', + '.attention_norm.') + weight_name = weight_name.replace('.post_attention_layernorm.', + '.ffn_norm.') + weight_name = weight_name.replace('model.norm.weight', + 'model.norm_out.weight') weight_name = self.convert_mtp_weight_name(weight_name) return weight_name def convert_mtp_weight_name(self, weight_name: str): - layer = 0 if 'layers.' not in weight_name else int(weight_name[weight_name.find('layers.'):].split('.')[1]) + layer = 0 if 'layers.' not in weight_name else int( + weight_name[weight_name.find('layers.'):].split('.')[1]) if layer < self.num_layers: return weight_name - mtp_prefix = f'mtp_model' + mtp_prefix = 'mtp_model' is_mtp_layer = 'tok_embeddings' not in weight_name and 'shared_head.' 
not in weight_name mtp_prefix = mtp_prefix if not is_mtp_layer else f'{mtp_prefix}.layer' is_decode_layer = "ffn" in weight_name or "attention" in weight_name or "feed_forward" in weight_name @@ -855,55 +1125,63 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): weight_name = weight_name.replace(f'model.layers.{layer}', mtp_prefix) if "tok_embeddings" in weight_name: - weight_name = weight_name.replace(f'.weight', f'.embedding_weight') + weight_name = weight_name.replace('.weight', '.embedding_weight') if "shared_head." in weight_name: - weight_name = weight_name.replace(f'shared_head.', f'') + weight_name = weight_name.replace('shared_head.', '') return weight_name - def infer_process_moe_routed_expert_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_process_moe_routed_expert_ffn_weight(self, src_hf_dir, layer_id, + hf_weight_map): """process moe router expert weight""" ffn_concat = self.config.model.model_config.ffn_concat # router expert dense router_dense_hf_name = f"model.layers.{layer_id}.mlp.gate.weight" router_dense_ms_name = self.convert_weight_name(router_dense_hf_name) - router_dense_ms_param, _ = self.get_safetensor_from_file(router_dense_hf_name, src_hf_dir, hf_weight_map) + router_dense_ms_param, _ = self.get_safetensor_from_file( + router_dense_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[router_dense_ms_name] = ms.Parameter( ms.from_numpy(router_dense_ms_param).astype(ms.bfloat16), - name=router_dense_ms_name, requires_grad=False) + name=router_dense_ms_name, + requires_grad=False) # e_score_correction_bias e_score_correction_bias_hf_name = f"model.layers.{layer_id}.mlp.gate.e_score_correction_bias" - e_score_correction_bias_ms_name = self.convert_weight_name(e_score_correction_bias_hf_name) - e_score_correction_bias_ms_param, _ = self.get_safetensor_from_file(e_score_correction_bias_hf_name, src_hf_dir, - hf_weight_map) + e_score_correction_bias_ms_name = self.convert_weight_name( + e_score_correction_bias_hf_name) + e_score_correction_bias_ms_param, _ = self.get_safetensor_from_file( + e_score_correction_bias_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[e_score_correction_bias_ms_name] = ms.Parameter( ms.from_numpy(e_score_correction_bias_ms_param).astype(ms.float32), - name=e_score_correction_bias_ms_name, requires_grad=False) + name=e_score_correction_bias_ms_name, + requires_grad=False) w1_list = [] w2_list = [] w3_list = [] w1_ms_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w1.weight" - w1_ms_name = w1_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name(w1_ms_name) + w1_ms_name = w1_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name( + w1_ms_name) w2_ms_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w2.weight" - w2_ms_name = w2_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name(w2_ms_name) + w2_ms_name = w2_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name( + w2_ms_name) w3_ms_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w3.weight" - w3_ms_name = w3_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name(w3_ms_name) + w3_ms_name = w3_ms_name if layer_id < self.num_layers else self.convert_mtp_weight_name( + w3_ms_name) for index in range(0, self.num_router_experts): w1_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.gate_proj.weight" - w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + 
w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w2_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.down_proj.weight" - w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) + w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) w3_hf_name = f"model.layers.{layer_id}.mlp.experts.{index}.up_proj.weight" - w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w1_list.append(w1_ms_param) w2_list.append(w2_ms_param) @@ -917,46 +1195,57 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.routed_experts.ffn.w_gate_hidden.weight" w_gate_hidden_name = w_gate_hidden_name if layer_id < self.num_layers else \ self.convert_mtp_weight_name(w_gate_hidden_name) - w_gate_hidden_np = np.concatenate([w1_ms_stack_param, w3_ms_stack_param], axis=1) - w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).permute(0, 2, 1).astype(dtype=ms.bfloat16) - self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param, - name=w_gate_hidden_name, - requires_grad=False) + w_gate_hidden_np = np.concatenate( + [w1_ms_stack_param, w3_ms_stack_param], axis=1) + w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).permute( + 0, 2, 1).astype(dtype=ms.bfloat16) + self.parameter_dict[w_gate_hidden_name] = ms.Parameter( + w_gate_hidden_param, + name=w_gate_hidden_name, + requires_grad=False) else: - w1_ms_stack_param = ms.from_numpy(w1_ms_stack_param).permute(0, 2, 1).astype(ms.bfloat16) + w1_ms_stack_param = ms.from_numpy(w1_ms_stack_param).permute( + 0, 2, 1).astype(ms.bfloat16) self.parameter_dict[w1_ms_name] = ms.Parameter(w1_ms_stack_param, name=w1_ms_name, requires_grad=False) - w3_ms_stack_param = ms.from_numpy(w3_ms_stack_param).permute(0, 2, 1).astype(ms.bfloat16) + w3_ms_stack_param = ms.from_numpy(w3_ms_stack_param).permute( + 0, 2, 1).astype(ms.bfloat16) self.parameter_dict[w3_ms_name] = ms.Parameter(w3_ms_stack_param, name=w3_ms_name, requires_grad=False) - w2_ms_stack_param = ms.from_numpy(w2_ms_stack_param).permute(0, 2, 1).astype(ms.bfloat16) + w2_ms_stack_param = ms.from_numpy(w2_ms_stack_param).permute( + 0, 2, 1).astype(ms.bfloat16) self.parameter_dict[w2_ms_name] = ms.Parameter(w2_ms_stack_param, name=w2_ms_name, requires_grad=False) - def get_moe_shared_expert_weight(self, w1_hf_name, w2_hf_name, w3_hf_name, src_hf_dir, hf_weight_map): + def get_moe_shared_expert_weight(self, w1_hf_name, w2_hf_name, w3_hf_name, + src_hf_dir, hf_weight_map): if self.ep_method in [EPMethod.DEFAULT, EPMethod.ALLGATHER]: - w1_ms_param, _ = self.get_safetensor_from_file_split_global_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) - w2_ms_param, _ = self.get_safetensor_from_file_split_global_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) - w3_ms_param, _ = self.get_safetensor_from_file_split_global_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) + w2_ms_param, _ = self.get_safetensor_from_file_split_global_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) + w3_ms_param, _ = 
self.get_safetensor_from_file_split_global_group(
+                w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0)
         elif self.ep_method == EPMethod.ALLTOALL:
-            w1_ms_param, _ = self.get_safetensor_from_file(w1_hf_name, src_hf_dir, hf_weight_map)
-            w2_ms_param, _ = self.get_safetensor_from_file(w2_hf_name, src_hf_dir, hf_weight_map)
-            w3_ms_param, _ = self.get_safetensor_from_file(w3_hf_name, src_hf_dir, hf_weight_map)
+            w1_ms_param, _ = self.get_safetensor_from_file(
+                w1_hf_name, src_hf_dir, hf_weight_map)
+            w2_ms_param, _ = self.get_safetensor_from_file(
+                w2_hf_name, src_hf_dir, hf_weight_map)
+            w3_ms_param, _ = self.get_safetensor_from_file(
+                w3_hf_name, src_hf_dir, hf_weight_map)
         else:
-            raise ValueError("Unsupported ep_method:{}".format(self.ep_method))
+            raise ValueError(f"Unsupported ep_method: {self.ep_method}")

         return w1_ms_param, w2_ms_param, w3_ms_param

-    def infer_process_moe_shared_expert_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map):
+    def infer_process_moe_shared_expert_ffn_weight(self, src_hf_dir, layer_id,
+                                                   hf_weight_map):
         """infer process moe shared expert ffn weight"""
         ffn_concat = self.config.model.model_config.ffn_concat
         w1_hf_name = f"model.layers.{layer_id}.mlp.shared_experts.gate_proj.weight"
@@ -967,69 +1256,83 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor):
         w2_ms_name = self.convert_weight_name(w2_hf_name)
         w3_ms_name = self.convert_weight_name(w3_hf_name)

-        w1_ms_param, w2_ms_param, w3_ms_param = self.get_moe_shared_expert_weight(w1_hf_name, w2_hf_name, w3_hf_name,
-                                                                                  src_hf_dir, hf_weight_map)
+        w1_ms_param, w2_ms_param, w3_ms_param = self.get_moe_shared_expert_weight(
+            w1_hf_name, w2_hf_name, w3_hf_name, src_hf_dir, hf_weight_map)

         if ffn_concat:
             w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.shared_experts.w_gate_hidden.weight"
             w_gate_hidden_name = w_gate_hidden_name if layer_id < self.num_layers else \
                 self.convert_mtp_weight_name(w_gate_hidden_name)
-            w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], axis=0)
-            w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(ms.bfloat16)
-            self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param,
-                                                                   name=w_gate_hidden_name,
-                                                                   requires_grad=False)
+            w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param],
+                                              axis=0)
+            w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(
+                ms.bfloat16)
+            self.parameter_dict[w_gate_hidden_name] = ms.Parameter(
+                w_gate_hidden_param,
+                name=w_gate_hidden_name,
+                requires_grad=False)
         else:
-            self.parameter_dict[w1_ms_name] = ms.Parameter(ms.from_numpy(w1_ms_param).astype(ms.bfloat16),
-                                                           name=w1_ms_name,
-                                                           requires_grad=False)
-            self.parameter_dict[w3_ms_name] = ms.Parameter(ms.from_numpy(w3_ms_param).astype(ms.bfloat16),
-                                                           name=w3_ms_name,
-                                                           requires_grad=False)
-            self.parameter_dict[w2_ms_name] = ms.Parameter(ms.from_numpy(w2_ms_param).astype(ms.bfloat16),
-                                                           name=w2_ms_name,
-                                                           requires_grad=False)
+            self.parameter_dict[w1_ms_name] = ms.Parameter(
+                ms.from_numpy(w1_ms_param).astype(ms.bfloat16),
+                name=w1_ms_name,
+                requires_grad=False)
+            self.parameter_dict[w3_ms_name] = ms.Parameter(
+                ms.from_numpy(w3_ms_param).astype(ms.bfloat16),
+                name=w3_ms_name,
+                requires_grad=False)
+            self.parameter_dict[w2_ms_name] = ms.Parameter(
+                ms.from_numpy(w2_ms_param).astype(ms.bfloat16),
+                name=w2_ms_name,
+                requires_grad=False)

-    def infer_process_dense_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map):
+    def infer_process_dense_ffn_weight(self, src_hf_dir, layer_id,
+                                       hf_weight_map):
         """infer process dense ffn weight"""
ffn_concat = self.config.model.model_config.ffn_concat w1_hf_name = f"model.layers.{layer_id}.mlp.gate_proj.weight" w1_ms_name = self.convert_weight_name(w1_hf_name) - w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w1_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_hf_name, src_hf_dir, hf_weight_map, split_axis=0) w2_hf_name = f"model.layers.{layer_id}.mlp.down_proj.weight" w2_ms_name = self.convert_weight_name(w2_hf_name) - w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w2_hf_name, src_hf_dir, hf_weight_map, - split_axis=1) + w2_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w2_hf_name, src_hf_dir, hf_weight_map, split_axis=1) w3_hf_name = f"model.layers.{layer_id}.mlp.up_proj.weight" w3_ms_name = self.convert_weight_name(w3_hf_name) - w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group(w3_hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_ms_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_hf_name, src_hf_dir, hf_weight_map, split_axis=0) if ffn_concat: w_gate_hidden_name = f"model.layers.{layer_id}.feed_forward.w_gate_hidden.weight" - w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], axis=0) - w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype(ms.bfloat16) - self.parameter_dict[w_gate_hidden_name] = ms.Parameter(w_gate_hidden_param, - name=w_gate_hidden_name, - requires_grad=False) + w_gate_hidden_np = np.concatenate([w1_ms_param, w3_ms_param], + axis=0) + w_gate_hidden_param = ms.from_numpy(w_gate_hidden_np).astype( + ms.bfloat16) + self.parameter_dict[w_gate_hidden_name] = ms.Parameter( + w_gate_hidden_param, + name=w_gate_hidden_name, + requires_grad=False) else: - self.parameter_dict[w1_ms_name] = ms.Parameter(ms.from_numpy(w1_ms_param).astype(ms.bfloat16), - name=w1_ms_name, - requires_grad=False) - self.parameter_dict[w3_ms_name] = ms.Parameter(ms.from_numpy(w3_ms_param).astype(ms.bfloat16), - name=w3_ms_name, - requires_grad=False) + self.parameter_dict[w1_ms_name] = ms.Parameter( + ms.from_numpy(w1_ms_param).astype(ms.bfloat16), + name=w1_ms_name, + requires_grad=False) + self.parameter_dict[w3_ms_name] = ms.Parameter( + ms.from_numpy(w3_ms_param).astype(ms.bfloat16), + name=w3_ms_name, + requires_grad=False) - self.parameter_dict[w2_ms_name] = ms.Parameter(ms.from_numpy(w2_ms_param).astype(ms.bfloat16), - name=w2_ms_name, - requires_grad=False) + self.parameter_dict[w2_ms_name] = ms.Parameter( + ms.from_numpy(w2_ms_param).astype(ms.bfloat16), + name=w2_ms_name, + requires_grad=False) - def infer_process_attention_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_process_attention_weight(self, src_hf_dir, layer_id, + hf_weight_map): """infer process attention weight""" num_heads = self.config.model.model_config.num_heads kv_lora_rank = self.config.model.model_config.kv_lora_rank @@ -1044,43 +1347,55 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # q2l_proj q2l_proj_hf_name = f"model.layers.{layer_id}.self_attn.q_a_proj.weight" q2l_proj_ms_name = self.convert_weight_name(q2l_proj_hf_name) - q_a_proj_ms_param, _ = self.get_safetensor_from_file(q2l_proj_hf_name, src_hf_dir, hf_weight_map) + q_a_proj_ms_param, _ = self.get_safetensor_from_file( + q2l_proj_hf_name, src_hf_dir, hf_weight_map) # kv2l kv2l_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_proj_with_mqa.weight" kv2l_ms_name = self.convert_weight_name(kv2l_hf_name) - kv2l_ms_param, _ = self.get_safetensor_from_file(kv2l_hf_name, 
src_hf_dir, hf_weight_map) + kv2l_ms_param, _ = self.get_safetensor_from_file( + kv2l_hf_name, src_hf_dir, hf_weight_map) kv2l_ms_param = kv2l_ms_param.reshape(kv_head_dim, -1) - kv2l_ms_param = self.infer_trans_rope_weight(kv2l_ms_param, qk_rope_head_dim) + kv2l_ms_param = self.infer_trans_rope_weight(kv2l_ms_param, + qk_rope_head_dim) if qkv_concat: - wqkv2l_weight = np.concatenate((q_a_proj_ms_param, kv2l_ms_param), 0) + wqkv2l_weight = np.concatenate((q_a_proj_ms_param, kv2l_ms_param), + 0) wqkv2l_weight_name = f"model.layers.{layer_id}.attention.qkv2l.weight" - self.parameter_dict[wqkv2l_weight_name] = ms.Parameter(ms.from_numpy(wqkv2l_weight).astype(ms.bfloat16), - name=wqkv2l_weight_name, - requires_grad=False) + self.parameter_dict[wqkv2l_weight_name] = ms.Parameter( + ms.from_numpy(wqkv2l_weight).astype(ms.bfloat16), + name=wqkv2l_weight_name, + requires_grad=False) else: - self.parameter_dict[q2l_proj_ms_name] = ms.Parameter(ms.from_numpy(q_a_proj_ms_param).astype(ms.bfloat16), - name=q2l_proj_ms_name, - requires_grad=False) - self.parameter_dict[kv2l_ms_name] = ms.Parameter(ms.from_numpy(kv2l_ms_param).astype(ms.bfloat16), - name=kv2l_ms_name, - requires_grad=False) + self.parameter_dict[q2l_proj_ms_name] = ms.Parameter( + ms.from_numpy(q_a_proj_ms_param).astype(ms.bfloat16), + name=q2l_proj_ms_name, + requires_grad=False) + self.parameter_dict[kv2l_ms_name] = ms.Parameter( + ms.from_numpy(kv2l_ms_param).astype(ms.bfloat16), + name=kv2l_ms_name, + requires_grad=False) # lq_norm lq_norm_hf_name = f"model.layers.{layer_id}.self_attn.q_a_layernorm.weight" lq_norm_ms_name = self.convert_weight_name(lq_norm_hf_name) - lq_norm_ms_param, _ = self.get_safetensor_from_file(lq_norm_hf_name, src_hf_dir, hf_weight_map) - self.parameter_dict[lq_norm_ms_name] = ms.Parameter(ms.from_numpy(lq_norm_ms_param).astype(ms.bfloat16), - name=lq_norm_ms_name, - requires_grad=False) + lq_norm_ms_param, _ = self.get_safetensor_from_file( + lq_norm_hf_name, src_hf_dir, hf_weight_map) + self.parameter_dict[lq_norm_ms_name] = ms.Parameter( + ms.from_numpy(lq_norm_ms_param).astype(ms.bfloat16), + name=lq_norm_ms_name, + requires_grad=False) # l2q_proj l2q_proj_hf_name = f"model.layers.{layer_id}.self_attn.q_b_proj.weight" l2q_proj_ms_name = self.convert_weight_name(l2q_proj_hf_name) - l2q_proj_ms_param, _ = self.get_safetensor_from_file(l2q_proj_hf_name, src_hf_dir, hf_weight_map) + l2q_proj_ms_param, _ = self.get_safetensor_from_file( + l2q_proj_hf_name, src_hf_dir, hf_weight_map) l2q_proj_ms_param = l2q_proj_ms_param.reshape(num_heads, rope_dim, -1) - l2q_proj_ms_param = self.infer_trans_rope_weight(l2q_proj_ms_param, qk_rope_head_dim) + l2q_proj_ms_param = self.infer_trans_rope_weight( + l2q_proj_ms_param, qk_rope_head_dim) l2q_proj_ms_param = l2q_proj_ms_param.reshape(num_heads * rope_dim, -1) - l2q_proj_ms_param = self.split_weight_by_rank(l2q_proj_ms_param, split_axis=0) + l2q_proj_ms_param = self.split_weight_by_rank(l2q_proj_ms_param, + split_axis=0) self.parameter_dict[l2q_proj_ms_name] = ms.Parameter( ms.from_numpy(l2q_proj_ms_param).astype(ms.bfloat16), name=l2q_proj_ms_name, @@ -1089,7 +1404,8 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # lkv_norm lkv_norm_hf_name = f"model.layers.{layer_id}.self_attn.kv_a_layernorm.weight" lkv_norm_ms_name = self.convert_weight_name(lkv_norm_hf_name) - lkv_norm_ms_param, _ = self.get_safetensor_from_file(lkv_norm_hf_name, src_hf_dir, hf_weight_map) + lkv_norm_ms_param, _ = self.get_safetensor_from_file( + lkv_norm_hf_name, src_hf_dir, hf_weight_map) 
self.parameter_dict[lkv_norm_ms_name] = ms.Parameter( ms.from_numpy(lkv_norm_ms_param).astype(ms.bfloat16), name=lkv_norm_ms_name, @@ -1098,43 +1414,52 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # lkv2kv lkv2kv_hf_name = f"model.layers.{layer_id}.self_attn.kv_b_proj.weight" lkv2kv_ms_name = self.convert_weight_name(lkv2kv_hf_name) - lkv2kv_ms_param, _ = self.get_safetensor_from_file(lkv2kv_hf_name, src_hf_dir, hf_weight_map) + lkv2kv_ms_param, _ = self.get_safetensor_from_file( + lkv2kv_hf_name, src_hf_dir, hf_weight_map) lkv2kv_head = qk_nope_head_dim + v_head_dim lkv2kv_ms_param = lkv2kv_ms_param.reshape(num_heads, lkv2kv_head, -1) - value_k_nope, value_v = lkv2kv_ms_param[:, :qk_nope_head_dim, :], lkv2kv_ms_param[:, qk_nope_head_dim:, :] + value_k_nope, value_v = lkv2kv_ms_param[:, : + qk_nope_head_dim, :], lkv2kv_ms_param[:, + qk_nope_head_dim:, :] # value_k_nope value_k_nope = value_k_nope.reshape(-1, value_k_nope.shape[-1]) value_k_nope = self.split_weight_by_rank(value_k_nope, split_axis=0) - name_k_nope = lkv2kv_ms_name.replace(".attention.lkv2kv.", ".attention.lkv2kv_k_nope.") - self.parameter_dict[name_k_nope] = ms.Parameter(ms.from_numpy(value_k_nope).astype(ms.bfloat16), - name=name_k_nope, - requires_grad=False) + name_k_nope = lkv2kv_ms_name.replace(".attention.lkv2kv.", + ".attention.lkv2kv_k_nope.") + self.parameter_dict[name_k_nope] = ms.Parameter( + ms.from_numpy(value_k_nope).astype(ms.bfloat16), + name=name_k_nope, + requires_grad=False) # value_v value_v = value_v.reshape(-1, value_v.shape[-1]) value_v = self.split_weight_by_rank(value_v, split_axis=0) - name_v = lkv2kv_ms_name.replace(".attention.lkv2kv.", ".attention.lkv2kv_v.") - self.parameter_dict[name_v] = ms.Parameter(ms.from_numpy(value_v).astype(ms.bfloat16), - name=name_v, - requires_grad=False) + name_v = lkv2kv_ms_name.replace(".attention.lkv2kv.", + ".attention.lkv2kv_v.") + self.parameter_dict[name_v] = ms.Parameter( + ms.from_numpy(value_v).astype(ms.bfloat16), + name=name_v, + requires_grad=False) # wo wo_hf_name = f"model.layers.{layer_id}.self_attn.o_proj.weight" wo_ms_name = self.convert_weight_name(wo_hf_name) - wo_ms_param, _ = self.get_safetensor_from_file(wo_hf_name, src_hf_dir, hf_weight_map) + wo_ms_param, _ = self.get_safetensor_from_file(wo_hf_name, src_hf_dir, + hf_weight_map) wo_ms_param = self.split_weight_by_rank(wo_ms_param, split_axis=1) - self.parameter_dict[wo_ms_name] = ms.Parameter(ms.from_numpy(wo_ms_param).astype(ms.bfloat16), - name=wo_ms_name, - requires_grad=False) + self.parameter_dict[wo_ms_name] = ms.Parameter( + ms.from_numpy(wo_ms_param).astype(ms.bfloat16), + name=wo_ms_name, + requires_grad=False) def infer_process_norm_weight(self, src_hf_dir, layer_id, hf_weight_map): """infer process attention weight""" # attention_norm attention_norm_hf_name = f"model.layers.{layer_id}.input_layernorm.weight" - attention_norm_ms_name = self.convert_weight_name(attention_norm_hf_name) - attention_norm_ms_param, _ = self.get_safetensor_from_file(attention_norm_hf_name, - src_hf_dir, - hf_weight_map) + attention_norm_ms_name = self.convert_weight_name( + attention_norm_hf_name) + attention_norm_ms_param, _ = self.get_safetensor_from_file( + attention_norm_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[attention_norm_ms_name] = ms.Parameter( ms.from_numpy(attention_norm_ms_param).astype(ms.bfloat16), name=attention_norm_ms_name, @@ -1143,26 +1468,33 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): # ffn_norm ffn_norm_hf_name = 
f"model.layers.{layer_id}.post_attention_layernorm.weight" ffn_norm_ms_name = self.convert_weight_name(ffn_norm_hf_name) - ffn_norm_ms_param, _ = self.get_safetensor_from_file(ffn_norm_hf_name, src_hf_dir, hf_weight_map) + ffn_norm_ms_param, _ = self.get_safetensor_from_file( + ffn_norm_hf_name, src_hf_dir, hf_weight_map) self.parameter_dict[ffn_norm_ms_name] = ms.Parameter( ms.from_numpy(ffn_norm_ms_param).astype(ms.bfloat16), name=ffn_norm_ms_name, requires_grad=False) - def infer_process_mtp_layer_weight(self, src_hf_dir, layer_id, hf_weight_map): + def infer_process_mtp_layer_weight(self, src_hf_dir, layer_id, + hf_weight_map): parameter_dict = {} - mtp_layer_names = ["embed_tokens.weight", "enorm.weight", "hnorm.weight", "eh_proj.weight", - "shared_head.norm.weight", "shared_head.head.weight"] + mtp_layer_names = [ + "embed_tokens.weight", "enorm.weight", "hnorm.weight", + "eh_proj.weight", "shared_head.norm.weight", + "shared_head.head.weight" + ] head_names = ["eh_proj.weight", "shared_head.head.weight"] for prefix_name in mtp_layer_names: hf_name = f"model.layers.{layer_id}.{prefix_name}" ms_name = self.convert_weight_name(hf_name) if prefix_name in head_names and not self.config.parallel_config.vocab_emb_dp: - ms_param, _ = self.get_safetensor_from_file_split_tp_group(hf_name, src_hf_dir, hf_weight_map, - split_axis=0) + ms_param, _ = self.get_safetensor_from_file_split_tp_group( + hf_name, src_hf_dir, hf_weight_map, split_axis=0) else: - ms_param, _ = self.get_safetensor_from_file(hf_name, src_hf_dir, hf_weight_map) - parameter_dict[ms_name] = ms.Parameter(ms.Tensor(ms_param, ms.bfloat16), + ms_param, _ = self.get_safetensor_from_file( + hf_name, src_hf_dir, hf_weight_map) + parameter_dict[ms_name] = ms.Parameter(ms.Tensor( + ms_param, ms.bfloat16), name=ms_name, requires_grad=False) @@ -1171,19 +1503,26 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): def infer_convert_layer_weight(self, src_hf_dir, layer_id, hf_weight_map): """infer convert layer weight""" if layer_id >= 3: - self.infer_process_moe_routed_expert_ffn_weight(src_hf_dir, layer_id, hf_weight_map) - self.infer_process_moe_shared_expert_ffn_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_process_moe_routed_expert_ffn_weight( + src_hf_dir, layer_id, hf_weight_map) + self.infer_process_moe_shared_expert_ffn_weight( + src_hf_dir, layer_id, hf_weight_map) else: - self.infer_process_dense_ffn_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_process_dense_ffn_weight(src_hf_dir, layer_id, + hf_weight_map) - self.infer_process_attention_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_process_attention_weight(src_hf_dir, layer_id, + hf_weight_map) self.infer_process_norm_weight(src_hf_dir, layer_id, hf_weight_map) # convert mtp shared weights. 
if layer_id >= self.num_layers: - self.infer_process_mtp_layer_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_process_mtp_layer_weight(src_hf_dir, layer_id, + hf_weight_map) - def smooth_quant_process_route_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map, parameter_dict, layer_type): + def smooth_quant_process_route_ffn_weight(self, src_hf_dir, layer_id, + hf_weight_map, parameter_dict, + layer_type): """smooth_quant_process_route_ffn_weight""" ffn_concat = self.config.model.model_config.ffn_concat @@ -1193,86 +1532,138 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): w3_scale_name = f"model.layers.{layer_id}.{layer_type}.w3._layer.matmul.weight_scale" w2_weight_name = f"model.layers.{layer_id}.{layer_type}.w2._layer.weight" w2_scale_name = f"model.layers.{layer_id}.{layer_type}.w2._layer.matmul.weight_scale" - w1_weight_param, _ = self.get_routed_safetensor_3_dim(w1_weight_name, src_hf_dir, hf_weight_map, tp_axis=2, - split_ep=self.moe_split_ep, split_tp=self.moe_split_tp) - - w1_scale_param, _ = self.get_routed_safetensor_2_dim(w1_scale_name, src_hf_dir, hf_weight_map, tp_axis=1, - split_ep=self.moe_split_ep, split_tp=self.moe_split_tp) - - w3_weight_param, _ = self.get_routed_safetensor_3_dim(w3_weight_name, src_hf_dir, hf_weight_map, tp_axis=2, - split_ep=self.moe_split_ep, split_tp=self.moe_split_tp) - - w3_scale_param, _ = self.get_routed_safetensor_2_dim(w3_scale_name, src_hf_dir, hf_weight_map, tp_axis=1, - split_ep=self.moe_split_ep, split_tp=self.moe_split_tp) - - w2_weight_param, _ = self.get_routed_safetensor_3_dim(w2_weight_name, src_hf_dir, hf_weight_map, tp_axis=1, - split_ep=self.moe_split_ep, split_tp=self.moe_split_tp) - w2_scale_param, _ = self.get_routed_safetensor_2_dim(w2_scale_name, src_hf_dir, hf_weight_map, - split_ep=self.moe_split_ep, split_tp=False) + w1_weight_param, _ = self.get_routed_safetensor_3_dim( + w1_weight_name, + src_hf_dir, + hf_weight_map, + tp_axis=2, + split_ep=self.moe_split_ep, + split_tp=self.moe_split_tp) + + w1_scale_param, _ = self.get_routed_safetensor_2_dim( + w1_scale_name, + src_hf_dir, + hf_weight_map, + tp_axis=1, + split_ep=self.moe_split_ep, + split_tp=self.moe_split_tp) + + w3_weight_param, _ = self.get_routed_safetensor_3_dim( + w3_weight_name, + src_hf_dir, + hf_weight_map, + tp_axis=2, + split_ep=self.moe_split_ep, + split_tp=self.moe_split_tp) + + w3_scale_param, _ = self.get_routed_safetensor_2_dim( + w3_scale_name, + src_hf_dir, + hf_weight_map, + tp_axis=1, + split_ep=self.moe_split_ep, + split_tp=self.moe_split_tp) + + w2_weight_param, _ = self.get_routed_safetensor_3_dim( + w2_weight_name, + src_hf_dir, + hf_weight_map, + tp_axis=1, + split_ep=self.moe_split_ep, + split_tp=self.moe_split_tp) + w2_scale_param, _ = self.get_routed_safetensor_2_dim( + w2_scale_name, + src_hf_dir, + hf_weight_map, + split_ep=self.moe_split_ep, + split_tp=False) if ffn_concat: concat_weight_name = f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.weight" - concat_weight_param = ms.Tensor(np.concatenate([w1_weight_param, w3_weight_param], axis=2), dtype=ms.int8) - parameter_dict[concat_weight_name] = ms.Parameter(concat_weight_param, name=concat_weight_name, - requires_grad=False) + concat_weight_param = ms.Tensor(np.concatenate( + [w1_weight_param, w3_weight_param], axis=2), + dtype=ms.int8) + parameter_dict[concat_weight_name] = ms.Parameter( + concat_weight_param, + name=concat_weight_name, + requires_grad=False) concat_scale_name = 
f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.matmul.weight_scale" - concat_scale_param = ms.Tensor(np.concatenate([w1_scale_param, w3_scale_param], axis=1), dtype=ms.bfloat16) - parameter_dict[concat_scale_name] = ms.Parameter(concat_scale_param, name=concat_scale_name, - requires_grad=False) + concat_scale_param = ms.Tensor(np.concatenate( + [w1_scale_param, w3_scale_param], axis=1), + dtype=ms.bfloat16) + parameter_dict[concat_scale_name] = ms.Parameter( + concat_scale_param, + name=concat_scale_name, + requires_grad=False) else: # w1 w3 - parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor(w1_weight_param, ms.int8), name=w1_weight_name, + parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor( + w1_weight_param, ms.int8), + name=w1_weight_name, requires_grad=False) - parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor(w3_weight_param, ms.int8), name=w3_weight_name, + parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor( + w3_weight_param, ms.int8), + name=w3_weight_name, requires_grad=False) - parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor(w1_scale_param, ms.bfloat16), - name=w1_scale_name, requires_grad=False) - parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor(w3_scale_param, ms.bfloat16), - name=w3_scale_name, requires_grad=False) + parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor( + w1_scale_param, ms.bfloat16), + name=w1_scale_name, + requires_grad=False) + parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor( + w3_scale_param, ms.bfloat16), + name=w3_scale_name, + requires_grad=False) - parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor(w2_weight_param, ms.int8), name=w2_weight_name, + parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor( + w2_weight_param, ms.int8), + name=w2_weight_name, requires_grad=False) - parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor(w2_scale_param, ms.bfloat16), - name=w2_scale_name, requires_grad=False) - - def get_smooth_quant_moe_shared_expert_weight(self, w1_weight_name, w1_scale_name, w3_weight_name,w3_scale_name, - w2_weight_name, src_hf_dir, hf_weight_map): + parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor( + w2_scale_param, ms.bfloat16), + name=w2_scale_name, + requires_grad=False) + + def get_smooth_quant_moe_shared_expert_weight( + self, w1_weight_name, w1_scale_name, w3_weight_name, w3_scale_name, + w2_weight_name, src_hf_dir, hf_weight_map): '''get_smooth_quant_moe_shared_expert_weight''' if self.ep_method in [EPMethod.DEFAULT, EPMethod.ALLGATHER]: - w1_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w1_weight_name, src_hf_dir, - hf_weight_map, - split_axis=0) - - w1_scale_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w1_scale_name, src_hf_dir, - hf_weight_map, - split_axis=0) - - w3_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w3_weight_name, src_hf_dir, - hf_weight_map, - split_axis=0) - w3_scale_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w3_scale_name, src_hf_dir, - hf_weight_map, - split_axis=0) - - w2_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group(w2_weight_name, src_hf_dir, - hf_weight_map, - split_axis=1) - elif self.ep_method == EPMethod.ALLTOALL: - w1_weight_param, _ = self.get_safetensor_from_file(w1_weight_name, src_hf_dir, hf_weight_map) - w1_scale_param, _ = self.get_safetensor_from_file(w1_scale_name, src_hf_dir, hf_weight_map) + w1_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group( + w1_weight_name, src_hf_dir, hf_weight_map, split_axis=0) - 
w3_weight_param, _ = self.get_safetensor_from_file(w3_weight_name, src_hf_dir, hf_weight_map)
-            w3_scale_param, _ = self.get_safetensor_from_file(w3_scale_name, src_hf_dir, hf_weight_map)
+            w1_scale_param, _ = self.get_safetensor_from_file_split_moe_tp_group(
+                w1_scale_name, src_hf_dir, hf_weight_map, split_axis=0)
-            w2_weight_param, _ = self.get_safetensor_from_file(w2_weight_name, src_hf_dir, hf_weight_map)
+            w3_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group(
+                w3_weight_name, src_hf_dir, hf_weight_map, split_axis=0)
+            w3_scale_param, _ = self.get_safetensor_from_file_split_moe_tp_group(
+                w3_scale_name, src_hf_dir, hf_weight_map, split_axis=0)
+
+            w2_weight_param, _ = self.get_safetensor_from_file_split_moe_tp_group(
+                w2_weight_name, src_hf_dir, hf_weight_map, split_axis=1)
         elif self.ep_method == EPMethod.ALLTOALL:
+            w1_weight_param, _ = self.get_safetensor_from_file(
+                w1_weight_name, src_hf_dir, hf_weight_map)
+            w1_scale_param, _ = self.get_safetensor_from_file(
+                w1_scale_name, src_hf_dir, hf_weight_map)
+
+            w3_weight_param, _ = self.get_safetensor_from_file(
+                w3_weight_name, src_hf_dir, hf_weight_map)
+            w3_scale_param, _ = self.get_safetensor_from_file(
+                w3_scale_name, src_hf_dir, hf_weight_map)
+
+            w2_weight_param, _ = self.get_safetensor_from_file(
+                w2_weight_name, src_hf_dir, hf_weight_map)
         else:
-            raise ValueError("Unsupported ep_method:{}".format(self.ep_method))
+            raise ValueError(f"Unsupported ep_method: {self.ep_method}")

         return w1_weight_param, w1_scale_param, w3_weight_param, w3_scale_param, w2_weight_param

-    def smooth_quant_process_shared_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map, parameter_dict, layer_type):
+    def smooth_quant_process_shared_ffn_weight(self, src_hf_dir, layer_id,
+                                               hf_weight_map, parameter_dict,
+                                               layer_type):
         """smooth_quant_process_shared_ffn_weight"""

         ffn_concat = self.config.model.model_config.ffn_concat
@@ -1287,114 +1678,169 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor):

         w1_weight_param, w1_scale_param, w3_weight_param, w3_scale_param, w2_weight_param = \
             self.get_smooth_quant_moe_shared_expert_weight(w1_weight_name, w1_scale_name, w3_weight_name,
                                                            w3_scale_name, w2_weight_name, src_hf_dir, hf_weight_map)
-        w2_scale_param, _ = self.get_safetensor_from_file(w2_scale_name, src_hf_dir, hf_weight_map)
+        w2_scale_param, _ = self.get_safetensor_from_file(
+            w2_scale_name, src_hf_dir, hf_weight_map)

         if ffn_concat:
             concat_weight_name = f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.weight"
-            concat_weight_param = ms.Tensor(np.concatenate([w1_weight_param, w3_weight_param], axis=0), dtype=ms.int8)
-            parameter_dict[concat_weight_name] = ms.Parameter(concat_weight_param, name=concat_weight_name,
-                                                              requires_grad=False)
+            concat_weight_param = ms.Tensor(np.concatenate(
+                [w1_weight_param, w3_weight_param], axis=0),
+                                            dtype=ms.int8)
+            parameter_dict[concat_weight_name] = ms.Parameter(
+                concat_weight_param,
+                name=concat_weight_name,
+                requires_grad=False)

             concat_scale_name = f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.matmul.weight_scale"
-            concat_scale_param = ms.Tensor(np.concatenate([w1_scale_param, w3_scale_param], axis=0), dtype=ms.bfloat16)
-            parameter_dict[concat_scale_name] = ms.Parameter(concat_scale_param, name=concat_scale_name,
-                                                             requires_grad=False)
+            concat_scale_param = ms.Tensor(np.concatenate(
+                [w1_scale_param, w3_scale_param], axis=0),
+                                           dtype=ms.bfloat16)
+            parameter_dict[concat_scale_name] = ms.Parameter(
+                concat_scale_param,
+                name=concat_scale_name,
+
requires_grad=False) else: # w1 w3 - parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor(w1_weight_param, ms.int8), name=w1_weight_name, + parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor( + w1_weight_param, ms.int8), + name=w1_weight_name, requires_grad=False) - parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor(w3_weight_param, ms.int8), name=w3_weight_name, + parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor( + w3_weight_param, ms.int8), + name=w3_weight_name, requires_grad=False) - parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor(w1_scale_param, ms.bfloat16), - name=w1_scale_name, requires_grad=False) - parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor(w3_scale_param, ms.bfloat16), - name=w3_scale_name, requires_grad=False) + parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor( + w1_scale_param, ms.bfloat16), + name=w1_scale_name, + requires_grad=False) + parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor( + w3_scale_param, ms.bfloat16), + name=w3_scale_name, + requires_grad=False) - parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor(w2_weight_param, ms.int8), name=w2_weight_name, + parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor( + w2_weight_param, ms.int8), + name=w2_weight_name, requires_grad=False) - parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor(w2_scale_param, ms.bfloat16), - name=w2_scale_name, requires_grad=False) - - def smooth_quant_process_ffn_weight(self, src_hf_dir, layer_id, hf_weight_map, parameter_dict, layer_type): + parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor( + w2_scale_param, ms.bfloat16), + name=w2_scale_name, + requires_grad=False) + + def smooth_quant_process_ffn_weight(self, src_hf_dir, layer_id, + hf_weight_map, parameter_dict, + layer_type): """smooth_quant_process_ffn_weight""" ffn_concat = self.config.model.model_config.ffn_concat w1_weight_name = f"model.layers.{layer_id}.{layer_type}.w1._layer.weight" - w1_weight_param, _ = self.get_safetensor_from_file_split_tp_group(w1_weight_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_weight_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_weight_name, src_hf_dir, hf_weight_map, split_axis=0) w1_scale_name = f"model.layers.{layer_id}.{layer_type}.w1._layer.matmul.weight_scale" - w1_scale_param, _ = self.get_safetensor_from_file_split_tp_group(w1_scale_name, src_hf_dir, hf_weight_map, - split_axis=0) + w1_scale_param, _ = self.get_safetensor_from_file_split_tp_group( + w1_scale_name, src_hf_dir, hf_weight_map, split_axis=0) w3_weight_name = f"model.layers.{layer_id}.{layer_type}.w3._layer.weight" - w3_weight_param, _ = self.get_safetensor_from_file_split_tp_group(w3_weight_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_weight_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_weight_name, src_hf_dir, hf_weight_map, split_axis=0) w3_scale_name = f"model.layers.{layer_id}.{layer_type}.w3._layer.matmul.weight_scale" - w3_scale_param, _ = self.get_safetensor_from_file_split_tp_group(w3_scale_name, src_hf_dir, hf_weight_map, - split_axis=0) + w3_scale_param, _ = self.get_safetensor_from_file_split_tp_group( + w3_scale_name, src_hf_dir, hf_weight_map, split_axis=0) w2_weight_name = f"model.layers.{layer_id}.{layer_type}.w2._layer.weight" w2_scale_name = f"model.layers.{layer_id}.{layer_type}.w2._layer.matmul.weight_scale" - w2_weight_param, _ = self.get_safetensor_from_file_split_tp_group(w2_weight_name, src_hf_dir, hf_weight_map, - split_axis=1) - w2_scale_param, _ = self.get_safetensor_from_file(w2_scale_name, 
src_hf_dir, hf_weight_map) + w2_weight_param, _ = self.get_safetensor_from_file_split_tp_group( + w2_weight_name, src_hf_dir, hf_weight_map, split_axis=1) + w2_scale_param, _ = self.get_safetensor_from_file( + w2_scale_name, src_hf_dir, hf_weight_map) if ffn_concat: concat_weight_name = f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.weight" - concat_weight_param = ms.Tensor(np.concatenate([w1_weight_param, w3_weight_param], axis=0), dtype=ms.int8) - parameter_dict[concat_weight_name] = ms.Parameter(concat_weight_param, name=concat_weight_name, - requires_grad=False) + concat_weight_param = ms.Tensor(np.concatenate( + [w1_weight_param, w3_weight_param], axis=0), + dtype=ms.int8) + parameter_dict[concat_weight_name] = ms.Parameter( + concat_weight_param, + name=concat_weight_name, + requires_grad=False) concat_scale_name = f"model.layers.{layer_id}.{layer_type}.w_gate_hidden._layer.matmul.weight_scale" - concat_scale_param = ms.Tensor(np.concatenate([w1_scale_param, w3_scale_param], axis=0), dtype=ms.bfloat16) - parameter_dict[concat_scale_name] = ms.Parameter(concat_scale_param, name=concat_scale_name, - requires_grad=False) + concat_scale_param = ms.Tensor(np.concatenate( + [w1_scale_param, w3_scale_param], axis=0), + dtype=ms.bfloat16) + parameter_dict[concat_scale_name] = ms.Parameter( + concat_scale_param, + name=concat_scale_name, + requires_grad=False) else: # w1 w3 - parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor(w1_weight_param, ms.int8), name=w1_weight_name, + parameter_dict[w1_weight_name] = ms.Parameter(ms.Tensor( + w1_weight_param, ms.int8), + name=w1_weight_name, requires_grad=False) - parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor(w3_weight_param, ms.int8), name=w3_weight_name, + parameter_dict[w3_weight_name] = ms.Parameter(ms.Tensor( + w3_weight_param, ms.int8), + name=w3_weight_name, requires_grad=False) - parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor(w1_scale_param, ms.bfloat16), - name=w1_scale_name, requires_grad=False) - parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor(w3_scale_param, ms.bfloat16), - name=w3_scale_name, requires_grad=False) + parameter_dict[w1_scale_name] = ms.Parameter(ms.Tensor( + w1_scale_param, ms.bfloat16), + name=w1_scale_name, + requires_grad=False) + parameter_dict[w3_scale_name] = ms.Parameter(ms.Tensor( + w3_scale_param, ms.bfloat16), + name=w3_scale_name, + requires_grad=False) - parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor(w2_weight_param, ms.int8), name=w2_weight_name, + parameter_dict[w2_weight_name] = ms.Parameter(ms.Tensor( + w2_weight_param, ms.int8), + name=w2_weight_name, requires_grad=False) - parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor(w2_scale_param, ms.bfloat16), - name=w2_scale_name, requires_grad=False) + parameter_dict[w2_scale_name] = ms.Parameter(ms.Tensor( + w2_scale_param, ms.bfloat16), + name=w2_scale_name, + requires_grad=False) - def smooth_quant_process_qkv_weight(self, src_hf_dir, layer_id, hf_weight_map, parameter_dict): + def smooth_quant_process_qkv_weight(self, src_hf_dir, layer_id, + hf_weight_map, parameter_dict): '''smooth_quant_process_qkv_weight''' qkv_concat = self.config.model.model_config.qkv_concat # q2l_proj q2l_weight_name = f"model.layers.{layer_id}.attention.q2l_proj._layer.weight" - q2l_weight_param, _ = self.get_safetensor_from_file(q2l_weight_name, src_hf_dir, hf_weight_map) + q2l_weight_param, _ = self.get_safetensor_from_file( + q2l_weight_name, src_hf_dir, hf_weight_map) q2l_bias_name = 
f"model.layers.{layer_id}.attention.q2l_proj._layer.matmul.quant_bias" - q2l_bias_param, _ = self.get_safetensor_from_file(q2l_bias_name, src_hf_dir, hf_weight_map) + q2l_bias_param, _ = self.get_safetensor_from_file( + q2l_bias_name, src_hf_dir, hf_weight_map) q2l_scale_name = f"model.layers.{layer_id}.attention.q2l_proj._layer.matmul.dequant_scale" - q2l_scale_param, _ = self.get_safetensor_from_file(q2l_scale_name, src_hf_dir, hf_weight_map) + q2l_scale_param, _ = self.get_safetensor_from_file( + q2l_scale_name, src_hf_dir, hf_weight_map) q2l_quant_zp = f"model.layers.{layer_id}.attention.q2l_proj.quant_op.input_zp" q2l_quant_scale = f"model.layers.{layer_id}.attention.q2l_proj.quant_op.input_scale" - q2l_quant_zp_param, _ = self.get_safetensor_from_file(q2l_quant_zp, src_hf_dir, hf_weight_map) - q2l_quant_scale_param, _ = self.get_safetensor_from_file(q2l_quant_scale, src_hf_dir, hf_weight_map) + q2l_quant_zp_param, _ = self.get_safetensor_from_file( + q2l_quant_zp, src_hf_dir, hf_weight_map) + q2l_quant_scale_param, _ = self.get_safetensor_from_file( + q2l_quant_scale, src_hf_dir, hf_weight_map) kv2l_weight_name = f"model.layers.{layer_id}.attention.kv2l._layer.weight" - kv2l_weight_param, _ = self.get_safetensor_from_file(kv2l_weight_name, src_hf_dir, hf_weight_map) + kv2l_weight_param, _ = self.get_safetensor_from_file( + kv2l_weight_name, src_hf_dir, hf_weight_map) kv2l_bias_name = f"model.layers.{layer_id}.attention.kv2l._layer.matmul.quant_bias" - kv2l_bias_param, _ = self.get_safetensor_from_file(kv2l_bias_name, src_hf_dir, hf_weight_map) + kv2l_bias_param, _ = self.get_safetensor_from_file( + kv2l_bias_name, src_hf_dir, hf_weight_map) kv2l_scale_name = f"model.layers.{layer_id}.attention.kv2l._layer.matmul.dequant_scale" - kv2l_scale_param, _ = self.get_safetensor_from_file(kv2l_scale_name, src_hf_dir, hf_weight_map) + kv2l_scale_param, _ = self.get_safetensor_from_file( + kv2l_scale_name, src_hf_dir, hf_weight_map) kv2l_quant_zp = f"model.layers.{layer_id}.attention.kv2l.quant_op.input_zp" kv2l_quant_scale = f"model.layers.{layer_id}.attention.kv2l.quant_op.input_scale" - kv2l_quant_zp_param, _ = self.get_safetensor_from_file(kv2l_quant_zp, src_hf_dir, hf_weight_map) - kv2l_quant_scale_param, _ = self.get_safetensor_from_file(kv2l_quant_scale, src_hf_dir, hf_weight_map) + kv2l_quant_zp_param, _ = self.get_safetensor_from_file( + kv2l_quant_zp, src_hf_dir, hf_weight_map) + kv2l_quant_scale_param, _ = self.get_safetensor_from_file( + kv2l_quant_scale, src_hf_dir, hf_weight_map) if qkv_concat: qkv2l_weight_name = f"model.layers.{layer_id}.attention.qkv2l._layer.weight" @@ -1403,62 +1849,96 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): qkv2l_quant_zp_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.input_zp" qkv2l_quant_scale_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.input_scale" - qkv2l_weight = np.concatenate((q2l_weight_param, kv2l_weight_param), 0) - parameter_dict[qkv2l_weight_name] = ms.Parameter(ms.Tensor(qkv2l_weight, ms.int8), name=qkv2l_weight_name, - requires_grad=False) + qkv2l_weight = np.concatenate( + (q2l_weight_param, kv2l_weight_param), 0) + parameter_dict[qkv2l_weight_name] = ms.Parameter( + ms.Tensor(qkv2l_weight, ms.int8), + name=qkv2l_weight_name, + requires_grad=False) qkv2l_bias = np.concatenate((q2l_bias_param, kv2l_bias_param), 0) - parameter_dict[qkv2l_bias_name] = ms.Parameter(ms.Tensor(qkv2l_bias, ms.int32), name=qkv2l_bias_name, - requires_grad=False) - qkv2l_scale = np.concatenate((q2l_scale_param, 
kv2l_scale_param), 0) - parameter_dict[qkv2l_scale_name] = ms.Parameter(ms.Tensor(qkv2l_scale, ms.float32), name=qkv2l_scale_name, - requires_grad=False) - parameter_dict[qkv2l_quant_zp_name] = ms.Parameter(ms.Tensor(q2l_quant_zp_param, ms.int8), - name=qkv2l_quant_zp_name, requires_grad=False) - parameter_dict[qkv2l_quant_scale_name] = ms.Parameter(ms.Tensor(q2l_quant_scale_param, ms.bfloat16), - name=qkv2l_quant_scale_name, requires_grad=False) + parameter_dict[qkv2l_bias_name] = ms.Parameter( + ms.Tensor(qkv2l_bias, ms.int32), + name=qkv2l_bias_name, + requires_grad=False) + qkv2l_scale = np.concatenate((q2l_scale_param, kv2l_scale_param), + 0) + parameter_dict[qkv2l_scale_name] = ms.Parameter( + ms.Tensor(qkv2l_scale, ms.float32), + name=qkv2l_scale_name, + requires_grad=False) + parameter_dict[qkv2l_quant_zp_name] = ms.Parameter( + ms.Tensor(q2l_quant_zp_param, ms.int8), + name=qkv2l_quant_zp_name, + requires_grad=False) + parameter_dict[qkv2l_quant_scale_name] = ms.Parameter( + ms.Tensor(q2l_quant_scale_param, ms.bfloat16), + name=qkv2l_quant_scale_name, + requires_grad=False) else: - parameter_dict[q2l_weight_name] = ms.Parameter(ms.Tensor(q2l_weight_param, ms.int8), name=q2l_weight_name, - requires_grad=False) - parameter_dict[kv2l_weight_name] = ms.Parameter(ms.Tensor(kv2l_weight_param, ms.int8), - name=kv2l_weight_name, requires_grad=False) - parameter_dict[q2l_bias_name] = ms.Parameter(ms.Tensor(q2l_bias_param, ms.int32), name=q2l_bias_name, + parameter_dict[q2l_weight_name] = ms.Parameter( + ms.Tensor(q2l_weight_param, ms.int8), + name=q2l_weight_name, + requires_grad=False) + parameter_dict[kv2l_weight_name] = ms.Parameter( + ms.Tensor(kv2l_weight_param, ms.int8), + name=kv2l_weight_name, + requires_grad=False) + parameter_dict[q2l_bias_name] = ms.Parameter(ms.Tensor( + q2l_bias_param, ms.int32), + name=q2l_bias_name, requires_grad=False) - parameter_dict[kv2l_bias_name] = ms.Parameter(ms.Tensor(kv2l_bias_param, ms.int32), name=kv2l_bias_name, + parameter_dict[kv2l_bias_name] = ms.Parameter(ms.Tensor( + kv2l_bias_param, ms.int32), + name=kv2l_bias_name, requires_grad=False) - parameter_dict[q2l_scale_name] = ms.Parameter(ms.Tensor(q2l_scale_param, ms.float32), name=q2l_scale_name, + parameter_dict[q2l_scale_name] = ms.Parameter(ms.Tensor( + q2l_scale_param, ms.float32), + name=q2l_scale_name, requires_grad=False) - parameter_dict[kv2l_scale_name] = ms.Parameter(ms.Tensor(kv2l_scale_param, ms.float32), - name=kv2l_scale_name, requires_grad=False) - parameter_dict[q2l_quant_zp] = ms.Parameter(ms.Tensor(q2l_quant_zp_param, ms.int8), name=q2l_quant_zp, + parameter_dict[kv2l_scale_name] = ms.Parameter( + ms.Tensor(kv2l_scale_param, ms.float32), + name=kv2l_scale_name, + requires_grad=False) + parameter_dict[q2l_quant_zp] = ms.Parameter(ms.Tensor( + q2l_quant_zp_param, ms.int8), + name=q2l_quant_zp, requires_grad=False) - parameter_dict[kv2l_quant_zp] = ms.Parameter(ms.Tensor(kv2l_quant_zp_param, ms.int8), name=kv2l_quant_zp, + parameter_dict[kv2l_quant_zp] = ms.Parameter(ms.Tensor( + kv2l_quant_zp_param, ms.int8), + name=kv2l_quant_zp, requires_grad=False) - parameter_dict[q2l_quant_scale] = ms.Parameter(ms.Tensor(q2l_quant_scale_param, ms.bfloat16), - name=q2l_quant_scale, requires_grad=False) - parameter_dict[kv2l_quant_scale] = ms.Parameter(ms.Tensor(kv2l_quant_scale_param, ms.bfloat16), - name=kv2l_quant_scale, requires_grad=False) + parameter_dict[q2l_quant_scale] = ms.Parameter( + ms.Tensor(q2l_quant_scale_param, ms.bfloat16), + name=q2l_quant_scale, + requires_grad=False) + 
parameter_dict[kv2l_quant_scale] = ms.Parameter( + ms.Tensor(kv2l_quant_scale_param, ms.bfloat16), + name=kv2l_quant_scale, + requires_grad=False) - def infer_smooth_quant_row_linear_split(self, param_name, src_hf_dir, hf_weight_map): + def infer_smooth_quant_row_linear_split(self, param_name, src_hf_dir, + hf_weight_map): '''infer_smooth_quant_row_linear_split''' if param_name.endswith(".weight"): - value, _ = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, - hf_weight_map, - split_axis=1) + value, _ = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=1) elif "quant_op" in param_name: - value, _ = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, - hf_weight_map, - split_axis=0) + value, _ = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) else: value, _ = self.get_safetensor_from_file(param_name, src_hf_dir, hf_weight_map) - quant_bias_set_zero = ["wo._layer.matmul.quant_bias", "w2._layer.matmul.quant_bias"] + quant_bias_set_zero = [ + "wo._layer.matmul.quant_bias", "w2._layer.matmul.quant_bias" + ] if any([name in param_name for name in quant_bias_set_zero]) and \ - get_tensor_model_parallel_rank() != 0: + get_tensor_model_parallel_rank() != 0: value.fill(0) return value - def infer_smooth_quant_get_value(self, param_name, src_hf_dir, hf_weight_map, no_need_split_layer): + def infer_smooth_quant_get_value(self, param_name, src_hf_dir, + hf_weight_map, no_need_split_layer): '''infer_smooth_quant_get_value''' if any([name in param_name for name in no_need_split_layer]): @@ -1466,123 +1946,172 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): hf_weight_map) elif any([name in param_name for name in [".l2q_proj."]]): if param_name.endswith(".weight") or "matmul" in param_name: - value, _ = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, - hf_weight_map, - split_axis=0) + value, _ = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) else: - value, _ = self.get_safetensor_from_file(param_name, src_hf_dir, - hf_weight_map) + value, _ = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) elif any([name in param_name for name in [".wo."]]): - value = self.infer_smooth_quant_row_linear_split(param_name, src_hf_dir, hf_weight_map) - elif any([name in param_name for name in ["lkv2kv_k_nope", "lkv2kv_v"]]): - value, _ = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, hf_weight_map, - split_axis=0) + value = self.infer_smooth_quant_row_linear_split( + param_name, src_hf_dir, hf_weight_map) + elif any( + [name in param_name for name in ["lkv2kv_k_nope", "lkv2kv_v"]]): + value, _ = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) elif "lm_head" in param_name: if not self.config.parallel_config.vocab_emb_dp: - value, _ = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, hf_weight_map, - split_axis=0) + value, _ = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) else: - value, _ = self.get_safetensor_from_file(param_name, src_hf_dir, hf_weight_map) + value, _ = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) else: - raise ValueError(f"not found layer {param_name}, please check safetensors file.") + raise ValueError( + f"not found layer {param_name}, please check safetensors file." 
+ ) return value - def infer_smooth_quant_net_ms_convert_layer_weight(self, src_hf_dir, num_layers, hf_weight_map): + def infer_smooth_quant_net_ms_convert_layer_weight(self, src_hf_dir, + num_layers, + hf_weight_map): '''infer_smooth_quant_net_ms_convert_layer_weight''' - parameter_dict = {} + parameter_dict = {} # type: ignore[var-annotated] - no_need_split_layer = ["tok_embeddings", "norm", "routed_experts.router.dense", - "routed_experts.router.e_score_correction_bias", - "topk_bias"] + no_need_split_layer = [ + "tok_embeddings", "norm", "routed_experts.router.dense", + "routed_experts.router.e_score_correction_bias", "topk_bias" + ] for layer_id in tqdm(range(num_layers), desc="qkv/ffn params load"): if layer_id >= 3: - self.smooth_quant_process_route_ffn_weight(src_hf_dir, layer_id, hf_weight_map, parameter_dict, - "feed_forward.routed_experts.ffn") - self.smooth_quant_process_shared_ffn_weight(src_hf_dir, layer_id, hf_weight_map, parameter_dict, - "feed_forward.shared_experts") + self.smooth_quant_process_route_ffn_weight( + src_hf_dir, layer_id, hf_weight_map, parameter_dict, + "feed_forward.routed_experts.ffn") + self.smooth_quant_process_shared_ffn_weight( + src_hf_dir, layer_id, hf_weight_map, parameter_dict, + "feed_forward.shared_experts") else: - self.smooth_quant_process_ffn_weight(src_hf_dir, layer_id, hf_weight_map, parameter_dict, + self.smooth_quant_process_ffn_weight(src_hf_dir, layer_id, + hf_weight_map, + parameter_dict, "feed_forward") - self.smooth_quant_process_qkv_weight(src_hf_dir, layer_id, hf_weight_map, parameter_dict) - - skip_layer = ["feed_forward.routed_experts.ffn", "feed_forward.shared_experts", "feed_forward.w", - "attention.kv2l", "attention.q"] - - for param_name, _ in tqdm(hf_weight_map.items(), desc="remaining params load"): - if "model.layers" in param_name and int(param_name.split('.')[2]) >= num_layers: + self.smooth_quant_process_qkv_weight(src_hf_dir, layer_id, + hf_weight_map, parameter_dict) + + skip_layer = [ + "feed_forward.routed_experts.ffn", "feed_forward.shared_experts", + "feed_forward.w", "attention.kv2l", "attention.q" + ] + + for param_name, _ in tqdm(hf_weight_map.items(), + desc="remaining params load"): + if "model.layers" in param_name and int( + param_name.split('.')[2]) >= num_layers: continue if any([name in param_name for name in skip_layer]): continue - value = self.infer_smooth_quant_get_value(param_name, src_hf_dir, hf_weight_map, no_need_split_layer) + value = self.infer_smooth_quant_get_value(param_name, src_hf_dir, + hf_weight_map, + no_need_split_layer) dst_dtype = convert_np_to_ms_dtype(value) - parameter_dict[param_name] = ms.Parameter(ms.Tensor(value, dtype=dst_dtype), - name=param_name, requires_grad=False) + parameter_dict[param_name] = ms.Parameter(ms.Tensor( + value, dtype=dst_dtype), + name=param_name, + requires_grad=False) - param_not_load, ckpt_not_load = ms.load_param_into_net(self.network, parameter_dict) - logger.info(f"smoothquant param_not_load:{param_not_load}") - logger.info(f"smoothquant ckpt_not_load:{ckpt_not_load}") + param_not_load, ckpt_not_load = ms.load_param_into_net( + self.network, parameter_dict) + logger.info("smoothquant param_not_load: %s", param_not_load) + logger.info("smoothquant ckpt_not_load: %s", ckpt_not_load) - def infer_gptq_quant_net_ms_convert_layer_weight(self, src_hf_dir, num_layers, hf_weight_map): + def infer_gptq_quant_net_ms_convert_layer_weight(self, src_hf_dir, + num_layers, + hf_weight_map): """infer_gptq_quant_net_ms_convert_layer_weight""" parameter_dict = {} - 
no_need_split_layer = ["tok_embeddings", "norm", "q2l_proj", - "kv2l", "routed_experts.router.dense", - "routed_experts.router.e_score_correction_bias", - "topk_bias"] + no_need_split_layer = [ + "tok_embeddings", "norm", "q2l_proj", "kv2l", + "routed_experts.router.dense", + "routed_experts.router.e_score_correction_bias", "topk_bias" + ] - for param_name, _ in tqdm(hf_weight_map.items(), desc="split safetensors"): - if "model.layers" in param_name and int(param_name.split('.')[2]) >= num_layers: + for param_name, _ in tqdm(hf_weight_map.items(), + desc="split safetensors"): + if "model.layers" in param_name and int( + param_name.split('.')[2]) >= num_layers: continue if any([name in param_name for name in no_need_split_layer]): - value, is_int4 = self.get_safetensor_from_file(param_name, src_hf_dir, - hf_weight_map) - elif any([name in param_name for name in [".l2q_proj.", ".feed_forward.w_gate_hidden.", - "shared_experts.w_gate_hidden"]]): + value, is_int4 = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) + elif any([ + name in param_name for name in [ + ".l2q_proj.", ".feed_forward.w_gate_hidden.", + "shared_experts.w_gate_hidden" + ] + ]): value, is_int4 = self.get_safetensor_from_file_split_tp_group( param_name, src_hf_dir, hf_weight_map, split_axis=1) elif any([name in param_name for name in [".wo."]]): value, is_int4 = self.get_safetensor_from_file_split_tp_group( param_name, src_hf_dir, hf_weight_map, split_axis=0) - elif any([name in param_name for name in [".feed_forward.w2.","shared_experts.w2"]]): - value = self.infer_smooth_quant_row_linear_split(param_name, src_hf_dir, hf_weight_map) + elif any([ + name in param_name + for name in [".feed_forward.w2.", "shared_experts.w2"] + ]): + value = self.infer_smooth_quant_row_linear_split( + param_name, src_hf_dir, hf_weight_map) is_int4 = False elif ".routed_experts.ffn.w_gate_hidden." 
in param_name: - value, is_int4 = self.get_safetensor_from_file(param_name, src_hf_dir, hf_weight_map) + value, is_int4 = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) value_list = [] for experts_id in range(value.shape[0]): - value_list.append(self.split_weight_by_rank(value[experts_id, :, :], split_axis=1)) + value_list.append( + self.split_weight_by_rank(value[experts_id, :, :], + split_axis=1)) value = np.stack(value_list, axis=0) elif ".routed_experts.ffn.w2" in param_name: - value, is_int4 = self.get_safetensor_from_file(param_name, src_hf_dir, hf_weight_map) + value, is_int4 = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) value_list = [] for experts_id in range(value.shape[0]): - value_list.append(self.split_weight_by_rank(value[experts_id, :, :], split_axis=0)) + value_list.append( + self.split_weight_by_rank(value[experts_id, :, :], + split_axis=0)) value = np.stack(value_list, axis=0) - elif any([name in param_name for name in ["lkv2kv_k_nope", "lkv2kv_v"]]): - value, is_int4 = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, hf_weight_map, - split_axis=0) + elif any( + [name in param_name + for name in ["lkv2kv_k_nope", "lkv2kv_v"]]): + value, is_int4 = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) elif "lm_head" in param_name: if not self.config.parallel_config.vocab_emb_dp: - value, is_int4 = self.get_safetensor_from_file_split_tp_group(param_name, src_hf_dir, hf_weight_map, - split_axis=0) + value, is_int4 = self.get_safetensor_from_file_split_tp_group( + param_name, src_hf_dir, hf_weight_map, split_axis=0) else: - value, is_int4 = self.get_safetensor_from_file(param_name, src_hf_dir, hf_weight_map) + value, is_int4 = self.get_safetensor_from_file( + param_name, src_hf_dir, hf_weight_map) else: - raise ValueError(f"not found layer {param_name}, please check safetensors file.") + raise ValueError( + f"not found layer {param_name}, please check safetensors file." 
+ ) dst_dtype = convert_np_to_ms_dtype(value) if is_int4: - parameter_dict[param_name] = ms.Parameter(ms.Tensor(value, dtype=dtype.qint4x2), - name=param_name, requires_grad=False) + parameter_dict[param_name] = ms.Parameter(ms.Tensor( + value, dtype=dtype.qint4x2), + name=param_name, + requires_grad=False) else: - parameter_dict[param_name] = ms.Parameter(ms.Tensor(value, dtype=dst_dtype), - name=param_name, requires_grad=False) + parameter_dict[param_name] = ms.Parameter(ms.Tensor( + value, dtype=dst_dtype), + name=param_name, + requires_grad=False) _, _ = ms.load_param_into_net(self.network, parameter_dict) def load_safetensors_shard(self, src_hf_dir, is_mtp_model=False): @@ -1596,12 +2125,12 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): if ('quant' in file and self.is_quant) or \ ('quant' not in file and (not self.is_quant or is_mtp_model)): param_json_path = os.path.join(src_hf_dir, file) - with open(param_json_path, "r") as fp: + with open(param_json_path) as fp: hf_weight_map = json.load(fp)['weight_map'] break elif file.endswith('_name_map.json'): param_json_path = os.path.join(src_hf_dir, file) - with open(param_json_path, "r") as fp: + with open(param_json_path) as fp: hf_weight_map = json.load(fp) if hf_weight_map.get('weight_map'): hf_weight_map = hf_weight_map['weight_map'] @@ -1618,26 +2147,35 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): self.infer_convert_outer_weight(src_hf_dir, hf_weight_map) if quant_method and quant_method == "gptq-pergroup": - self.infer_gptq_quant_net_ms_convert_layer_weight(src_hf_dir, self.num_layers, hf_weight_map) + self.infer_gptq_quant_net_ms_convert_layer_weight( + src_hf_dir, self.num_layers, hf_weight_map) return if quant_method and quant_method == "smoothquant": - self.infer_smooth_quant_net_ms_convert_layer_weight(src_hf_dir, self.num_layers, hf_weight_map) + self.infer_smooth_quant_net_ms_convert_layer_weight( + src_hf_dir, self.num_layers, hf_weight_map) return if quant_method and quant_method == "osl": - self.infer_smooth_quant_net_ms_convert_layer_weight(src_hf_dir, self.num_layers, hf_weight_map) + self.infer_smooth_quant_net_ms_convert_layer_weight( + src_hf_dir, self.num_layers, hf_weight_map) return enable_tqdm = rank_id == 0 mtp_layers = self.config.model.model_config.num_nextn_predict_layers start_layer = 0 if not is_mtp_model else self.num_layers end_layer = self.num_layers if not is_mtp_model else self.num_layers + mtp_layers - for layer_id in tqdm(range(start_layer, end_layer), desc="Weight loading", disable=not enable_tqdm): + for layer_id in tqdm(range(start_layer, end_layer), + desc="Weight loading", + disable=not enable_tqdm): if self.is_quant: - self.infer_quant_net_convert_layer_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_quant_net_convert_layer_weight( + src_hf_dir, layer_id, hf_weight_map) else: - self.infer_convert_layer_weight(src_hf_dir, layer_id, hf_weight_map) + self.infer_convert_layer_weight(src_hf_dir, layer_id, + hf_weight_map) - param_not_load, ckpt_not_load = ms.load_param_into_net(self.network, self.parameter_dict) - logger.info("param_not_load: %s, ckpt_not_load: %s" % (str(param_not_load), str(ckpt_not_load))) + param_not_load, ckpt_not_load = ms.load_param_into_net( + self.network, self.parameter_dict) + logger.info("param_not_load: %s, ckpt_not_load: %s", + str(param_not_load), str(ckpt_not_load)) del self.parameter_dict gc.collect() -- Gitee From aefe3efd68294faabf02f0d126effe3909b87e31 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Wed, 25 Jun 2025 15:19:40 +0800 
Subject: [PATCH 44/76] update good precision

---
 vllm_mindspore/model_executor/layers/fused_moe/layer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index d83347c4a..614eb6ae8 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -824,7 +824,8 @@ class FusedMoE(nn.Cell):
         """
         The pplx combine kernel reduces across GPU ranks by default.
         """
-        return tensor_model_parallel_all_reduce(final_hidden_states)
+        # return tensor_model_parallel_all_reduce(final_hidden_states)
+        return final_hidden_states

     def construct(self, hidden_states: Tensor, router_logits: Tensor):
-- Gitee

From 57a13bd3e66ac29186abbdc138735852ccd4f2e8 Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Wed, 25 Jun 2025 18:05:07 +0800
Subject: [PATCH 45/76] update

---
 .../model_executor/layers/fused_moe/layer.py  | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index 614eb6ae8..cf58f6bf4 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -847,8 +847,23 @@ class FusedMoE(nn.Cell):
             tokens_num_total = self.all_gather_from_dp_group(tokens_num)
             tokens_num_total = tokens_num_total.reshape(-1)
             tokens_cumulative = mint.cumsum(tokens_num_total, 0)
-            hidden_states = self.all_gather_from_dp_group(hidden_states)
-            router_logits = self.all_gather_from_dp_group(router_logits)
+            start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1]
+            end = tokens_cumulative[self.dp_rank]
+
+            hidden_buffer = mint.zeros(tokens_cumulative[-1].item(),
+                                       hidden_states.shape[-1],
+                                       dtype=hidden_states.dtype)
+            hidden_buffer[start:end] = hidden_states
+            mint.distributed.all_reduce(hidden_buffer, self.ep_group)
+
+            logit_buffer = mint.zeros(tokens_cumulative[-1].item(),
+                                      router_logits.shape[-1],
+                                      dtype=router_logits.dtype)
+            logit_buffer[start:end] = router_logits
+            mint.distributed.all_reduce(logit_buffer, self.ep_group)
+
+            hidden_states = hidden_buffer
+            router_logits = logit_buffer

             # Matrix multiply.
final_hidden_states = self.quant_method.apply( -- Gitee From 47b6024dbd955a6ca1c5badeb577f50075bc25cc Mon Sep 17 00:00:00 2001 From: wangpingan2 Date: Wed, 25 Jun 2025 15:53:29 +0800 Subject: [PATCH 46/76] deepseek add mlapre op --- .jenkins/test/config/dependent_packages.yaml | 1 - install_depend_pkgs.sh | 4 +- tests/mindformers | 2 +- .../mf_models/deepseekv3_weight_processor.py | 54 ++++++++++++------- 4 files changed, 37 insertions(+), 24 deletions(-) diff --git a/.jenkins/test/config/dependent_packages.yaml b/.jenkins/test/config/dependent_packages.yaml index 16ca50fdb..30637669a 100644 --- a/.jenkins/test/config/dependent_packages.yaml +++ b/.jenkins/test/config/dependent_packages.yaml @@ -1,6 +1,5 @@ mindspore: 'https://repo.mindspore.cn/mindspore/mindspore/version/202506/20250613/br_infer_iter_20250613031508_11bcfd2ff4dc201a1c07e5d525cbeff7ec7f9558_newest/' - mindspore_gs: 'https://repo.mindspore.cn/mindspore/golden-stick/version/202506/20250604/master_20250604160014_35fcbec4406d3b18faf02ef99fcbe2741e80348e_newest/' diff --git a/install_depend_pkgs.sh b/install_depend_pkgs.sh index 01acabfca..607631e45 100644 --- a/install_depend_pkgs.sh +++ b/install_depend_pkgs.sh @@ -64,10 +64,10 @@ pip uninstall mindspore -y && pip install "$mindspore_name" || { echo "Failed to echo "========= Installing mindformers" -mf_dir=br_infer_deepseek_os +mf_dir=mindformers-os if [ ! -d "$mf_dir" ]; then git clone https://gitee.com/mindspore/mindformers.git -b br_infer_deepseek_os "$mf_dir" - git checkout 1e6aad8700aff37c0f9e7ddc0bfd62bf7123ad51 + git checkout 849e943230b7f30317654327109df1dd7acd4b4c else echo "The $mf_dir folder already exists and will not be re-downloaded." fi diff --git a/tests/mindformers b/tests/mindformers index 1e6aad870..849e94323 160000 --- a/tests/mindformers +++ b/tests/mindformers @@ -1 +1 @@ -Subproject commit 1e6aad8700aff37c0f9e7ddc0bfd62bf7123ad51 +Subproject commit 849e943230b7f30317654327109df1dd7acd4b4c diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py b/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py index c63abe694..58ac64dce 100644 --- a/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py +++ b/vllm_mindspore/model_executor/models/mf_models/deepseekv3_weight_processor.py @@ -66,18 +66,22 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): self.expert_num = self.config.moe_config.expert_num self.moe_split_tp = self.moe_tp_size > 1 self.moe_split_ep = self.moe_ep_size > 1 - logger.debug("Deepseekv3 weight split info:") - logger.debug("global_rank_id: %s", self.global_rank_id) - logger.debug("tp_group_size: %s", self.tp_group_size) - logger.debug("dp_group_size: %s", self.dp_group_size) - logger.debug("tp_rank_id: %s", self.tp_rank_id) - logger.debug("ep_method: %s", self.ep_method.name) - logger.debug("num_router_experts: %s", self.num_router_experts) - logger.debug("ep_group_nums: %s", self.ep_group_nums) - logger.debug("moe_ep_rank_id: %s", self.moe_ep_rank_id) - logger.debug("moe_tp_rank_id: %s", self.moe_tp_rank_id) - logger.debug("moe_ep_size: %s", self.moe_ep_size) - logger.debug("moe_tp_size: %s", self.moe_tp_size) + logger.debug( + "Deepseekv3 weight split info:" + "global_rank_id: %s \n" + "tp_group_size: %s \n" + "dp_group_size: %s \n" + "tp_rank_id: %s \n" + "ep_method: %s \n" + "num_router_experts: %s \n" + "ep_group_nums: %s \n" + "moe_ep_rank_id: %s \n" + "moe_tp_rank_id: %s \n" + "moe_ep_size: %s \n" + "moe_tp_size: %s", self.global_rank_id, 
self.tp_group_size, + self.dp_group_size, self.tp_rank_id, self.ep_method.name, + self.num_router_experts, self.ep_group_nums, self.moe_ep_rank_id, + self.moe_tp_rank_id, self.moe_ep_size, self.moe_tp_size) def quant_convert_weight_name(self, weight_name: str): """replace quant net weight name""" @@ -973,20 +977,30 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): qkv2l_quant_zp_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.input_zp" qkv2l_quant_scale_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.input_scale" qkv2l_rmsnorm_beta_name = f"model.layers.{layer_id}.attention.qkv2l.quant_op.beta" + if hasattr(self.config.model.model_config, "use_mla_pre" + ) and self.config.model.model_config.use_mla_pre: + qkv2l_weight = np.concatenate((kv2l_ms_param, q2l_ms_param), 0) + qkv2l_bias = np.concatenate( + (kv2l_quant_bias_ms_param, q2l_quant_bias_ms_param), 0) + qkv2l_scale = np.concatenate( + (kv2l_dequant_scale_ms_param, q2l_dequant_scale_ms_param), + 0) + else: + qkv2l_weight = np.concatenate((q2l_ms_param, kv2l_ms_param), 0) + qkv2l_bias = np.concatenate( + (q2l_quant_bias_ms_param, kv2l_quant_bias_ms_param), 0) + qkv2l_scale = np.concatenate( + (q2l_dequant_scale_ms_param, kv2l_dequant_scale_ms_param), + 0) - qkv2l_weight = np.concatenate((q2l_ms_param, kv2l_ms_param), 0) parameter_dict[qkv2l_weight_name] = ms.Parameter( ms.Tensor(qkv2l_weight, ms.int8), name=qkv2l_weight_name, requires_grad=False) - qkv2l_bias = np.concatenate( - (q2l_quant_bias_ms_param, kv2l_quant_bias_ms_param), 0) parameter_dict[qkv2l_bias_name] = ms.Parameter( ms.Tensor(qkv2l_bias, ms.int32), name=qkv2l_bias_name, requires_grad=False) - qkv2l_scale = np.concatenate( - (q2l_dequant_scale_ms_param, kv2l_dequant_scale_ms_param), 0) parameter_dict[qkv2l_scale_name] = ms.Parameter( ms.Tensor(qkv2l_scale, ms.float32), name=qkv2l_scale_name, @@ -1975,7 +1989,7 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): num_layers, hf_weight_map): '''infer_smooth_quant_net_ms_convert_layer_weight''' - parameter_dict = {} # type: ignore[var-annotated] + parameter_dict: dict[str, ms.Parameter] = {} no_need_split_layer = [ "tok_embeddings", "norm", "routed_experts.router.dense", @@ -2175,7 +2189,7 @@ class DeepseekV3WeightProcessor(BaseWeightProcessor): param_not_load, ckpt_not_load = ms.load_param_into_net( self.network, self.parameter_dict) - logger.info("param_not_load: %s, ckpt_not_load: %s", - str(param_not_load), str(ckpt_not_load)) + logger.info("param_not_load: %s, ckpt_not_load: %s", param_not_load, + ckpt_not_load) del self.parameter_dict gc.collect() -- Gitee From ff4960a8f1a99ad3e807ba6d3c6298999a3b6866 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 09:18:01 +0800 Subject: [PATCH 47/76] update --- vllm_mindspore/model_executor/layers/fused_moe/layer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index cf58f6bf4..6f9bf8bc4 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -854,13 +854,13 @@ class FusedMoE(nn.Cell): hidden_states.shape[-1], dtype=hidden_states.dtype) hidden_buffer[start:end] = hidden_states - mint.distributed.all_reduce(hidden_buffer, self.ep_group) + mint.distributed.all_reduce(hidden_buffer, group=self.dp_group) logit_buffer = mint.zeros(tokens_cumulative[-1].item(), router_logits.shape[-1], dtype=router_logits.dtype) 
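        # The buffer reduce must run over the data-parallel group: each DP
        # rank fills a disjoint slice, so summing across dp_group rebuilds
        # the full batch, while ep_group would mix the wrong ranks. Passing
        # the group positionally is also unsafe if the signature mirrors
        # torch.distributed.all_reduce (the second positional slot is `op`),
        # hence the keyword form below.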
logit_buffer[start:end] = router_logits - mint.distributed.all_reduce(logit_buffer, self.ep_group) + mint.distributed.all_reduce(logit_buffer, group=self.dp_group) hidden_states = hidden_buffer router_logits = logit_buffer @@ -886,7 +886,7 @@ class FusedMoE(nn.Cell): if self.pure_tp: # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) - mint.distributed.all_reduce(final_hidden_states, self.ep_group) + mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) if self.dp_size > 1: start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] end = tokens_cumulative[self.dp_rank] -- Gitee From 03d4dfd9831292eadf9b4103d7ad11f9ef8fdbdd Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 09:30:34 +0800 Subject: [PATCH 48/76] update --- vllm_mindspore/model_executor/layers/fused_moe/layer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 6f9bf8bc4..80bc19bb6 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -850,14 +850,14 @@ class FusedMoE(nn.Cell): start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] end = tokens_cumulative[self.dp_rank] - hidden_buffer = mint.zeros(tokens_cumulative[-1].item(), - hidden_states.shape[-1], + hidden_buffer = mint.zeros((tokens_cumulative[-1].item(), + hidden_states.shape[-1]), dtype=hidden_states.dtype) hidden_buffer[start:end] = hidden_states mint.distributed.all_reduce(hidden_buffer, group=self.dp_group) - logit_buffer = mint.zeros(tokens_cumulative[-1].item(), - router_logits.shape[-1], + logit_buffer = mint.zeros((tokens_cumulative[-1].item(), + router_logits.shape[-1]), dtype=router_logits.dtype) logit_buffer[start:end] = router_logits mint.distributed.all_reduce(logit_buffer, group=self.dp_group) -- Gitee From f35239220ba2537caef95a79e05b984962ae19f5 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 11:45:06 +0800 Subject: [PATCH 49/76] update --- .../model_executor/layers/fused_moe/layer.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 80bc19bb6..949fdbef0 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -536,9 +536,10 @@ class FusedMoE(nn.Cell): if self.dp_size > 1 and self.ep_size == 1 or self.dp_size == 1: self.pure_tp = True + self.all_reduce_from_ep_group = ops.AllReduce(group=self.ep_group) if self.dp_size > 1: - self.all_gather_from_dp_group = ops.AllGather(self.dp_group) - self.all_reduce_from_world_group = ops.AllReduce(self.ep_group) + self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) + self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group) @property def tp_size(self): @@ -847,20 +848,21 @@ class FusedMoE(nn.Cell): tokens_num_total = self.all_gather_from_dp_group(tokens_num) tokens_num_total = tokens_num_total.reshape(-1) tokens_cumulative = mint.cumsum(tokens_num_total, 0) - start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] - end = tokens_cumulative[self.dp_rank] + start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() + end = tokens_cumulative[self.dp_rank].item() hidden_buffer = 
mint.zeros((tokens_cumulative[-1].item(), hidden_states.shape[-1]), dtype=hidden_states.dtype) hidden_buffer[start:end] = hidden_states - mint.distributed.all_reduce(hidden_buffer, group=self.dp_group) - + # mint.distributed.all_reduce(hidden_buffer, group=self.dp_group) + hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) logit_buffer = mint.zeros((tokens_cumulative[-1].item(), router_logits.shape[-1]), dtype=router_logits.dtype) logit_buffer[start:end] = router_logits - mint.distributed.all_reduce(logit_buffer, group=self.dp_group) + # mint.distributed.all_reduce(logit_buffer, group=self.dp_group) + logit_buffer = self.all_reduce_from_dp_group(logit_buffer) hidden_states = hidden_buffer router_logits = logit_buffer @@ -886,7 +888,8 @@ class FusedMoE(nn.Cell): if self.pure_tp: # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) - mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) + # mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) + final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) if self.dp_size > 1: start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] end = tokens_cumulative[self.dp_rank] -- Gitee From 45424e42c5373652f7cf23639c5aa224c44d60bf Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 11:58:16 +0800 Subject: [PATCH 50/76] test moe2 --- .../layers/fused_moe/fused_moe2.py | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py new file mode 100644 index 000000000..f3c441461 --- /dev/null +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py @@ -0,0 +1,186 @@ +from typing import Optional + +from mindspore import Tensor, mint, ops, nn +from mindspore.ops.auto_generate import (GroupedMatmulV4, + FusedAddTopKDiv, + MoeInitRoutingV2, + MoeTokenUnpermute) +import mindspore as ms +from vllm.distributed.parallel_state import get_ep_group, get_dp_group + +def fused_topk( + hidden_states: Tensor, + gating_output: Tensor, + topk: int, + renormalize: bool, + indices_type = None, +) -> tuple[Tensor, Tensor]: + assert hidden_states.shape[0] == gating_output.shape[0], ( + "Number of tokens mismatch") + score = mint.softmax(gating_output, dim=-1) + topk_weights, topk_ids = mint.topk( + score, + k=topk, + dim=-1 + ) + if renormalize: + topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) + + if indices_type is not None: + topk_ids = topk_ids.to(indices_type) + return topk_weights, topk_ids + + +def grouped_topk( + hidden_states: Tensor, + gating_output: Tensor, + topk: int, + renormalize: bool, + num_expert_group: int = 0, + topk_group: int = 0, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[Tensor] = None +) -> tuple[Tensor, Tensor]: + fused_add_topk_div = FusedAddTopKDiv() + assert hidden_states.shape[0] == gating_output.shape[0], ( + "Number of tokens mismatch") + scoring_type = 0 # sigmoid + topk_in_group = 2 + topk_weights, topk_ids = fused_add_topk_div( + gating_output, + e_score_correction_bias, + num_expert_group, + topk_group, + topk, + topk_in_group, + scoring_type, + renormalize) + + return topk_weights, topk_ids + + +class FusedExperts(nn.Cell): + def __init__(self): + super().__init__() + self.group_matmul_ops = GroupedMatmulV4() + self.moe_init_routing_op = MoeInitRoutingV2() + 
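+        # Assumed op semantics, inferred from their use in _run_tp_moe below:
+        # MoeInitRoutingV2 permutes tokens into expert-contiguous order and
+        # returns an unsort map plus per-expert token counts (group_list)
+        # for the grouped matmuls; MoeTokenUnpermute restores the original
+        # token order while folding in the routing probabilities.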
self.moe_token_unpermute = MoeTokenUnpermute() + + def construct(self, + hidden_states: Tensor, + w1: Tensor, + w2: Tensor, + topk_weights: Tensor, + topk_ids: Tensor, + activation: str = "silu", + global_num_experts: int = -1, + apply_router_weight_on_input: bool = False, + expert_map: Optional[Tensor] = None, + tp_size: int = 1, + ep_size: int = 0) -> Tensor: + + if tp_size >= 1: + # no ep, pure tp + if ep_size == 1: + hidden_states = self._run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + # ep_size > 1 : pure ep or tp + ep + else: + # pure ep + if tp_size == 1: + hidden_states = self._run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + # tp_size > 1 : tp + ep + else: + hidden_states = self._run_tp_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + + return hidden_states + + + def _gate_activation(self, gate, activation): + if activation == "silu": + return mint.nn.functional.silu(gate) + elif activation == "gelu": + return mint.nn.functional.gelu(gate) + else: + raise ValueError(f"Unsupported activation function: {activation}") + + + + + def _group_matmul(self, hidden_states, weight, group_list): + return self.group_matmul_ops([hidden_states], [weight], + None, None, None, None, None, None, + group_list, split_item=3, group_type=0, group_list_type=1)[0] + + def _run_ep_moe(self, + hidden_states, + w1, + w2, + topk_ids, + topk_weights, + activation, + global_num_experts, + apply_router_weight_on_input): + hidden_states = self._group_matmul(hidden_states, w1, topk_ids) + hidden_states = self._gate_activation(hidden_states, activation) + hidden_states = self._group_matmul(hidden_states, w2, topk_ids) + return hidden_states + + + def _run_tp_moe(self, + hidden_states, + w1, + w2, + topk_ids, + topk_weights, + activation, + global_num_experts, + apply_router_weight_on_input): + topk_weights = topk_weights.astype(hidden_states.dtype) + topk_ids = topk_ids.astype(ms.int32) + + sorted_input_tensor, unsort_map, group_list, _ = \ + self.moe_init_routing_op( + hidden_states, + topk_ids, + active_num=0, + expert_capacity=0, + expert_num=global_num_experts, + drop_pad_mode=0, + expert_tokens_count_or_cumsum_flag=2, + expert_tokens_before_capacity_flag=True) + + group_list = group_list.astype(ms.int64) + + gate_hidden_out = self._group_matmul(sorted_input_tensor, mint.transpose(w1, -1, -2), group_list) + gate, hidden = mint.split(gate_hidden_out, + (w1.shape[1] // 2, w1.shape[1] // 2), -1) + gate = self._gate_activation(gate, activation) + hidden = mint.mul(hidden, gate) + expert_output = self._group_matmul(hidden, mint.transpose(w2, -1, -2), group_list) + expert_output = mint.nan_to_num(expert_output, 0, 0, 0) + moe_output = self.moe_token_unpermute(permuted_tokens=expert_output, + sorted_indices=unsort_map, + probs=topk_weights, + padded_mode=False, + restore_shape=None) + return moe_output + + + def _run_tp_ep_moe( + self, + hidden_states, + w1, + w2, + group_list, + group_logits, + activation, + global_num_experts, + apply_router_weight_on_input): + raise NotImplementedError( + "TP + EP MoE is not implemented yet. 
Please use pure TP or pure EP MoE instead.") -- Gitee From d7a9bc9066d52713124ec6432531d136625851d1 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 15:50:15 +0800 Subject: [PATCH 51/76] support jit --- vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index 6beb2aa5b..a7b3bf7da 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -15,8 +15,6 @@ def fused_topk( renormalize: bool, indices_type = None, ) -> tuple[Tensor, Tensor]: - assert hidden_states.shape[0] == gating_output.shape[0], ( - "Number of tokens mismatch") score = mint.softmax(gating_output, dim=-1) topk_weights, topk_ids = mint.topk( score, -- Gitee From f2c0a1d08552f9d24e60de99485cd21e4818bf3a Mon Sep 17 00:00:00 2001 From: lijiakun Date: Thu, 26 Jun 2025 15:54:16 +0800 Subject: [PATCH 52/76] deprecated docker file --- Dockerfile | 108 ----------------------------------------------------- 1 file changed, 108 deletions(-) delete mode 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index d174da7c2..000000000 --- a/Dockerfile +++ /dev/null @@ -1,108 +0,0 @@ -FROM hub.oepkgs.net/openeuler/openeuler:22.03-lts-sp4 - -####################### os ####################### -RUN yum clean all && \ - yum makecache && \ - yum install -y \ - kmod \ - sudo \ - wget \ - curl \ - cmake \ - make \ - git \ - vim \ - gcc && \ - yum clean all - -####################### python ####################### -WORKDIR /root -RUN wget https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py311_25.1.1-2-Linux-aarch64.sh && \ - bash /root/Miniconda3-py311_25.1.1-2-Linux-aarch64.sh -b && \ - rm /root/Miniconda3-py311_25.1.1-2-Linux-aarch64.sh -ENV PATH="/root/miniconda3/bin:$PATH" -ENV PYTHONPATH="/root/miniconda3/lib/python3.11/site-packages" -RUN pip config set global.index-url 'https://pypi.tuna.tsinghua.edu.cn/simple' && \ - pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn - -####################### CANN ####################### -WORKDIR /root -RUN echo "UserName=HwHiAiUser" >> /etc/ascend_install.info && \ - echo "UserGroup=HwHiAiUser" >> /etc/ascend_install.info && \ - echo "Firmware_Install_Type=full" >> /etc/ascend_install.info && \ - echo "Firmware_Install_Path_Param=/usr/local/Ascend" >> /etc/ascend_install.info && \ - echo "Driver_Install_Type=full" >> /etc/ascend_install.info && \ - echo "Driver_Install_Path_Param=/usr/local/Ascend" >> /etc/ascend_install.info && \ - echo "Driver_Install_For_All=no" >> /etc/ascend_install.info && \ - echo "Driver_Install_Mode=normal" >> /etc/ascend_install.info && \ - echo "Driver_Install_Status=complete" >> /etc/ascend_install.info -RUN curl -s "https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/CANN/CANN%208.0.0/Ascend-cann-toolkit_8.0.0_linux-aarch64.run" -o Ascend-cann-toolkit.run && \ - curl -s "https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/CANN/CANN%208.0.0/Ascend-cann-kernels-910b_8.0.0_linux-aarch64.run" -o Ascend-cann-kernels-910b.run && \ - curl -s "https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/CANN/CANN%208.0.0/Ascend-cann-nnrt_8.0.0_linux-aarch64.run" -o Ascend-cann-nnrt.run && \ - chmod a+x *.run && \ - bash /root/Ascend-cann-toolkit.run --install -q && \ - bash /root/Ascend-cann-kernels-910b.run --install -q && \ - bash /root/Ascend-cann-nnrt.run --install 
-q && \ - rm /root/*.run -RUN echo "source /usr/local/Ascend/nnrt/set_env.sh" >> /root/.bashrc && \ - echo "source /usr/local/Ascend/ascend-toolkit/set_env.sh" >> /root/.bashrc - -####################### dev env ####################### -RUN pip install --no-cache-dir \ - cmake>=3.26 \ - decorator \ - ray==2.42.1 \ - protobuf==3.20.0 \ - ml_dtypes \ - wheel \ - setuptools \ - wrap \ - deprecated \ - packaging \ - ninja \ - "setuptools-scm>=8" \ - numpy \ - build - -WORKDIR /workspace - -RUN git clone -b br_infer_deepseek_os https://gitee.com/mindspore/mindformers.git /workspace/mindformers && \ - cd mindformers && \ - sed -i 's/-i https:\/\/pypi.tuna.tsinghua.edu.cn\/simple//g' build.sh && \ - bash build.sh && \ - PACKAGE_PATH=$(python3 -c "import site; print(site.getsitepackages()[0])") && \ - cp -a research "$PACKAGE_PATH" && \ - rm -rf /workspace/mindformers - -RUN git clone https://gitee.com/mindspore/golden-stick.git /workspace/golden-stick && \ - cd golden-stick && \ - bash build.sh && \ - pip install --no-cache-dir /workspace/golden-stick/output/*.whl && \ - rm -rf /workspace/golden-stick - -ENV USE_TORCH="FALSE" -ENV USE_TF="FALSE" -RUN git clone -b v0.6.6.post1 https://gitee.com/mirrors/vllm.git /workspace/vllm && \ - cd vllm && \ - VLLM_TARGET_DEVICE=empty pip install --no-cache-dir . && \ - rm -rf /workspace/vllm - -RUN git clone https://openi.pcl.ac.cn/OpenI/MSAdapter.git /workspace/msadapter && \ - cd /workspace/msadapter && \ - bash scripts/build_and_reinstall.sh && \ - rm -rf /workspace/msadapter - -ADD . /workspace/vllm_mindspore -RUN cd /workspace/vllm_mindspore && \ - pip install --no-cache-dir -r requirements.txt && \ - pip install . && \ - rm -rf /workspace/vllm_mindspore - -RUN wget -O mindspore-2.5.0-cp311-cp311-linux_aarch64.whl \ -https://repo.mindspore.cn/mindspore/mindspore/version/202503/20250303/br_infer_deepseek_os_20250303004707_705727d59236c8c197b25ad0e464c4908434d42f_newest/unified/aarch64/mindspore-2.5.0-cp311-cp311-linux_aarch64.whl && \ -pip install --no-cache-dir mindspore-2.5.0-cp311-cp311-linux_aarch64.whl && \ -rm -f mindspore-2.5.0-cp311-cp311-linux_aarch64.whl - -RUN pip uninstall torch torch-npu torchvision -y - -CMD ["bash"] \ No newline at end of file -- Gitee From df37c48aadb2b0c5fed5cca72bf51f8e06275d76 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 20:09:02 +0800 Subject: [PATCH 53/76] suit tp+dp jit --- .../model_executor/layers/fused_moe/layer.py | 33 ++-- .../model_executor/models/qwen3_moe.py | 157 +++++++++++++++++- 2 files changed, 162 insertions(+), 28 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 949fdbef0..863330f7c 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -829,11 +829,12 @@ class FusedMoE(nn.Cell): return final_hidden_states def construct(self, hidden_states: Tensor, - router_logits: Tensor): - return self.forward_impl(hidden_states, router_logits) + router_logits: Tensor, + dp_pad_input): + return self.forward_impl(hidden_states, router_logits, dp_pad_input) def forward_impl(self, hidden_states: Tensor, - router_logits: Tensor): + router_logits: Tensor, dp_pad_input): assert self.quant_method is not None # do_naive_dispatch_combine: bool = ( @@ -844,24 +845,12 @@ class FusedMoE(nn.Cell): # hidden_states, router_logits) if self.dp_size > 1 and self.pure_tp: - tokens_num = Tensor([[hidden_states.shape[0]]], dtype=ms.int32) - 
tokens_num_total = self.all_gather_from_dp_group(tokens_num) - tokens_num_total = tokens_num_total.reshape(-1) - tokens_cumulative = mint.cumsum(tokens_num_total, 0) - start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() - end = tokens_cumulative[self.dp_rank].item() - - hidden_buffer = mint.zeros((tokens_cumulative[-1].item(), - hidden_states.shape[-1]), - dtype=hidden_states.dtype) - hidden_buffer[start:end] = hidden_states - # mint.distributed.all_reduce(hidden_buffer, group=self.dp_group) + tokens_num = hidden_states.shape[0] + + hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_input) hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) - logit_buffer = mint.zeros((tokens_cumulative[-1].item(), - router_logits.shape[-1]), - dtype=router_logits.dtype) - logit_buffer[start:end] = router_logits - # mint.distributed.all_reduce(logit_buffer, group=self.dp_group) + + logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_input) logit_buffer = self.all_reduce_from_dp_group(logit_buffer) hidden_states = hidden_buffer @@ -891,8 +880,8 @@ class FusedMoE(nn.Cell): # mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) if self.dp_size > 1: - start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1] - end = tokens_cumulative[self.dp_rank] + start = dp_pad_input[-2] + end = start + tokens_num final_hidden_states = final_hidden_states[start:end] # if do_naive_dispatch_combine: diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index adb710b40..da88a23ee 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -24,10 +24,15 @@ from collections.abc import Iterable from typing import Any, Optional, Union, Dict, Tuple, List -from mindspore import Tensor, nn, Parameter +import mindspore as ms +from mindspore import Tensor, nn, Parameter, mint +from mindspore import Tensor, nn, mutable +from mindspore.common import dtype as mstype + from transformers import PretrainedConfig from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, + get_dp_group) from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.models.interfaces import SupportsPP @@ -54,6 +59,7 @@ from vllm_mindspore.model_executor.models.utils import ( from vllm_mindspore.model_executor.models.model_base import NativeModel from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, get_sampler) +from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE logger = init_logger(__name__) @@ -86,7 +92,7 @@ class Qwen3MoeMLP(nn.Cell): "Only silu is supported for now.") self.act_fn = SiluAndMul() - def construct(self, x): + def construct(self, x, dp_pad_input): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) @@ -124,7 +130,7 @@ class Qwen3MoeSparseMoeBlock(nn.Cell): quant_config=None, prefix=f"{prefix}.gate") - def construct(self, hidden_states: Tensor) -> Tensor: + def construct(self, hidden_states: Tensor, dp_pad_input) -> Tensor: # NOTE: hidden_states can have either 1D or 2D shape. 
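        # dp_pad_input arrives as a graph input rather than being derived
        # here: under jit the shapes must stay static, so the data-dependent
        # pad amounts are computed on the host in exec_model and fed in as a
        # tensor.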
orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] @@ -311,6 +317,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): q_seq_lens: Tensor, block_tables: Tensor, residual: Optional[Tensor], + dp_pad_input: Optional[bool] = None, ) -> Tensor: # Self Attention if residual is None: @@ -326,7 +333,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) - hidden_states = self.mlp(hidden_states) + hidden_states = self.mlp(hidden_states, dp_pad_input) return hidden_states, residual @@ -376,6 +383,7 @@ class Qwen3MoeModel(nn.Cell): block_tables: Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, + dp_pad_input = None, ) -> Union[Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: if inputs_embeds is not None: @@ -394,7 +402,8 @@ class Qwen3MoeModel(nn.Cell): value_caches[i - self.start_layer], is_prefill, slot_mapping, attn_mask, batch_valid_length, - q_seq_lens, block_tables, residual) + q_seq_lens, block_tables, residual, + dp_pad_input) if not get_pp_group().is_last_rank: return IntermediateTensors({ "hidden_states": hidden_states, @@ -537,6 +546,12 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): self.common_preprocess(vllm_config, prefix) + if get_dp_group().world_size > 1 and not self.parallel_config.enable_expert_parallel: + self.dp_pad_input = True + self.dp_group = get_dp_group().device_group._name + self.dp_world_size = get_dp_group().world_size + self.dp_rank = get_dp_group().rank_in_group + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.model.get_input_embeddings(input_ids) @@ -570,3 +585,133 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): Tensor]]) -> set[str]: params_dict = self.get_params_dict() return self.model.load_weights(weights, params_dict) + + def exec_model(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs): + model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, + intermediate_tensors, + inputs_embeds) + + if self.prev_prefill != is_prefill and self.is_graph_mode: + self.set_model_inputs(input_ids, positions, intermediate_tensors, + inputs_embeds, is_prefill) + self.prev_prefill = is_prefill + + # for dummy_attention_metadata + if is_prefill and not self.set_flags: + self.set_flags = True + + if self.run_model is None: + self.run_model = ms.jit( + function=self.model, # type: ignore[attr-defined] + jit_level='O0' + ) if self.is_graph_mode else self.model # type: ignore[attr-defined] + + if self.dp_pad_input: + # if dp and not ep, should pad input to gather. 
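+            # Worked example under assumed values: with dp_world_size=2 and
+            # local token counts [3, 5], tokens_cumulative == [3, 8]; on
+            # rank 0, start=0, end=3, end2=5, so pad_index_tensor ==
+            # [0, 0, 0, 5] (mint.nn.functional.pad order: last-dim left,
+            # last-dim right, then dim-0 before, dim-0 after), i.e. five
+            # zero rows appended after the local tokens; rank 1 gets
+            # [0, 0, 3, 0].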
+ token_num_total = mint.empty((self.dp_world_size, 1), dtype=ms.int32) + send_tensor = ms.Tensor([[input_ids.shape[0]]], dtype=ms.int32) + mint.distributed.all_gather_into_tensor(token_num_total, send_tensor, + group=self.dp_group) + token_num_total = token_num_total.reshape(-1) + tokens_cumulative = mint.cumsum(token_num_total, dim=0) + start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() + end = tokens_cumulative[self.dp_rank].item() + end2 = tokens_cumulative[-1].item() - end + pad_index_tensor = ms.Tensor([0, 0, start, end2], dtype=ms.int32) + + model_output = self.run_model( # type: ignore[misc] + input_ids=model_inputs["input_ids"], + positions=model_inputs["position_ids"], + key_caches=model_inputs["key_cache"], + value_caches=model_inputs["value_cache"], + is_prefill=is_prefill, + slot_mapping=model_inputs["slot_mapping"], + attn_mask=model_inputs["attention_mask"], + batch_valid_length=model_inputs["batch_valid_length"], + q_seq_lens=model_inputs["q_seq_lens"], + block_tables=model_inputs["block_tables"], + intermediate_tensors=model_inputs["intermediate_tensors"], + inputs_embeds=model_inputs["inputs_embeds"], + pad_index_tensor=pad_index_tensor if self.dp_pad_input else None, + ) + + return model_output + + + def set_model_inputs(self, input_ids, position_ids, intermediate_tensors, + inputs_embeds, is_prefill): + if input_ids is None: + dyn_input_ids = None + else: + dyn_input_ids = ms.Tensor(shape=[None] * input_ids.ndim, + dtype=mstype.int32) + + if position_ids is None: + dyn_position_ids = None + else: + dyn_position_ids = ms.Tensor(shape=[None] * position_ids.ndim, + dtype=mstype.int32) + + if inputs_embeds is None: + dyn_inputs_embeds = None + else: + dyn_inputs_embeds = ms.Tensor(shape=[None] * inputs_embeds.ndim, + dtype=inputs_embeds.dtype) + + if intermediate_tensors is None: + dyn_intermediate_tensors = None + else: + dyn_intermediate_tensors = ms.Tensor( + shape=[None] * intermediate_tensors.ndim, + dtype=intermediate_tensors.dtype) + + block_size = self.cache_config.block_size + num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) + head_size = self.model_config.get_head_size() + kv_cache_shape = (None, block_size, num_kv_heads, head_size) + + kv_cache_dtype = self.model_config.dtype if self.cache_config.cache_dtype == "auto" \ + else self.cache_config.cache_dtype + if kv_cache_dtype in STR_DTYPE_TO_MS_DTYPE: + kv_cache_dtype = STR_DTYPE_TO_MS_DTYPE[kv_cache_dtype] + + num_layers = self.model_config.get_num_layers(self.parallel_config) + + dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) + dyn_value_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) + dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) + dyn_value_caches = mutable( + [dyn_value_cache for _ in range(num_layers)]) + + dyn_slot_mapping = Tensor(shape=[None], dtype=mstype.int32) + dynamic_attention_mask = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) + dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) + dyn_pad_input = Tensor(shape=[4], dtype=mstype.int32) if self.dp_pad_input else None + + self.model.set_inputs( + dyn_input_ids, + dyn_position_ids, + dyn_key_caches, # type: ignore[attr-defined] + dyn_value_caches, + is_prefill, + dyn_slot_mapping, + dynamic_attention_mask, + dyn_batch_valid_length, + dyn_q_seq_lens, + dyn_block_tables, + 
dyn_intermediate_tensors, + dyn_inputs_embeds, + dyn_pad_input) + + dynamic_hidden_states = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + self.lm_head.set_inputs( + dynamic_hidden_states) # type: ignore[attr-defined] -- Gitee From 638b3e15d0c88695f5cc67e6c8842b03feb51dd4 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 20:22:16 +0800 Subject: [PATCH 54/76] update --- .../model_executor/layers/fused_moe/layer.py | 12 ++++++------ .../model_executor/models/qwen3_moe.py | 19 ++++++++++--------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 863330f7c..d280acb2d 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -830,11 +830,11 @@ class FusedMoE(nn.Cell): def construct(self, hidden_states: Tensor, router_logits: Tensor, - dp_pad_input): - return self.forward_impl(hidden_states, router_logits, dp_pad_input) + dp_pad_index): + return self.forward_impl(hidden_states, router_logits, dp_pad_index) def forward_impl(self, hidden_states: Tensor, - router_logits: Tensor, dp_pad_input): + router_logits: Tensor, dp_pad_index): assert self.quant_method is not None # do_naive_dispatch_combine: bool = ( @@ -847,10 +847,10 @@ class FusedMoE(nn.Cell): if self.dp_size > 1 and self.pure_tp: tokens_num = hidden_states.shape[0] - hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_input) + hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index) hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) - logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_input) + logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_index) logit_buffer = self.all_reduce_from_dp_group(logit_buffer) hidden_states = hidden_buffer @@ -880,7 +880,7 @@ class FusedMoE(nn.Cell): # mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) if self.dp_size > 1: - start = dp_pad_input[-2] + start = dp_pad_index[-2] end = start + tokens_num final_hidden_states = final_hidden_states[start:end] diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index da88a23ee..0e492e0db 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -92,7 +92,7 @@ class Qwen3MoeMLP(nn.Cell): "Only silu is supported for now.") self.act_fn = SiluAndMul() - def construct(self, x, dp_pad_input): + def construct(self, x, dp_pad_index): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) @@ -130,7 +130,7 @@ class Qwen3MoeSparseMoeBlock(nn.Cell): quant_config=None, prefix=f"{prefix}.gate") - def construct(self, hidden_states: Tensor, dp_pad_input) -> Tensor: + def construct(self, hidden_states: Tensor, dp_pad_index) -> Tensor: # NOTE: hidden_states can have either 1D or 2D shape. 
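        # The pad metadata is threaded explicitly through every decoder
        # layer down to this block instead of being read from module state,
        # so that it stays a declared dynamic input (see set_model_inputs)
        # and may change between prefill and decode steps.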
orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] @@ -139,7 +139,8 @@ class Qwen3MoeSparseMoeBlock(nn.Cell): # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states=hidden_states, - router_logits=router_logits) + router_logits=router_logits, + dp_pad_index=dp_pad_index) final_hidden_states = final_hidden_states if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 @@ -317,7 +318,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): q_seq_lens: Tensor, block_tables: Tensor, residual: Optional[Tensor], - dp_pad_input: Optional[bool] = None, + dp_pad_index: Optional[bool] = None, ) -> Tensor: # Self Attention if residual is None: @@ -333,7 +334,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) - hidden_states = self.mlp(hidden_states, dp_pad_input) + hidden_states = self.mlp(hidden_states, dp_pad_index) return hidden_states, residual @@ -383,7 +384,7 @@ class Qwen3MoeModel(nn.Cell): block_tables: Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, - dp_pad_input = None, + dp_pad_index = None, ) -> Union[Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: if inputs_embeds is not None: @@ -403,7 +404,7 @@ class Qwen3MoeModel(nn.Cell): is_prefill, slot_mapping, attn_mask, batch_valid_length, q_seq_lens, block_tables, residual, - dp_pad_input) + dp_pad_index) if not get_pp_group().is_last_rank: return IntermediateTensors({ "hidden_states": hidden_states, @@ -622,7 +623,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() end = tokens_cumulative[self.dp_rank].item() end2 = tokens_cumulative[-1].item() - end - pad_index_tensor = ms.Tensor([0, 0, start, end2], dtype=ms.int32) + dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) model_output = self.run_model( # type: ignore[misc] input_ids=model_inputs["input_ids"], @@ -637,7 +638,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): block_tables=model_inputs["block_tables"], intermediate_tensors=model_inputs["intermediate_tensors"], inputs_embeds=model_inputs["inputs_embeds"], - pad_index_tensor=pad_index_tensor if self.dp_pad_input else None, + dp_pad_index=dp_pad_index if self.dp_pad_input else None, ) return model_output -- Gitee From 89e46734c6557aa6f29bac6bd53930fb35f0a336 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Thu, 26 Jun 2025 20:55:09 +0800 Subject: [PATCH 55/76] fix --- vllm_mindspore/model_executor/models/qwen3_moe.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index 0e492e0db..339508d52 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -547,6 +547,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): self.common_preprocess(vllm_config, prefix) + self.dp_pad_input = False if get_dp_group().world_size > 1 and not self.parallel_config.enable_expert_parallel: self.dp_pad_input = True self.dp_group = get_dp_group().device_group._name -- Gitee From 6faab79abda2b484557264eb58465d04808ad9de Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 10:27:42 +0800 Subject: [PATCH 56/76] update --- .../model_executor/layers/fused_moe/layer.py | 39 ++++++++++++------- 
.../model_executor/models/qwen3_moe.py | 36 ++++++++++++----- 2 files changed, 52 insertions(+), 23 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index d280acb2d..1ac6b7822 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -540,6 +540,7 @@ class FusedMoE(nn.Cell): if self.dp_size > 1: self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group) + self.reduce_scatter_from_dp_group = ops.ReduceScatter(group=self.dp_group) @property def tp_size(self): @@ -829,12 +830,13 @@ class FusedMoE(nn.Cell): return final_hidden_states def construct(self, hidden_states: Tensor, - router_logits: Tensor, - dp_pad_index): - return self.forward_impl(hidden_states, router_logits, dp_pad_index) + router_logits: Tensor, + dp_pad_index, + dp_select_index): + return self.forward_impl(hidden_states, router_logits, dp_pad_index, dp_select_index) def forward_impl(self, hidden_states: Tensor, - router_logits: Tensor, dp_pad_index): + router_logits: Tensor, dp_pad_index, dp_select_index): assert self.quant_method is not None # do_naive_dispatch_combine: bool = ( @@ -847,14 +849,21 @@ class FusedMoE(nn.Cell): if self.dp_size > 1 and self.pure_tp: tokens_num = hidden_states.shape[0] + # hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index) + # hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) + + # logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_index) + # logit_buffer = self.all_reduce_from_dp_group(logit_buffer) + + # ops.AllGather is not supported for uneven size tensor, so need to pad to same size. hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index) - hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) + hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_index) - logit_buffer = self.all_reduce_from_dp_group(logit_buffer) + logit_buffer = self.all_gather_from_dp_group(logit_buffer) - hidden_states = hidden_buffer - router_logits = logit_buffer + # hidden_states = mint.index_select(hidden_buffer, 0, dp_select_index) + # router_logits = mint.index_select(logit_buffer, 0, dp_select_index) # Matrix multiply. 
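        # Buffer layout sketch under assumed sizes (dp_size=2, per-rank
        # counts [2, 3], so max_token_num=3): every rank pads to 3 rows and
        # the all_gather above yields 6 rows ordered [rank0 tokens, rank0
        # pad, rank1 tokens, rank1 pad]; rank 1's dp_select_index ==
        # [3, 4, 5] later picks its own valid rows back out of the combined
        # result.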
final_hidden_states = self.quant_method.apply( @@ -878,11 +887,15 @@ class FusedMoE(nn.Cell): if self.pure_tp: # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) # mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) - final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) - if self.dp_size > 1: - start = dp_pad_index[-2] - end = start + tokens_num - final_hidden_states = final_hidden_states[start:end] + if self.dp_size == 1: + final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) + # dp_size > 1 + else: + final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) + final_hidden_states = mint.index_select(final_hidden_states, 0, dp_select_index) + # start = dp_pad_index[-2] + # end = start + tokens_num + # final_hidden_states = final_hidden_states[start:end] # if do_naive_dispatch_combine: # final_hidden_states = get_ep_group().combine(final_hidden_states) diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index 339508d52..f3ab0a9ab 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -24,6 +24,7 @@ from collections.abc import Iterable from typing import Any, Optional, Union, Dict, Tuple, List +import numpy as np import mindspore as ms from mindspore import Tensor, nn, Parameter, mint from mindspore import Tensor, nn, mutable @@ -92,7 +93,7 @@ class Qwen3MoeMLP(nn.Cell): "Only silu is supported for now.") self.act_fn = SiluAndMul() - def construct(self, x, dp_pad_index): + def construct(self, x, dp_pad_index, dp_select_index): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) @@ -319,6 +320,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): block_tables: Tensor, residual: Optional[Tensor], dp_pad_index: Optional[bool] = None, + dp_select_index: Optional[Tensor] = None, ) -> Tensor: # Self Attention if residual is None: @@ -334,7 +336,7 @@ class Qwen3MoeDecoderLayer(nn.Cell): # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) - hidden_states = self.mlp(hidden_states, dp_pad_index) + hidden_states = self.mlp(hidden_states, dp_pad_index, dp_select_index) return hidden_states, residual @@ -385,6 +387,7 @@ class Qwen3MoeModel(nn.Cell): intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, dp_pad_index = None, + dp_select_index: Optional[Tensor] = None, ) -> Union[Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: if inputs_embeds is not None: @@ -404,7 +407,7 @@ class Qwen3MoeModel(nn.Cell): is_prefill, slot_mapping, attn_mask, batch_valid_length, q_seq_lens, block_tables, residual, - dp_pad_index) + dp_pad_index, dp_select_index) if not get_pp_group().is_last_rank: return IntermediateTensors({ "hidden_states": hidden_states, @@ -620,11 +623,20 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): mint.distributed.all_gather_into_tensor(token_num_total, send_tensor, group=self.dp_group) token_num_total = token_num_total.reshape(-1) - tokens_cumulative = mint.cumsum(token_num_total, dim=0) - start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() - end = tokens_cumulative[self.dp_rank].item() - end2 = tokens_cumulative[-1].item() - end - dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) + # tokens_cumulative = mint.cumsum(token_num_total, dim=0) + # start = 0 if self.dp_rank == 0 else 
tokens_cumulative[self.dp_rank - 1].item() + # end = tokens_cumulative[self.dp_rank].item() + # end2 = tokens_cumulative[-1].item() - end + # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) + token_num_total = token_num_total.asnumpy() + max_token_num = int(token_num_total.max()) + total_pad_num = (max_token_num - token_num_total) + this_pad_num = total_pad_num[self.dp_rank] + dp_pad_index = ms.Tensor([0, 0, 0, int(this_pad_num)], dtype=mstype.int32) + dp_select_index = [j + self.dp_rank * max_token_num + for j in range(token_num_total[self.dp_rank])] + dp_select_index = ms.Tensor(dp_select_index, dtype=mstype.int32) + model_output = self.run_model( # type: ignore[misc] input_ids=model_inputs["input_ids"], @@ -640,6 +652,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): intermediate_tensors=model_inputs["intermediate_tensors"], inputs_embeds=model_inputs["inputs_embeds"], dp_pad_index=dp_pad_index if self.dp_pad_input else None, + dp_select_index=dp_select_index if self.dp_pad_input else None ) return model_output @@ -696,7 +709,9 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) - dyn_pad_input = Tensor(shape=[4], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_pad_input = Tensor(shape=[4], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_select_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + self.model.set_inputs( dyn_input_ids, @@ -711,7 +726,8 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): dyn_block_tables, dyn_intermediate_tensors, dyn_inputs_embeds, - dyn_pad_input) + dyn_dp_pad_input, + dyn_dp_select_index) dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.model_config.dtype) -- Gitee From 9887e4af707bd8fd2a4b909e8083bf20e5890145 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 14:10:12 +0800 Subject: [PATCH 57/76] update --- .../model_executor/layers/fused_moe/layer.py | 42 ++++++------- .../model_executor/models/qwen3_moe.py | 62 +++++++++++++------ 2 files changed, 63 insertions(+), 41 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 1ac6b7822..df74ede17 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -538,6 +538,7 @@ class FusedMoE(nn.Cell): self.pure_tp = True self.all_reduce_from_ep_group = ops.AllReduce(group=self.ep_group) if self.dp_size > 1: + self.gather = ops.Gather() self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group) self.reduce_scatter_from_dp_group = ops.ReduceScatter(group=self.dp_group) @@ -832,23 +833,21 @@ class FusedMoE(nn.Cell): def construct(self, hidden_states: Tensor, router_logits: Tensor, dp_pad_index, - dp_select_index): - return self.forward_impl(hidden_states, router_logits, dp_pad_index, dp_select_index) + dp_unpad_index, + dp_pad_index_with_offset, + dp_unpad_index_total_with_offset): + return self.forward_impl(hidden_states, router_logits, dp_pad_index, + dp_unpad_index, dp_pad_index_with_offset, + dp_unpad_index_total_with_offset) def forward_impl(self, hidden_states: Tensor, - router_logits: Tensor, dp_pad_index, dp_select_index): - assert self.quant_method is not None - - # 
do_naive_dispatch_combine: bool = ( - # self.dp_size > 1 - # and not self.ep_size > 1) - # if do_naive_dispatch_combine: - # hidden_states, router_logits = get_ep_group().dispatch( - # hidden_states, router_logits) + router_logits: Tensor, dp_pad_index, dp_unpad_index, + dp_pad_index_total_with_offset, + dp_unpad_index_total_with_offset): + # dp_pad_index = [0, 1, 2, 3, 0, 0, 0] + # dp_pad_index_with_offset = [5, 6, 7, 8, 0, 0, 0] if self.dp_size > 1 and self.pure_tp: - tokens_num = hidden_states.shape[0] - # hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index) # hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer) @@ -856,14 +855,14 @@ class FusedMoE(nn.Cell): # logit_buffer = self.all_reduce_from_dp_group(logit_buffer) # ops.AllGather is not supported for uneven size tensor, so need to pad to same size. - hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index) + hidden_buffer = self.gather(hidden_states, dp_pad_index) hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) - logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_index) + logit_buffer = self.gather(router_logits, dp_pad_index) logit_buffer = self.all_gather_from_dp_group(logit_buffer) - # hidden_states = mint.index_select(hidden_buffer, 0, dp_select_index) - # router_logits = mint.index_select(logit_buffer, 0, dp_select_index) + hidden_states = mint.index_select(hidden_buffer, 0, dp_unpad_index_total_with_offset) + router_logits = mint.index_select(logit_buffer, 0, dp_unpad_index_total_with_offset) # Matrix multiply. final_hidden_states = self.quant_method.apply( @@ -885,21 +884,18 @@ class FusedMoE(nn.Cell): ) if self.pure_tp: - # final_hidden_states = self.all_reduce_from_world_group(final_hidden_states) - # mint.distributed.all_reduce(final_hidden_states, group=self.ep_group) if self.dp_size == 1: final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) # dp_size > 1 else: + final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset) final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) - final_hidden_states = mint.index_select(final_hidden_states, 0, dp_select_index) + final_hidden_states = self.gather(final_hidden_states, dp_unpad_index) + # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] # end = start + tokens_num # final_hidden_states = final_hidden_states[start:end] - # if do_naive_dispatch_combine: - # final_hidden_states = get_ep_group().combine(final_hidden_states) - if self.reduce_results and (self.tp_size > 1 or self.ep_size > 1): # Default set to False. (May have to add shared expert outputs.) 
final_hidden_states = tensor_model_parallel_all_reduce( diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index f3ab0a9ab..bd29b708c 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -93,7 +93,7 @@ class Qwen3MoeMLP(nn.Cell): "Only silu is supported for now.") self.act_fn = SiluAndMul() - def construct(self, x, dp_pad_index, dp_select_index): + def construct(self, x, dp_pad_index, dp_unpad_index, dp_unpad_index_total_with_offset): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) @@ -131,7 +131,8 @@ class Qwen3MoeSparseMoeBlock(nn.Cell): quant_config=None, prefix=f"{prefix}.gate") - def construct(self, hidden_states: Tensor, dp_pad_index) -> Tensor: + def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: # NOTE: hidden_states can have either 1D or 2D shape. orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] @@ -141,7 +142,10 @@ class Qwen3MoeSparseMoeBlock(nn.Cell): router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states=hidden_states, router_logits=router_logits, - dp_pad_index=dp_pad_index) + dp_pad_index=dp_pad_index, + dp_unpad_index=dp_unpad_index, + dp_pad_index_with_offset=dp_pad_index_with_offset, + dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) final_hidden_states = final_hidden_states if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 @@ -320,7 +324,9 @@ class Qwen3MoeDecoderLayer(nn.Cell): block_tables: Tensor, residual: Optional[Tensor], dp_pad_index: Optional[bool] = None, - dp_select_index: Optional[Tensor] = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, ) -> Tensor: # Self Attention if residual is None: @@ -336,7 +342,8 @@ class Qwen3MoeDecoderLayer(nn.Cell): # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) - hidden_states = self.mlp(hidden_states, dp_pad_index, dp_select_index) + hidden_states = self.mlp(hidden_states, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) return hidden_states, residual @@ -387,7 +394,10 @@ class Qwen3MoeModel(nn.Cell): intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[Tensor] = None, dp_pad_index = None, - dp_select_index: Optional[Tensor] = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_total_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, + ) -> Union[Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: if inputs_embeds is not None: @@ -407,7 +417,9 @@ class Qwen3MoeModel(nn.Cell): is_prefill, slot_mapping, attn_mask, batch_valid_length, q_seq_lens, block_tables, residual, - dp_pad_index, dp_select_index) + dp_pad_index, dp_unpad_index, + dp_pad_index_total_with_offset, + dp_unpad_index_total_with_offset) if not get_pp_group().is_last_rank: return IntermediateTensors({ "hidden_states": hidden_states, @@ -629,13 +641,21 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): # end2 = tokens_cumulative[-1].item() - end # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) token_num_total = token_num_total.asnumpy() - max_token_num 
= int(token_num_total.max()) - total_pad_num = (max_token_num - token_num_total) + max_token_num = token_num_total.max() + total_pad_num = max_token_num - token_num_total this_pad_num = total_pad_num[self.dp_rank] - dp_pad_index = ms.Tensor([0, 0, 0, int(this_pad_num)], dtype=mstype.int32) - dp_select_index = [j + self.dp_rank * max_token_num - for j in range(token_num_total[self.dp_rank])] - dp_select_index = ms.Tensor(dp_select_index, dtype=mstype.int32) + + dp_unpad_index = np.arange(token_num_total[self.dp_rank]) + dp_pad_index = np.pad(dp_unpad_index, (0, this_pad_num)) + + dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) + for rank in self.dp_world_size] + dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0) + + + dp_unpad_index_total_with_offset = [np.arange(token_num_total[rank]) + rank * max_token_num + for rank in self.dp_world_size] + dp_unpad_index_total_with_offset = ms.Tensor(dp_unpad_index_total_with_offset, dtype=mstype.int32) model_output = self.run_model( # type: ignore[misc] @@ -652,7 +672,9 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): intermediate_tensors=model_inputs["intermediate_tensors"], inputs_embeds=model_inputs["inputs_embeds"], dp_pad_index=dp_pad_index if self.dp_pad_input else None, - dp_select_index=dp_select_index if self.dp_pad_input else None + dp_unpad_index=dp_unpad_index if self.dp_pad_input else None, + dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None + dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset if self.dp_pad_input else None ) return model_output @@ -709,8 +731,10 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) - dyn_dp_pad_input = Tensor(shape=[4], dtype=mstype.int32) if self.dp_pad_input else None - dyn_dp_select_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_pad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_unpad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_pad_index_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dp_unpad_index_total_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None self.model.set_inputs( @@ -726,8 +750,10 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): dyn_block_tables, dyn_intermediate_tensors, dyn_inputs_embeds, - dyn_dp_pad_input, - dyn_dp_select_index) + dyn_dp_pad_index, + dyn_dp_unpad_index, + dyn_dp_pad_index_with_offset, + dp_unpad_index_total_with_offset) dynamic_hidden_states = Tensor(shape=[None, None], dtype=self.model_config.dtype) -- Gitee From 137b932fede213bcd3249cab4928d6769519642a Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 14:38:48 +0800 Subject: [PATCH 58/76] update --- .../model_executor/layers/fused_moe/layer.py | 8 ++++---- vllm_mindspore/model_executor/models/qwen3_moe.py | 12 +++++++----- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index df74ede17..ff6244bee 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ 
-855,10 +855,10 @@ class FusedMoE(nn.Cell): # logit_buffer = self.all_reduce_from_dp_group(logit_buffer) # ops.AllGather is not supported for uneven size tensor, so need to pad to same size. - hidden_buffer = self.gather(hidden_states, dp_pad_index) + hidden_buffer = self.gather(hidden_states, dp_pad_index, 0) hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) - logit_buffer = self.gather(router_logits, dp_pad_index) + logit_buffer = self.gather(router_logits, dp_pad_index, 0) logit_buffer = self.all_gather_from_dp_group(logit_buffer) hidden_states = mint.index_select(hidden_buffer, 0, dp_unpad_index_total_with_offset) @@ -888,9 +888,9 @@ class FusedMoE(nn.Cell): final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) # dp_size > 1 else: - final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset) + final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) - final_hidden_states = self.gather(final_hidden_states, dp_unpad_index) + final_hidden_states = self.gather(final_hidden_states, dp_unpad_index, 0) # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] # end = start + tokens_num diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index bd29b708c..d0926e1f5 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -645,16 +645,18 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): total_pad_num = max_token_num - token_num_total this_pad_num = total_pad_num[self.dp_rank] - dp_unpad_index = np.arange(token_num_total[self.dp_rank]) - dp_pad_index = np.pad(dp_unpad_index, (0, this_pad_num)) + dp_unpad_index = ms.Tensor(np.arange(token_num_total[self.dp_rank]), dtype=ms.int32) + dp_pad_index = ms.Tensor(np.pad(dp_unpad_index, (0, this_pad_num)), dtype=ms.int32) dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) - for rank in self.dp_world_size] + for rank in range(self.dp_world_size)] dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0) + dp_pad_index_total_with_offset = ms.Tensor(dp_pad_index_total_with_offset, dtype=mstype.int32) dp_unpad_index_total_with_offset = [np.arange(token_num_total[rank]) + rank * max_token_num - for rank in self.dp_world_size] + for rank in range(self.dp_world_size)] + dp_unpad_index_total_with_offset = np.concatenate(dp_unpad_index_total_with_offset, axis=0) dp_unpad_index_total_with_offset = ms.Tensor(dp_unpad_index_total_with_offset, dtype=mstype.int32) @@ -673,7 +675,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): inputs_embeds=model_inputs["inputs_embeds"], dp_pad_index=dp_pad_index if self.dp_pad_input else None, dp_unpad_index=dp_unpad_index if self.dp_pad_input else None, - dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None + dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None, dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset if self.dp_pad_input else None ) -- Gitee From ced4491a6512d8a19ef93aaaa93419c2a7c185b9 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 15:13:09 +0800 Subject: [PATCH 59/76] update --- .../model_executor/layers/fused_moe/layer.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 
deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index ff6244bee..e8ee29bd8 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -534,6 +534,8 @@ class FusedMoE(nn.Cell): self.dp_group = get_dp_group().device_group._name self.ep_group = get_ep_group().device_group._name + self.tp_world_size = get_tensor_model_parallel_world_size() + if self.dp_size > 1 and self.ep_size == 1 or self.dp_size == 1: self.pure_tp = True self.all_reduce_from_ep_group = ops.AllReduce(group=self.ep_group) @@ -541,7 +543,9 @@ class FusedMoE(nn.Cell): self.gather = ops.Gather() self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group) - self.reduce_scatter_from_dp_group = ops.ReduceScatter(group=self.dp_group) + # self.reduce_scatter_from_ep_group = ops.ReduceScatter(group=self.ep_group) + self.reduce_from_ep_group = ops.Reduce(0, group=self.ep_group) + self.scatter_to_ep_group = ops.CollectiveScatter(0, group=self.ep_group) @property def tp_size(self): @@ -889,7 +893,10 @@ class FusedMoE(nn.Cell): # dp_size > 1 else: final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) - final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) + final_hidden_states = self.reduce_from_ep_group(final_hidden_states) + final_hidden_states = mint.repeat_interleave(final_hidden_states, self.tp_world_size, dim=0) + final_hidden_states = self.scatter_to_ep_group(final_hidden_states) + # final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) final_hidden_states = self.gather(final_hidden_states, dp_unpad_index, 0) # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] -- Gitee From 1e7fc08ece80737191e7643b8da27957460bda5e Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 16:13:23 +0800 Subject: [PATCH 60/76] update --- vllm_mindspore/model_executor/layers/fused_moe/layer.py | 8 ++------ vllm_mindspore/model_executor/models/qwen3_moe.py | 9 +++++++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index e8ee29bd8..e964d987e 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -543,9 +543,7 @@ class FusedMoE(nn.Cell): self.gather = ops.Gather() self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group) - # self.reduce_scatter_from_ep_group = ops.ReduceScatter(group=self.ep_group) - self.reduce_from_ep_group = ops.Reduce(0, group=self.ep_group) - self.scatter_to_ep_group = ops.CollectiveScatter(0, group=self.ep_group) + self.reduce_scatter_from_ep_group = ops.ReduceScatter(group=self.ep_group) @property def tp_size(self): @@ -893,10 +891,8 @@ class FusedMoE(nn.Cell): # dp_size > 1 else: final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) - final_hidden_states = self.reduce_from_ep_group(final_hidden_states) final_hidden_states = mint.repeat_interleave(final_hidden_states, self.tp_world_size, dim=0) - final_hidden_states = self.scatter_to_ep_group(final_hidden_states) - # final_hidden_states = 
self.reduce_scatter_from_ep_group(final_hidden_states) + final_hidden_state = self.reduce_scatter_from_ep_group(final_hidden_state) final_hidden_states = self.gather(final_hidden_states, dp_unpad_index, 0) # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] diff --git a/vllm_mindspore/model_executor/models/qwen3_moe.py b/vllm_mindspore/model_executor/models/qwen3_moe.py index d0926e1f5..882f584e5 100644 --- a/vllm_mindspore/model_executor/models/qwen3_moe.py +++ b/vllm_mindspore/model_executor/models/qwen3_moe.py @@ -641,6 +641,7 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): # end2 = tokens_cumulative[-1].item() - end # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) token_num_total = token_num_total.asnumpy() + token_num_total_cumsum = np.cumsum(token_num_total) max_token_num = token_num_total.max() total_pad_num = max_token_num - token_num_total this_pad_num = total_pad_num[self.dp_rank] @@ -648,8 +649,12 @@ class Qwen3MoeForCausalLM(NativeModel, SupportsPP): dp_unpad_index = ms.Tensor(np.arange(token_num_total[self.dp_rank]), dtype=ms.int32) dp_pad_index = ms.Tensor(np.pad(dp_unpad_index, (0, this_pad_num)), dtype=ms.int32) - dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) - for rank in range(self.dp_world_size)] + # dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) + # for rank in range(self.dp_world_size)] + dp_pad_index_total_with_offset = [np.pad(np.arange(0 if rank == 0 else token_num_total_cumsum[rank - 1], + token_num_total_cumsum[rank]), (0, total_pad_num[rank])) + for rank in range(self.dp_world_size)] + dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0) dp_pad_index_total_with_offset = ms.Tensor(dp_pad_index_total_with_offset, dtype=mstype.int32) -- Gitee From 80b9c6a99e88f3554cfbc45c0c3ef83b72d6b766 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 17:22:59 +0800 Subject: [PATCH 61/76] update --- vllm_mindspore/model_executor/layers/fused_moe/layer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index e964d987e..cc5a35ba2 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -857,6 +857,10 @@ class FusedMoE(nn.Cell): # logit_buffer = self.all_reduce_from_dp_group(logit_buffer) # ops.AllGather is not supported for uneven size tensor, so need to pad to same size. 
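
For reference, the index bookkeeping that patches 57 through 61 converge on is easier to follow outside the diff. Below is a minimal NumPy sketch using made-up per-rank token counts [1, 3, 4, 2]; the real code computes the same arrays (for the current rank only where possible) and wraps them in int32 MindSpore tensors:

    import numpy as np

    token_num_total = np.array([1, 3, 4, 2])  # tokens per DP rank (hypothetical)
    dp_rank = 1
    max_token_num = token_num_total.max()              # 4
    total_pad_num = max_token_num - token_num_total    # [3, 1, 0, 2]

    # Local indices of this rank's real tokens, padded up to max_token_num.
    dp_unpad_index = np.arange(token_num_total[dp_rank])                # [0, 1, 2]
    dp_pad_index = np.pad(dp_unpad_index, (0, total_pad_num[dp_rank]))  # [0, 1, 2, 0]

    # Global indices (with cumulative-sum offsets) used to re-pad a tensor that
    # holds every rank's real tokens concatenated back to back.
    cumsum = np.cumsum(token_num_total)                # [1, 4, 8, 10]
    dp_pad_index_total_with_offset = np.concatenate([
        np.pad(np.arange(0 if r == 0 else cumsum[r - 1], cumsum[r]),
               (0, total_pad_num[r]))
        for r in range(len(token_num_total))
    ])  # [0 0 0 0 1 2 3 0 4 5 6 7 8 9 0 0]

    # Indices that strip the padding again after the all-gather.
    dp_unpad_index_total_with_offset = np.concatenate([
        np.arange(token_num_total[r]) + r * max_token_num
        for r in range(len(token_num_total))
    ])  # [0 4 5 6 8 9 10 11 12 13]

Padding every rank to max_token_num is what lets ops.AllGather, which requires identical shapes on all ranks, run at all; the two unpad index sets recover the real tokens on either side of the collective.
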
+ num_token = ms.Tensor([[hidden_states.shape[0]]], dtype=ms.int32) + all_num_token = self.all_gather_from_dp_group(num_token) + all_num_token_cumsum = mint.cumsum(all_num_token, dim=0) + hidden_buffer = self.gather(hidden_states, dp_pad_index, 0) hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) @@ -891,8 +895,10 @@ class FusedMoE(nn.Cell): # dp_size > 1 else: final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) + final_hidden_states = final_hidden_states.reshape(self.dp_size, -1, final_hidden_states.shape[-1]) final_hidden_states = mint.repeat_interleave(final_hidden_states, self.tp_world_size, dim=0) - final_hidden_state = self.reduce_scatter_from_ep_group(final_hidden_state) + final_hidden_states = final_hidden_states.reshape(-1, final_hidden_states.shape[-1]) + final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) final_hidden_states = self.gather(final_hidden_states, dp_unpad_index, 0) # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] -- Gitee From f20a9df9a028c4568a60f186e964e9cf1c236194 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Fri, 27 Jun 2025 17:36:29 +0800 Subject: [PATCH 62/76] update --- vllm_mindspore/model_executor/layers/fused_moe/layer.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index cc5a35ba2..0685b9ddb 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -857,10 +857,6 @@ class FusedMoE(nn.Cell): # logit_buffer = self.all_reduce_from_dp_group(logit_buffer) # ops.AllGather is not supported for uneven size tensor, so need to pad to same size. - num_token = ms.Tensor([[hidden_states.shape[0]]], dtype=ms.int32) - all_num_token = self.all_gather_from_dp_group(num_token) - all_num_token_cumsum = mint.cumsum(all_num_token, dim=0) - hidden_buffer = self.gather(hidden_states, dp_pad_index, 0) hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) -- Gitee From 43d15258e97201206a188199a932dac595cd3479 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Sat, 28 Jun 2025 10:23:06 +0800 Subject: [PATCH 63/76] test --- .../model_executor/layers/fused_moe/layer.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 0685b9ddb..4cb4e8d3d 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -822,14 +822,15 @@ class FusedMoE(nn.Cell): Therefore it is required that we reduce the shared_experts output early. """ - return self.use_pplx_kernels + return not (self.pure_tp and self.dp_size == 1) def maybe_all_reduce_tensor_model_parallel( self, final_hidden_states: Tensor): """ The pplx combine kernel reduces across GPU ranks by default. 
""" - # return tensor_model_parallel_all_reduce(final_hidden_states) + if self.pure_tp and self.dp_size == 1: + return tensor_model_parallel_all_reduce(final_hidden_states) return final_hidden_states def construct(self, hidden_states: Tensor, @@ -885,11 +886,12 @@ class FusedMoE(nn.Cell): apply_router_weight_on_input=self.apply_router_weight_on_input, ) - if self.pure_tp: - if self.dp_size == 1: - final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) - # dp_size > 1 - else: + # if self.pure_tp: + # if self.dp_size == 1: + # final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) + # # dp_size > 1 + # else: + if self.pure_tp and self.dp_size > 1: final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) final_hidden_states = final_hidden_states.reshape(self.dp_size, -1, final_hidden_states.shape[-1]) final_hidden_states = mint.repeat_interleave(final_hidden_states, self.tp_world_size, dim=0) -- Gitee From efc30693af49dcf5ba14f7a877bd42f68c205cd0 Mon Sep 17 00:00:00 2001 From: lvhaoyu Date: Sat, 28 Jun 2025 11:04:24 +0800 Subject: [PATCH 64/76] update test --- .../model_executor/layers/fused_moe/layer.py | 31 +++++++++---------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index 4cb4e8d3d..304b45577 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -38,6 +38,7 @@ from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk grouped_topk, fused_experts) from vllm_mindspore.model_executor.layers.quantization.base_config import QuantizeMethodBase +from vllm_mindspore.distributed.communication_op import ReduceFromModelParallelRegion from mindspore import nn, Tensor, Parameter, mint, ops import mindspore as ms @@ -536,9 +537,13 @@ class FusedMoE(nn.Cell): self.tp_world_size = get_tensor_model_parallel_world_size() - if self.dp_size > 1 and self.ep_size == 1 or self.dp_size == 1: + self.reduce_from_tp_group = ReduceFromModelParallelRegion() + + # pure_tp means using tensor parallelism only, no expert parallelism. + self.pure_tp = False + + if self.tp_size >= 1 and self.ep_size == 1: self.pure_tp = True - self.all_reduce_from_ep_group = ops.AllReduce(group=self.ep_group) if self.dp_size > 1: self.gather = ops.Gather() self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group) @@ -810,27 +815,19 @@ class FusedMoE(nn.Cell): return topk_weights, topk_ids def must_reduce_shared_expert_outputs(self) -> bool: - """ - The shared_experts are typically computed using the RowParallelLinear - layer. The result of this function is typically used as - the reduce_results argument to the module. - When just tensor-parallel is used, it is not required to reduce - the shared_experts results immediately. Instead we reduce at the - once at the end of the MoE op. (Refer to DeepSeekV2MoE module) - With EP and the pplx kernels - this is no longer viable as all - GPU ranks in DP, produce the complete set of hidden_states. - Therefore it is required that we reduce the shared_experts output - early. - """ + # If dp_size == 1, means routed expert use the same tensor parallel group as shared expert. + # And meanwhile if ep_size == 1, it means using tensor parallel to compute routed expert. + # So we can delay the shared expert outputs reduce after the routed expert and + # the shared expert are added. 
 return not (self.pure_tp and self.dp_size == 1)
 def maybe_all_reduce_tensor_model_parallel(
 self, final_hidden_states: Tensor):
 """
- The pplx combine kernel reduces across GPU ranks by default.
+ All-reduce only after the routed-expert and shared-expert outputs have been added.
 """
 if self.pure_tp and self.dp_size == 1:
- return tensor_model_parallel_all_reduce(final_hidden_states)
+ return self.reduce_from_tp_group(final_hidden_states)
 return final_hidden_states
 def construct(self, hidden_states: Tensor,
@@ -850,7 +847,7 @@ class FusedMoE(nn.Cell):
 # dp_pad_index = [0, 1, 2, 3, 0, 0, 0]
 # dp_pad_index_with_offset = [5, 6, 7, 8, 0, 0, 0]
- if self.dp_size > 1 and self.pure_tp:
+ if self.pure_tp and self.dp_size > 1:
 # hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index)
 # hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer)
-- Gitee
From f0bbd02c16765ebcfd6d1621936ffa7ce7d43b05 Mon Sep 17 00:00:00 2001
From: lvhaoyu
Date: Sat, 28 Jun 2025 11:29:01 +0800
Subject: [PATCH 65/76] update

---
 .../model_executor/layers/fused_moe/layer.py | 32 +++++++------------
 1 file changed, 11 insertions(+), 21 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index 304b45577..2692ce1ff 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -545,7 +545,6 @@ class FusedMoE(nn.Cell):
 if self.tp_size >= 1 and self.ep_size == 1:
 self.pure_tp = True
 if self.dp_size > 1:
- self.gather = ops.Gather()
 self.all_gather_from_dp_group = ops.AllGather(group=self.dp_group)
 self.all_reduce_from_dp_group = ops.AllReduce(group=self.dp_group)
@@ -603,7 +602,6 @@ class FusedMoE(nn.Cell):
 # w3, up_proj: Load into second logical weight of w13.
 else:
 assert shard_id == "w3"
- # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
 if shard_dim == 1:
 param[expert_id, :, shard_size:shard_size*2] = loaded_weight
 else:
@@ -619,7 +617,6 @@ class FusedMoE(nn.Cell):
 # w3, up_proj: Load into second logical weight of w13.
 else:
 assert shard_id == "w3"
- # expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
 if shard_dim == 2:
 param[:, :, shard_size:shard_size*2] = loaded_weight
 else:
@@ -844,21 +841,19 @@ class FusedMoE(nn.Cell):
 router_logits: Tensor, dp_pad_index, dp_unpad_index,
 dp_pad_index_total_with_offset, dp_unpad_index_total_with_offset):
- # dp_pad_index = [0, 1, 2, 3, 0, 0, 0]
- # dp_pad_index_with_offset = [5, 6, 7, 8, 0, 0, 0]
-
+ """
+ If dp_world_size == 4, dp_rank == 1, tokens_num across dp is [1, 3, 4, 2], then
+ dp_pad_index = [0, 1, 2, 0]
+ dp_unpad_index = [0, 1, 2]
+ dp_pad_index_total_with_offset = [0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9, 0, 0]
+ dp_unpad_index_total_with_offset = [0, 4, 5, 6, 8, 9, 10, 11, 12, 13]
+ """
 if self.pure_tp and self.dp_size > 1:
- # hidden_buffer = mint.nn.functional.pad(hidden_states, dp_pad_index)
- # hidden_buffer = self.all_reduce_from_dp_group(hidden_buffer)
-
- # logit_buffer = mint.nn.functional.pad(router_logits, dp_pad_index)
- # logit_buffer = self.all_reduce_from_dp_group(logit_buffer)
- # ops.AllGather is not supported for uneven size tensor, so need to pad to same size.
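
The combine path that patches 59 through 61 converge on (reshape, repeat_interleave by the TP world size, then ReduceScatter over the EP group) can be checked with a single-process simulation. The sketch below models only the semantics, with toy sizes, and assumes the EP communication group spans dp_size * tp_size ranks, which is what the reshape arithmetic implies:

    import numpy as np

    dp_size, tp_size, hidden, max_tok = 2, 2, 3, 2
    ep_size = dp_size * tp_size

    # Each EP rank holds a TP-partial expert output for the whole padded batch.
    rng = np.random.default_rng(0)
    partials = [rng.normal(size=(dp_size * max_tok, hidden))
                for _ in range(ep_size)]

    def rank_local_reorder(x):
        # Duplicate every DP chunk tp_size times so that chunk index
        # (dp_rank * tp_size + tp_rank) carries dp_rank's tokens.
        x = x.reshape(dp_size, max_tok, hidden)
        x = np.repeat(x, tp_size, axis=0)
        return x.reshape(-1, hidden)

    def reduce_scatter(tensors, rank):
        # ReduceScatter over ep_size ranks: elementwise sum, then each rank
        # keeps its own chunk of the first dimension.
        summed = np.sum(tensors, axis=0)
        chunk = summed.shape[0] // ep_size
        return summed[rank * chunk:(rank + 1) * chunk]

    buffers = [rank_local_reorder(p) for p in partials]
    got = reduce_scatter(buffers, rank=2)  # EP rank 2 <-> dp_rank 1, tp_rank 0
    want = np.sum(partials, axis=0).reshape(dp_size, max_tok, hidden)[1]
    assert np.allclose(got, want)

The sum completes the tensor-parallel reduction, and the scatter hands every rank its own, still padded, DP slice in a single collective.
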
- hidden_buffer = self.gather(hidden_states, dp_pad_index, 0) + hidden_buffer = mint.index_select(hidden_states, 0, dp_pad_index) hidden_buffer = self.all_gather_from_dp_group(hidden_buffer) - logit_buffer = self.gather(router_logits, dp_pad_index, 0) + logit_buffer = mint.index_select(router_logits, 0, dp_pad_index) logit_buffer = self.all_gather_from_dp_group(logit_buffer) hidden_states = mint.index_select(hidden_buffer, 0, dp_unpad_index_total_with_offset) @@ -883,18 +878,13 @@ class FusedMoE(nn.Cell): apply_router_weight_on_input=self.apply_router_weight_on_input, ) - # if self.pure_tp: - # if self.dp_size == 1: - # final_hidden_states = self.all_reduce_from_ep_group(final_hidden_states) - # # dp_size > 1 - # else: if self.pure_tp and self.dp_size > 1: - final_hidden_states = self.gather(final_hidden_states, dp_pad_index_total_with_offset, 0) + final_hidden_states = mint.index_select(final_hidden_states, 0, dp_pad_index_total_with_offset) final_hidden_states = final_hidden_states.reshape(self.dp_size, -1, final_hidden_states.shape[-1]) final_hidden_states = mint.repeat_interleave(final_hidden_states, self.tp_world_size, dim=0) final_hidden_states = final_hidden_states.reshape(-1, final_hidden_states.shape[-1]) final_hidden_states = self.reduce_scatter_from_ep_group(final_hidden_states) - final_hidden_states = self.gather(final_hidden_states, dp_unpad_index, 0) + final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # final_hidden_states = mint.index_select(final_hidden_states, 0, dp_unpad_index) # start = dp_pad_index[-2] # end = start + tokens_num -- Gitee From d6d45846d69e2a9bc248d8b377f729f53bbe531c Mon Sep 17 00:00:00 2001 From: zlq2020 Date: Sat, 28 Jun 2025 11:37:29 +0800 Subject: [PATCH 66/76] optimized the install script --- install_depend_pkgs.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/install_depend_pkgs.sh b/install_depend_pkgs.sh index 607631e45..302cad80a 100644 --- a/install_depend_pkgs.sh +++ b/install_depend_pkgs.sh @@ -40,7 +40,7 @@ vllm_dir=vllm-v0.8.3 if [ ! -d "$vllm_dir" ]; then git clone https://github.com/vllm-project/vllm.git -b v0.8.3 "$vllm_dir" cd "$vllm_dir" || { echo "Failed to git clone vllm!"; exit 1; } - git apply ../../vllm_dp/dp_scale_out.patch + git apply $script_dir/vllm_dp/dp_scale_out.patch else echo "The $vllm_dir folder already exists and will not be re-downloaded." cd "$vllm_dir" || { echo "Failed to git clone vllm!"; exit 1; } @@ -49,7 +49,7 @@ pip uninstall msadapter -y pip uninstall vllm -y pip install -v -r requirements/cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu VLLM_TARGET_DEVICE=empty python setup.py install || { echo "Failed to install vllm"; exit 1; } -pip uninstall torch torch-npu torchvision -y +pip uninstall torch torch-npu torchvision torchaudio -y cd .. @@ -100,4 +100,5 @@ cd "$msadapter_dir" || { echo "Failed to git clone msadapter!"; exit 1; } pip uninstall msadapter -y && pip install . || { echo "Failed to install msadapter"; exit 1; } cd .. -echo "========= All dependencies installed successfully!" \ No newline at end of file +echo "========= All dependencies installed successfully!" 
+echo -e "[\033[0;34mnotice\033[0m]Please set the command: export PYTHONPATH=$(pwd)/$mf_dir/:\$PYTHONPATH" -- Gitee From 55d4cdc1466221ed9d148076738bbe50c29a6e6b Mon Sep 17 00:00:00 2001 From: panshaowu Date: Sat, 28 Jun 2025 20:59:59 +0800 Subject: [PATCH 67/76] add tool parser for deepseekv3 --- ...l_chat_template_deepseekv3_zh_prompt.jinja | 101 ++++ vllm_mindspore/__init__.py | 10 + vllm_mindspore/entrypoints/__init__.py | 0 vllm_mindspore/entrypoints/openai/__init__.py | 0 .../entrypoints/openai/serving_chat.py | 484 ++++++++++++++++++ .../openai/tool_parsers/__init__.py | 0 .../tool_parsers/deepseekv3_tool_parser.py | 387 ++++++++++++++ 7 files changed, 982 insertions(+) create mode 100644 examples/tool_chat_template_deepseekv3_zh_prompt.jinja create mode 100644 vllm_mindspore/entrypoints/__init__.py create mode 100644 vllm_mindspore/entrypoints/openai/__init__.py create mode 100644 vllm_mindspore/entrypoints/openai/serving_chat.py create mode 100644 vllm_mindspore/entrypoints/openai/tool_parsers/__init__.py create mode 100644 vllm_mindspore/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py diff --git a/examples/tool_chat_template_deepseekv3_zh_prompt.jinja b/examples/tool_chat_template_deepseekv3_zh_prompt.jinja new file mode 100644 index 000000000..69d542635 --- /dev/null +++ b/examples/tool_chat_template_deepseekv3_zh_prompt.jinja @@ -0,0 +1,101 @@ +{% if not add_generation_prompt is defined %} + {% set add_generation_prompt = false %} +{% endif %} +{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_last_user=false) %} +{%- for message in messages %} + {%- if message['role'] == 'system' %} + {%- if ns.is_first_sp %} + {% set ns.system_prompt = ns.system_prompt + message['content'] %} + {% set ns.is_first_sp = false %} + {%- else %} + {% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %} + {%- endif %} + {%- endif %} +{%- endfor %} + +{#- Adapted from https://github.com/sgl-project/sglang/blob/main/examples/chat_template/tool_chat_template_deepseekr1.jinja #} +{% if tools is defined and tools is not none %} + {% set tool_ns = namespace(text='你可以调用工具函数。' + '当你需要调用工具时,你必须严格遵守下面的格式输出:' + '<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>FUNCTION_NAME\n' + '```json\n{"param1": "value1", "param2": "value2"}\n```<|tool▁call▁end|><|tool▁calls▁end|>\n\n' + '不遵守上面的格式就不能成功调用工具,是错误答案。\n' + '错误答案举例1:function<|tool▁sep|>FUNCTION_NAME\n```json\n{"param1": "value1", "param2": "value2"}\n```' + '<|tool▁call▁end|><|tool▁calls▁end|>\n' + '错误1原因:没有使用<|tool▁calls▁begin|>、<|tool▁call▁begin|>,不符合格式。\n' + '错误答案举例2:<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>FUNCTION_NAME\n```json\n' + '{"param1": "value1", "param2": "value2"}\n```<|tool▁call▁end|>\n' + '错误2原因:没有使用<|tool▁calls▁end|>,不符合格式。\n' + '错误答案举例3:<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>FUNCTION_NAME\n```json\n' + '{"param1": "value1", "param2": "value2"}\n```<|tool▁call▁end|><|tool▁calls▁begin|>\n' + '错误3原因:最后一个<|tool▁calls▁begin|>应为<|tool▁calls▁end|>,不符合格式。' + '## Tools\n\n### Function\n\nYou have the following functions available:\n\n') %} + {% for tool in tools %} + {% set tool_ns.text = tool_ns.text + '\n```json\n' + (tool | tojson) + '\n```\n' %} + {% endfor %} + {% set ns.system_prompt = ns.system_prompt + '\n\n' + tool_ns.text %} +{% endif %} + +{{ bos_token }} +{{ ns.system_prompt }} +{%- for message in messages %} + {% set content = message['content'] %} + {%- if message['role'] == 'user' %} + {%- set 
ns.is_tool = false -%}
+ {%- set ns.is_first = false -%}
+ {%- set ns.is_last_user = true -%}
+ {{'<|User|>' + content + '<|Assistant|>'}}
+ {%- endif %}
+ {%- if message['role'] == 'assistant' %}
+ {% if '</think>' in content %}
+ {% set content = content.split('</think>')[-1] %}
+ {% endif %}
+ {% endif %}
+ {%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
+ {%- set ns.is_last_user = false -%}
+ {%- if ns.is_tool %}
+ {{'<|tool▁outputs▁end|>'}}
+ {%- endif %}
+ {%- set ns.is_first = false %}
+ {%- set ns.is_tool = false -%}
+ {%- set ns.is_output_first = true %}
+ {%- for tool in message['tool_calls'] %}
+ {%- if not ns.is_first %}
+ {%- if content is none %}
+ {{'<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments']|tojson + '\n' + '```' + '<|tool▁call▁end|>'}}
+ {%- else %}
+ {{content + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments']|tojson + '\n' + '```' + '<|tool▁call▁end|>'}}
+ {%- endif %}
+ {%- set ns.is_first = true -%}
+ {%- else %}
+ {{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments']|tojson + '\n' + '```' + '<|tool▁call▁end|>'}}
+ {%- endif %}
+ {%- endfor %}
+ {{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}
+ {%- endif %}
+ {%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none)%}
+ {%- set ns.is_last_user = false -%}
+ {%- if ns.is_tool %}
+ {{'<|tool▁outputs▁end|>' + content + '<|end▁of▁sentence|>'}}
+ {%- set ns.is_tool = false -%}
+ {%- else %}
+ {{content + '<|end▁of▁sentence|>'}}
+ {%- endif %}
+ {%- endif %}
+ {%- if message['role'] == 'tool' %}
+ {%- set ns.is_last_user = false -%}
+ {%- set ns.is_tool = true -%}
+ {%- if ns.is_output_first %}
+ {{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + content + '<|tool▁output▁end|>'}}
+ {%- set ns.is_output_first = false %}
+ {%- else %}
+ {{'\n<|tool▁output▁begin|>' + content + '<|tool▁output▁end|>'}}
+ {%- endif %}
+ {%- endif %}
+{%- endfor -%}
+{% if ns.is_tool %}
+ {{'<|tool▁outputs▁end|>'}}
+{% endif %}
+{% if add_generation_prompt and not ns.is_last_user and not ns.is_tool %}
+ {{'<|Assistant|>'}}
+{% endif %}
diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py
index daeeb2a65..8614e1573 100644
--- a/vllm_mindspore/__init__.py
+++ b/vllm_mindspore/__init__.py
@@ -392,4 +392,14 @@ import vllm.engine.multiprocessing.engine
 vllm.engine.multiprocessing.engine.MQLLMEngine.cleanup = cleanup
+from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
+from vllm_mindspore.entrypoints.openai.serving_chat import chat_completion_stream_generator
+
+OpenAIServingChat.chat_completion_stream_generator = chat_completion_stream_generator
+
+from vllm_mindspore.entrypoints.openai.tool_parsers import deepseekv3_tool_parser
+
+sys.modules[
+ 'vllm.entrypoints.openai.tool_parsers.deepseekv3_tool_parser'] = deepseekv3_tool_parser
+
 check_ready()
diff --git a/vllm_mindspore/entrypoints/__init__.py b/vllm_mindspore/entrypoints/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/vllm_mindspore/entrypoints/openai/__init__.py b/vllm_mindspore/entrypoints/openai/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/vllm_mindspore/entrypoints/openai/serving_chat.py
b/vllm_mindspore/entrypoints/openai/serving_chat.py new file mode 100644 index 000000000..05713071e --- /dev/null +++ b/vllm_mindspore/entrypoints/openai/serving_chat.py @@ -0,0 +1,484 @@ +#!/usr/bin/env python3 +# Copyright 2025 Huawei Technologies Co., Ltd +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# SPDX-License-Identifier: Apache-2.0 +""" +Adapted from +https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/openai/serving_chat.py +""" +import time +from collections.abc import AsyncGenerator, AsyncIterator +from typing import Final, Optional, Union + +from vllm.entrypoints.chat_utils import ConversationMessage +from vllm.entrypoints.openai.protocol import ( + ChatCompletionNamedToolChoiceParam, ChatCompletionRequest, + ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, + DeltaFunctionCall, DeltaMessage, DeltaToolCall, PromptTokenUsageInfo, + RequestResponseMetadata, UsageInfo) +from vllm.entrypoints.openai.tool_parsers import ToolParser +from vllm.logger import init_logger +from vllm.outputs import RequestOutput +from vllm.transformers_utils.tokenizer import AnyTokenizer + +logger = init_logger(__name__) + + +async def chat_completion_stream_generator( + self, + request: ChatCompletionRequest, + result_generator: AsyncIterator[RequestOutput], + request_id: str, + model_name: str, + conversation: list[ConversationMessage], + tokenizer: AnyTokenizer, + request_metadata: RequestResponseMetadata, +) -> AsyncGenerator[str, None]: + created_time = int(time.time()) + chunk_object_type: Final = "chat.completion.chunk" + first_iteration = True + + # Send response for each token for each request.n (index) + num_choices = 1 if request.n is None else request.n + previous_num_tokens = [0] * num_choices + finish_reason_sent = [False] * num_choices + num_prompt_tokens = 0 + num_cached_tokens = None + + if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam): + tool_choice_function_name = request.tool_choice.function.name + else: + tool_choice_function_name = None + + # Determine whether tools are in use with "auto" tool choice + tool_choice_auto = (not tool_choice_function_name and + self._should_stream_with_auto_tool_parsing(request)) + + should_stream_with_reasoning_parsing = ( + self._should_stream_with_reasoning_parsing(request)) + + all_previous_token_ids: Optional[list[list[int]]] + function_name_returned: Optional[list[bool]] = None + + # Only one of these will be used, thus previous_texts and + # all_previous_token_ids will not be used twice in the same iteration. 
+ if tool_choice_auto or should_stream_with_reasoning_parsing: + # These are only required in "auto" tool choice case + previous_texts = [""] * num_choices + all_previous_token_ids = [[]] * num_choices + # For reasoning parser and tool call all enabled + added_content_delta_arr = [False] * num_choices + reasoning_end_arr = [False] * num_choices + elif request.tool_choice == "required": + previous_texts = [""] * num_choices + function_name_returned = [False] * num_choices + all_previous_token_ids = None + else: + previous_texts, all_previous_token_ids = None, None + + try: + # There is no need to check if the reasoning_parser is None + # because the should_stream_with_reasoning_parsing check + # already ensures that the reasoning_parser is not None. + # but the pre-commit hook requires it. + if should_stream_with_reasoning_parsing and \ + self.reasoning_parser is not None: + reasoning_parser = self.reasoning_parser(tokenizer) + except RuntimeError as e: + logger.exception("Error in reasoning parser creation.") + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + yield "data: [DONE]\n\n" + return + + # Prepare the tool parser if it's needed + try: + if tool_choice_auto and self.tool_parser: + tool_parsers: list[Optional[ToolParser]] = [ + self.tool_parser(tokenizer) + ] * num_choices + else: + tool_parsers = [None] * num_choices + except Exception as e: + logger.exception("Error in tool parser creation.") + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + yield "data: [DONE]\n\n" + return + + stream_options = request.stream_options + if stream_options: + include_usage = stream_options.include_usage + include_continuous_usage = include_usage and \ + stream_options.continuous_usage_stats + else: + include_usage, include_continuous_usage = False, False + + try: + async for res in result_generator: + if res.prompt_token_ids is not None: + num_prompt_tokens = len(res.prompt_token_ids) + if res.encoder_prompt_token_ids is not None: + num_prompt_tokens += len(res.encoder_prompt_token_ids) + + # We need to do it here, because if there are exceptions in + # the result_generator, it needs to be sent as the FIRST + # response (by the try...catch). 
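
Every payload this generator emits, including the error paths just above, is framed as a server-sent event: a "data: " prefix, a JSON body, and a blank line, with the literal "data: [DONE]" sentinel closing the stream. A minimal client-side sketch of that framing (the frames below are hand-written examples, not captured server output):

    import json

    def iter_sse_chunks(lines):
        # Parse the "data: {json}" framing; stop at the [DONE] sentinel.
        for line in lines:
            if not line.startswith("data: "):
                continue
            payload = line[len("data: "):].strip()
            if payload == "[DONE]":
                return
            yield json.loads(payload)

    frames = [
        'data: {"choices": [{"delta": {"role": "assistant", "content": ""}}]}',
        'data: [DONE]',
    ]
    for chunk in iter_sse_chunks(frames):
        print(chunk["choices"][0]["delta"])  # {'role': 'assistant', 'content': ''}
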
+ if first_iteration: + num_cached_tokens = res.num_cached_tokens + # Send first response for each request.n (index) with + # the role + role = self.get_chat_request_role(request) + + # NOTE num_choices defaults to 1 so this usually executes + # once per request + for i in range(num_choices): + choice_data = ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage( + role=role, + content="", + ), + logprobs=None, + finish_reason=None) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + + # if continuous usage stats are requested, add it + if include_continuous_usage: + chunk.usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=0, + total_tokens=num_prompt_tokens) + + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + + # Send response to echo the input portion of the + # last message + if request.echo: + last_msg_content: Union[str, list[dict[str, str]]] = "" + if conversation and "content" in conversation[ + -1] and conversation[-1].get("role") == role: + last_msg_content = conversation[-1]["content"] or "" + + if last_msg_content: + for i in range(num_choices): + choice_data = (ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage(content=last_msg_content), + logprobs=None, + finish_reason=None)) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + if include_continuous_usage: + chunk.usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=0, + total_tokens=num_prompt_tokens) + + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + first_iteration = False + + for output in res.outputs: + i = output.index + tool_parser = tool_parsers[i] + + if finish_reason_sent[i]: + continue + + if request.logprobs and request.top_logprobs is not None: + assert output.logprobs is not None, ( + "Did not output logprobs") + logprobs = self._create_chat_logprobs( + token_ids=output.token_ids, + top_logprobs=output.logprobs, + tokenizer=tokenizer, + num_output_top_logprobs=request.top_logprobs, + return_as_token_id=request.return_tokens_as_token_ids, + ) + else: + logprobs = None + + delta_text = output.text + + if not delta_text and not output.token_ids and \ + not previous_num_tokens[i]: + # Chunked prefill case, don't return empty chunks + continue + + delta_message: Optional[DeltaMessage] + + # just update previous_texts and previous_token_ids + if tool_choice_auto or should_stream_with_reasoning_parsing: + assert previous_texts is not None + assert all_previous_token_ids is not None + previous_text = previous_texts[i] + previous_token_ids = all_previous_token_ids[i] + current_text = previous_text + delta_text + current_token_ids = previous_token_ids + list( + output.token_ids) + + # handle streaming deltas for tools with named tool_choice + if tool_choice_function_name: + if (self.enable_reasoning + and not reasoning_parser.is_reasoning_end( + previous_token_ids)): + assert reasoning_parser is not None + delta_message = (reasoning_parser. + extract_reasoning_content_streaming( + previous_text, + current_text, + delta_text, + previous_token_ids, + current_token_ids, + output.token_ids, + )) + # When encountering think end id in delta_token_ids, + # process the `content`. 
Only keep 'content', + # remove 'reasoning_content' + if reasoning_parser.is_reasoning_end( + list(output.token_ids)): + if delta_message and delta_message.content: + # This need to be added to next `delta_text` + current_text = delta_message.content + delta_message.content = None + else: + current_text = "" + else: + # Just to add remaining `content` + if self.enable_reasoning: + delta_text = previous_text + delta_text + current_text = "" + + delta_message = DeltaMessage(tool_calls=[ + DeltaToolCall(function=DeltaFunctionCall( + name=tool_choice_function_name, + arguments=delta_text), + index=i) + ]) + + elif request.tool_choice == "required": + assert previous_texts is not None + assert function_name_returned is not None + previous_text = previous_texts[i] + current_text = previous_text + delta_text + fn_name_returned = function_name_returned[i] + + delta_message, function_name_returned[i] = ( + self.extract_tool_call_required_streaming( + previous_text=previous_text, + current_text=current_text, + delta_text=delta_text, + function_name_returned=fn_name_returned)) + + # update the previous values for the next iteration + previous_texts[i] = current_text + + # handle streaming deltas for tools with "auto" tool choice + # and reasoning parser + elif tool_choice_auto and self.enable_reasoning: + assert tool_parser is not None + assert reasoning_parser is not None + assert added_content_delta_arr is not None + assert reasoning_end_arr is not None + if not reasoning_end_arr[i]: + delta_message = (reasoning_parser. + extract_reasoning_content_streaming( + previous_text, + current_text, + delta_text, + previous_token_ids, + current_token_ids, + output.token_ids, + )) + + # When encountering think end id in delta_token_ids, + # set reasoning status to end. + # Remove the text and token ids related + # to 'reasoning_content'. 
+ if reasoning_parser.is_reasoning_end( + list(output.token_ids)): + reasoning_end_arr[i] = True + current_token_ids = \ + reasoning_parser.extract_content_ids( + list(output.token_ids)) + if delta_message and delta_message.content: + current_text = delta_message.content + delta_message.content = None + else: + current_text = "" + + # handle tool calls only after reasoning is done, + else: + delta_token_ids = list(output.token_ids) + # First time to tool call, + # add the remaining text and token ids + # to delta from previous + if not added_content_delta_arr[i]: + added_content_delta_arr[i] = True + previous_text = "" + previous_token_ids = [] + delta_text = current_text + delta_token_ids = current_token_ids + + delta_message = ( + tool_parser.extract_tool_calls_streaming( + previous_text=previous_text, + current_text=current_text, + delta_text=delta_text, + previous_token_ids=previous_token_ids, + current_token_ids=current_token_ids, + delta_token_ids=delta_token_ids, + request=request)) + # when only tool calls + elif tool_choice_auto: + assert tool_parser is not None + delta_message = (tool_parser.extract_tool_calls_streaming( + previous_text=previous_text, + current_text=current_text, + delta_text=delta_text, + previous_token_ids=previous_token_ids, + current_token_ids=current_token_ids, + delta_token_ids=output.token_ids, + request=request)) + # when only reasoning + elif self.enable_reasoning: + assert reasoning_parser is not None + delta_message = ( + reasoning_parser.extract_reasoning_content_streaming( + previous_text, + current_text, + delta_text, + previous_token_ids, + current_token_ids, + output.token_ids, + )) + # handle streaming just a content delta + else: + delta_message = DeltaMessage(content=delta_text) + + # update the previous values for the next iteration + if tool_choice_auto or should_stream_with_reasoning_parsing: + assert previous_texts is not None + assert all_previous_token_ids is not None + previous_texts[i] = current_text + all_previous_token_ids[i] = current_token_ids + + # set the previous values for the next iteration + previous_num_tokens[i] += len(output.token_ids) + + # if the message delta is None (e.g. 
because it was a + # "control token" for tool calls or the parser otherwise + # wasn't ready to send a token, then + # get the next token without streaming a chunk + if delta_message is None: + continue + + if output.finish_reason is None: + # Send token-by-token response for each request.n + choice_data = ChatCompletionResponseStreamChoice( + index=i, + delta=delta_message, + logprobs=logprobs, + finish_reason=None) + + # if the model is finished generating + else: + # check to make sure we haven't "forgotten" to stream + # any tokens that were generated but previously + # matched by partial json parsing + # only happens if we are NOT using guided decoding + auto_tools_called = False + if tool_parser: + auto_tools_called = len( + tool_parser.prev_tool_call_arr) > 0 + + # Send the finish response for each request.n only once + choice_data = ChatCompletionResponseStreamChoice( + index=i, + delta=delta_message, + logprobs=logprobs, + finish_reason=output.finish_reason + if not auto_tools_called else "tool_calls", + stop_reason=output.stop_reason) + + finish_reason_sent[i] = True + + chunk = ChatCompletionStreamResponse(id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + + # handle usage stats if requested & if continuous + if include_continuous_usage: + completion_tokens = previous_num_tokens[i] + chunk.usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=num_prompt_tokens + completion_tokens, + ) + + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + + # once the final token is handled, if stream_options.include_usage + # is sent, send the usage + if include_usage: + completion_tokens = sum(previous_num_tokens) + final_usage = UsageInfo(prompt_tokens=num_prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=num_prompt_tokens + + completion_tokens) + if self.enable_prompt_tokens_details and num_cached_tokens: + final_usage.prompt_tokens_details = PromptTokenUsageInfo( + cached_tokens=num_cached_tokens) + + final_usage_chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[], + model=model_name, + usage=final_usage) + final_usage_data = (final_usage_chunk.model_dump_json( + exclude_unset=True, exclude_none=True)) + yield f"data: {final_usage_data}\n\n" + + # report to FastAPI middleware aggregate usage across all choices + num_completion_tokens = sum(previous_num_tokens) + request_metadata.final_usage_info = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=num_completion_tokens, + total_tokens=num_prompt_tokens + num_completion_tokens) + + except Exception as e: + # TODO: Use a vllm-specific Validation Error + logger.exception("Error in chat completion stream generator.") + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + # Send the final done message after all response.n are finished + yield "data: [DONE]\n\n" diff --git a/vllm_mindspore/entrypoints/openai/tool_parsers/__init__.py b/vllm_mindspore/entrypoints/openai/tool_parsers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vllm_mindspore/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py b/vllm_mindspore/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py new file mode 100644 index 000000000..c672c2967 --- /dev/null +++ b/vllm_mindspore/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py @@ -0,0 +1,387 @@ +#!/usr/bin/env 
python3
+# Copyright 2025 Huawei Technologies Co., Ltd
+# Copyright 2024 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# SPDX-License-Identifier: Apache-2.0
+"""
+Adapted from
+https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py
+"""
+import re
+from collections.abc import Sequence
+from typing import Union
+
+from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
+ DeltaFunctionCall, DeltaMessage,
+ DeltaToolCall,
+ ExtractedToolCallInformation,
+ FunctionCall, ToolCall)
+from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
+ ToolParser, ToolParserManager)
+from vllm.logger import init_logger
+from vllm.transformers_utils.tokenizer import AnyTokenizer
+from vllm.utils import random_uuid
+
+logger = init_logger(__name__)
+
+
+@ToolParserManager.register_module("deepseek_v3")
+class DeepSeekV3ToolParser(ToolParser):
+
+ def __init__(self, tokenizer: AnyTokenizer):
+ super().__init__(tokenizer)
+
+ self.current_tool_name_sent: bool = False
+ self.prev_tool_call_arr: list[dict] = []
+ self.current_tool_id: int = -1
+ self.streamed_args_for_tool: list[str] = (
+ []) # map what has been streamed for each tool so far to a list
+
+ self.tool_calls_start_token: str = "<|tool▁calls▁begin|>"
+ self.tool_calls_end_token: str = "<|tool▁calls▁end|>"
+
+ self.tool_call_start_token: str = "<|tool▁call▁begin|>"
+ self.tool_call_end_token: str = "<|tool▁call▁end|>"
+
+ self.tool_call_regex = re.compile(
+ r"<|tool▁call▁begin|>(?P<type>.*)<|tool▁sep|>(?P<function_name>.*)\n```json\n(?P<function_arguments>.*)\n```<|tool▁call▁end|>"
+ )
+
+ self.stream_tool_call_portion_regex = re.compile(
+ r"(?P<type>.*)<|tool▁sep|>(?P<function_name>.*)\n```json\n(?P<function_arguments>.*[^\n`])"
+ )
+
+ self.stream_tool_call_name_regex = re.compile(
+ r"(?P<type>.*)<|tool▁sep|>(?P<function_name>.*)\n")
+
+ if not self.model_tokenizer:
+ raise ValueError(
+ "The model tokenizer must be passed to the ToolParser "
+ "constructor during construction.")
+ self.tool_calls_start_token_id = self.vocab.get(
+ self.tool_calls_start_token)
+ self.tool_calls_end_token_id = self.vocab.get(
+ self.tool_calls_end_token)
+
+ self.tool_call_start_token_id = self.vocab.get(
+ self.tool_call_start_token)
+ self.tool_call_end_token_id = self.vocab.get(self.tool_call_end_token)
+
+ if (self.tool_calls_start_token_id is None
+ or self.tool_calls_end_token_id is None):
+ raise RuntimeError(
+ "DeepSeek-V3 Tool parser could not locate tool call start/end "
+ "tokens in the tokenizer!")
+
+ def extract_tool_calls(
+ self,
+ model_output: str,
+ request: ChatCompletionRequest,
+ ) -> ExtractedToolCallInformation:
+
+ # sanity check; avoid unnecessary processing
+ if self.tool_calls_start_token not in model_output:
+ return ExtractedToolCallInformation(tools_called=False,
+ tool_calls=[],
+ content=model_output)
+
+ else:
+ try:
+ # there are two possible captures - between tags, or between a
+ # tag and end-of-string so the result of
+ # findall is an array of tuples
where one is a function call and + # the other is None + function_call_tuples = self.tool_call_regex.findall( + model_output) + + tool_calls = [] + for match in function_call_tuples: + tool_type, function_name, function_args = match + tool_calls.append( + ToolCall( + type=tool_type, + function=FunctionCall(name=function_name, + arguments=function_args), + )) + + content = model_output[:model_output. + find(self.tool_calls_start_token)] + return ExtractedToolCallInformation( + tools_called=True, + tool_calls=tool_calls, + content=content if content else None, + ) + + except Exception: + logger.exception( + "Error in extracting tool call from response.") + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=model_output) + + def extract_tool_calls_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + request: ChatCompletionRequest, + ) -> Union[DeltaMessage, None]: + + logger.debug("delta_text: %s", delta_text) + logger.debug("delta_token_ids: %s", delta_token_ids) + # check to see if we should be streaming a tool call - is there a + if self.tool_calls_start_token_id not in current_token_ids: + logger.debug("No tool call tokens found!") + return DeltaMessage(content=delta_text) + delta_text = delta_text.replace(self.tool_calls_start_token, + "").replace(self.tool_calls_end_token, + "") + try: + + # figure out where we are in the parsing by counting tool call + # start & end tags + prev_tool_start_count = previous_token_ids.count( + self.tool_call_start_token_id) + prev_tool_end_count = previous_token_ids.count( + self.tool_call_end_token_id) + cur_tool_start_count = current_token_ids.count( + self.tool_call_start_token_id) + cur_tool_end_count = current_token_ids.count( + self.tool_call_end_token_id) + tool_call_portion = None + text_portion = None + + # case: if we're generating text, OR rounding out a tool call + if (cur_tool_start_count == cur_tool_end_count + and prev_tool_end_count == cur_tool_end_count + and self.tool_call_end_token not in delta_text): + logger.debug("Generating text content! 
skipping tool parsing.") + return DeltaMessage(content=delta_text) + + if self.tool_call_end_token in delta_text: + logger.debug("tool_call_end_token in delta_text") + full_text = current_text + delta_text + tool_call_portion = full_text.split( + self.tool_call_start_token)[-1].split( + self.tool_call_end_token)[0].rstrip() + delta_text = delta_text.split( + self.tool_call_end_token)[0].rstrip() + text_portion = delta_text.split( + self.tool_call_end_token)[-1].lstrip() + + # case -- we're starting a new tool call + if (cur_tool_start_count > cur_tool_end_count + and cur_tool_start_count > prev_tool_start_count): + if len(delta_token_ids) > 1: + tool_call_portion = current_text.split( + self.tool_call_start_token)[-1] + else: + tool_call_portion = None + delta = None + + text_portion = None + + # set cursors and state appropriately + self.current_tool_id += 1 + self.current_tool_name_sent = False + self.streamed_args_for_tool.append("") + logger.debug("Starting on a new tool %s", self.current_tool_id) + + # case -- we're updating an existing tool call + elif (cur_tool_start_count > cur_tool_end_count + and cur_tool_start_count == prev_tool_start_count): + + # get the portion of the text that's the tool call + tool_call_portion = current_text.split( + self.tool_call_start_token)[-1] + text_portion = None + + # case -- the current tool call is being closed. + elif (cur_tool_start_count == cur_tool_end_count + and cur_tool_end_count >= prev_tool_end_count): + if self.prev_tool_call_arr is None or len( + self.prev_tool_call_arr) == 0: + logger.debug( + "attempting to close tool call, but no tool call") + return None + diff = self.prev_tool_call_arr[self.current_tool_id].get( + "arguments") + if diff: + diff = (diff.encode("utf-8").decode("unicode_escape") + if diff is str else diff) + if '}' not in delta_text: + return None + end_loc = delta_text.rindex('}') + diff = delta_text[:end_loc] + '}' + logger.debug( + "Finishing tool and found diff that had not " + "been streamed yet: %s", + diff, + ) + self.streamed_args_for_tool[self.current_tool_id] += diff + return DeltaMessage(tool_calls=[ + DeltaToolCall( + index=self.current_tool_id, + function=DeltaFunctionCall( + arguments=diff).model_dump(exclude_none=True), + ) + ]) + + # case -- otherwise we're just generating text + else: + text = delta_text.replace(self.tool_call_start_token, "") + text = text.replace(self.tool_call_end_token, "") + delta = DeltaMessage(tool_calls=[], content=text) + return delta + + current_tool_call = dict() + if tool_call_portion: + current_tool_call_matches = ( + self.stream_tool_call_portion_regex.match( + tool_call_portion)) + if current_tool_call_matches: + tool_type, tool_name, tool_args = ( + current_tool_call_matches.groups()) + current_tool_call["name"] = tool_name + current_tool_call["arguments"] = tool_args + else: + current_tool_call_name_matches = ( + self.stream_tool_call_name_regex.match( + tool_call_portion)) + if current_tool_call_name_matches: + tool_type, tool_name = ( + current_tool_call_name_matches.groups()) + current_tool_call["name"] = tool_name + current_tool_call["arguments"] = "" + else: + logger.debug("Not enough token") + return None + + # case - we haven't sent the tool name yet. If it's available, send + # it. otherwise, wait until it's available. 
+ if not self.current_tool_name_sent: + if current_tool_call is None: + return None + function_name: Union[str, None] = current_tool_call.get("name") + if function_name: + self.current_tool_name_sent = True + return DeltaMessage(tool_calls=[ + DeltaToolCall( + index=self.current_tool_id, + type="function", + id=f"chatcmpl-tool-{random_uuid()}", + function=DeltaFunctionCall( + name=function_name).model_dump( + exclude_none=True), + ) + ]) + else: + return None + + # case -- otherwise, send the tool call delta + + # if the tool call portion is None, send the delta as text + if tool_call_portion is None: + # if there's text but not tool calls, send that - + # otherwise None to skip chunk + delta = (DeltaMessage( + content=delta_text) if text_portion is not None else None) + return delta + + # now, the nitty-gritty of tool calls + # now we have the portion to parse as tool call. + + logger.debug("Trying to parse current tool call with ID %s", + self.current_tool_id) + + # if we're starting a new tool call, push an empty object in as + # a placeholder for the arguments + if len(self.prev_tool_call_arr) <= self.current_tool_id: + self.prev_tool_call_arr.append({}) + + # main logic for tool parsing here - compare prev. partially-parsed + # JSON to the current partially-parsed JSON + prev_arguments = self.prev_tool_call_arr[self.current_tool_id].get( + "arguments") + cur_arguments = current_tool_call.get("arguments") + + logger.debug("diffing old arguments: %s", prev_arguments) + logger.debug("against new ones: %s", cur_arguments) + + # case -- no arguments have been created yet. skip sending a delta. + if not cur_arguments and not prev_arguments: + logger.debug("Skipping text %s - no arguments", delta_text) + delta = None + + # case -- prev arguments are defined, but non are now. + # probably impossible, but not a fatal error - just keep going + elif not cur_arguments and prev_arguments: + logger.error("should be impossible to have arguments reset " + "mid-call. skipping streaming anything.") + delta = None + + # case -- we now have the first info about arguments available from + # autocompleting the JSON + elif cur_arguments and not prev_arguments: + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=self.current_tool_id, + function=DeltaFunctionCall( + arguments=cur_arguments).model_dump( + exclude_none=True), + ) + ]) + self.streamed_args_for_tool[ + self.current_tool_id] = cur_arguments + + # last case -- we have an update to existing arguments. + elif cur_arguments and prev_arguments: + if (isinstance(delta_text, str) + and cur_arguments != prev_arguments + and len(cur_arguments) > len(prev_arguments) + and cur_arguments.startswith(prev_arguments)): + delta_arguments = cur_arguments[len(prev_arguments):] + logger.debug("got diff %s", delta_text) + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=self.current_tool_id, + function=DeltaFunctionCall( + arguments=delta_arguments).model_dump( + exclude_none=True), + ) + ]) + self.streamed_args_for_tool[ + self.current_tool_id] = cur_arguments + else: + delta = None + + # handle saving the state for the current tool into + # the "prev" list for use in diffing for the next iteration + if self.current_tool_id == len(self.prev_tool_call_arr) - 1: + self.prev_tool_call_arr[ + self.current_tool_id] = current_tool_call + else: + self.prev_tool_call_arr.append(current_tool_call) + + return delta + + except Exception: + logger.exception("Error trying to handle streaming tool call.") + return None # do not stream a delta. 
-- Gitee

From 0c8a657456982fe1beb62613def1dba45959a05a Mon Sep 17 00:00:00 2001
From: tronzhang
Date: Fri, 27 Jun 2025 14:24:01 +0800
Subject: [PATCH 68/76] cleanup all processes in group when main exits

---
 vllm_mindspore/__init__.py                    | 13 ++--
 vllm_mindspore/v1/executor/__init__.py        |  0
 .../v1/executor/multiproc_executor.py         | 64 +++++++++++++++++++
 3 files changed, 73 insertions(+), 4 deletions(-)
 create mode 100644 vllm_mindspore/v1/executor/__init__.py
 create mode 100644 vllm_mindspore/v1/executor/multiproc_executor.py

diff --git a/vllm_mindspore/__init__.py b/vllm_mindspore/__init__.py
index daeeb2a65..fb286c319 100644
--- a/vllm_mindspore/__init__.py
+++ b/vllm_mindspore/__init__.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python3
-# type: ignore
-# isort:skip_file
 # Copyright 2025 Huawei Technologies Co., Ltd
 # Copyright 2024 The vLLM team.
 #
@@ -15,7 +12,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
+"""Main entry point for monkey patching vllm."""
+
+# type: ignore
+# isort:skip_file
 
 import sys
 import warnings
@@ -385,6 +385,11 @@ from vllm.v1.core.sched.scheduler import Scheduler
 
 Scheduler.update_from_output = update_from_output
 
+from vllm_mindspore.v1.executor.multiproc_executor import executor_ensure_worker_termination
+from vllm.v1.executor.multiproc_executor import MultiprocExecutor
+
+MultiprocExecutor._ensure_worker_termination = executor_ensure_worker_termination
+
 from .utils import check_ready
 
 from vllm_mindspore.engine.multiprocessing.engine import cleanup
diff --git a/vllm_mindspore/v1/executor/__init__.py b/vllm_mindspore/v1/executor/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/vllm_mindspore/v1/executor/multiproc_executor.py b/vllm_mindspore/v1/executor/multiproc_executor.py
new file mode 100644
index 000000000..04c9190c4
--- /dev/null
+++ b/vllm_mindspore/v1/executor/multiproc_executor.py
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: Apache-2.0
+
+# Functions are adapted from vllm-project/vllm/v1/executor/multiproc_executor.py
+#
+# Copyright 2025 Huawei Technologies Co., Ltd.
+# Copyright 2024 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Monkey Patch functions for v1 executor mp distributed backend."""
+import os
+import signal
+import time
+
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
+
+
+def executor_ensure_worker_termination(self):
+    """Ensure that all worker processes are terminated. Assumes workers have
+    received termination requests. Waits for processing, then sends
+    termination and kill signals if needed."""
+
+    def wait_for_termination(procs, timeout):
+        if not time:
+            # If we are in late stage shutdown, the interpreter may replace
+            # `time` with `None`.
+            return all(not proc.is_alive() for proc in procs)
+        start_time = time.time()
+        while time.time() - start_time < timeout:
+            if all(not proc.is_alive() for proc in procs):
+                return True
+            time.sleep(0.1)
+        return False
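
The `if not time:` guard in `wait_for_termination` reflects a CPython shutdown subtlety: during late interpreter shutdown, module globals can already have been cleared, so calling `time.time()` unconditionally could fail. A small stand-alone illustration of the same defensive pattern (an assumption about shutdown behavior, mirroring the comment in the code above):

    import time

    def safe_elapsed(start):
        if not time:  # module global may already be gone at late shutdown
            return None
        return time.time() - start

    print(safe_elapsed(time.time()) is not None)  # True during normal runtime
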
+
+    # Send SIGTERM if still running
+    active_procs = [w.proc for w in self.workers if w.proc.is_alive()]
+    for p in active_procs:
+        p.terminate()
+    if not wait_for_termination(active_procs, 4):
+        # Send SIGKILL if still running
+        active_procs = [p for p in active_procs if p.is_alive()]
+        for p in active_procs:
+            # vllm-mindspore begin: kill all the processes in the process
+            # group (including the scheduler process, kernel processes and
+            # so on) instead of calling p.kill.
+            pid = p.pid
+            try:
+                os.killpg(pid, signal.SIGKILL)
+            except Exception as e:
+                logger.debug("Kill process %d error: %s!", pid, str(e))
+            # vllm-mindspore end.
+
+    self._cleanup_sockets()
-- Gitee

From bd272be446200b21611393f6512228ec3cabe8d4 Mon Sep 17 00:00:00 2001
From: horcam
Date: Wed, 2 Jul 2025 21:28:33 +0800
Subject: [PATCH 69/76] add deepseekv2 pure dp

---
 .../model_executor/layers/fused_moe/layer.py  |   3 +-
 .../model_executor/models/deepseek_v2_moe.py  | 254 ++++++++++++++++++
 2 files changed, 256 insertions(+), 1 deletion(-)
 create mode 100644 vllm_mindspore/model_executor/models/deepseek_v2_moe.py

diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
index 2692ce1ff..cfcb53793 100644
--- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py
+++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py
@@ -430,6 +430,7 @@ class FusedMoE(nn.Cell):
         e_score_correction_bias: Optional[Tensor] = None,
         apply_router_weight_on_input: bool = False,
         activation: str = "silu",
+        num_redundant_experts: int = 0,
     ):
         super().__init__()
 
@@ -446,7 +447,7 @@ class FusedMoE(nn.Cell):
                     get_dp_group().world_size),
                 vllm_parallel_config=vllm_config.parallel_config))
 
-        self.global_num_experts = num_experts
+        self.global_num_experts = num_experts + num_redundant_experts
 
         # For smuggling this layer into the fused moe custom op
         self.use_direct_call = self.dp_size == 1
diff --git a/vllm_mindspore/model_executor/models/deepseek_v2_moe.py b/vllm_mindspore/model_executor/models/deepseek_v2_moe.py
new file mode 100644
index 000000000..6b5ba6e77
--- /dev/null
+++ b/vllm_mindspore/model_executor/models/deepseek_v2_moe.py
@@ -0,0 +1,254 @@
+# SPDX-License-Identifier: Apache-2.0
+
+# Copyright 2024 The Qwen team.
+# Copyright 2023 The vLLM team.
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
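
The `num_redundant_experts` hook added to `FusedMoE` above is pure bookkeeping: redundant experts enlarge the physical expert pool that gets sharded across expert-parallel ranks, while routing still targets the logical experts. A minimal sketch of the sizing arithmetic the DeepSeek model below relies on (names are illustrative, not the vllm-mindspore API):

    def physical_expert_range(ep_rank, ep_size, n_logical, n_redundant=0):
        # Physical pool = logical experts plus replicated (redundant) ones.
        n_physical = n_logical + n_redundant
        assert n_physical % ep_size == 0, "experts must shard evenly"
        per_rank = n_physical // ep_size
        start = ep_rank * per_rank
        return start, start + per_rank

    assert physical_expert_range(1, 4, 62, n_redundant=2) == (16, 32)
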
+"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" +from collections.abc import Iterable +from typing import Any, Optional, Union, Dict, Tuple, List + +import numpy as np +import mindspore as ms +from mindspore import Tensor, nn, Parameter, mint +from mindspore import Tensor, nn, mutable +from mindspore.common import dtype as mstype + +from transformers import PretrainedConfig +from vllm.config import CacheConfig, VllmConfig +from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, + get_dp_group) +from vllm.logger import init_logger +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from vllm_mindspore.attention import Attention +from vllm_mindspore.model_executor.layers.activation import SiluAndMul +from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE +from vllm_mindspore.model_executor.layers.layernorm import RMSNorm +from vllm_mindspore.model_executor.layers.linear import ( + MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, + RowParallelLinear) +from vllm_mindspore.model_executor.layers.logits_processor import ( + LogitsProcessor) +from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope +from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader + +from vllm_mindspore.model_executor.models.utils import ( + extract_layer_index, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +from vllm_mindspore.model_executor.models.model_base import NativeModel +from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, + get_sampler) +from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE + +logger = init_logger(__name__) + + +class DeepseekV2MLP(nn.Cell): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj") + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results, + prefix=f"{prefix}.down_proj") + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def construct(self, x, dp_pad_index, dp_unpad_index, dp_unpad_index_total_with_offset): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class DeepseekV2MoE(Cell): + r""" + This is an implementation of self-attention mechanism in DeepSeek-V3. + + Args: + - **config** (Config): Model config of DeepSeek-V3. + + Inputs: + - **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensor. + + Outputs: + - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. 
+ """ + + def __init__(self, config): + super(DeepseekV2MoE, self).__init__() + self.tp_size = get_tensor_model_parallel_world_size() + self.routed_scaling_factor = config.routed_scaling_factor + + # zhq: ep_group needed + # self.ep_group = get_ep_group().device_group + # self.ep_rank = self.ep_group.rank() + # self.ep_size = self.ep_group.size() + + self.n_routed_experts: int = config.n_routed_experts + self.n_shared_experts: int = config.n_shared_experts + + """zhq: needed? + if config.hidden_act != "silu": + raise ValueError(f"Unsupported activation: {config.hidden_act}. " + "Only silu is supported for now.") + """ + + + + self.gate = ReplicatedLinear(config.hidden_size, + config.n_routed_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") + + # Load balancing settings. zhq: needed? + vllm_config = get_current_vllm_config() + parallel_config = vllm_config.parallel_config + self.enable_eplb = enable_eplb + + self.n_redundant_experts = parallel_config.num_redundant_experts + self.n_logical_experts = self.n_routed_experts + self.n_physical_experts = (self.n_logical_experts + + self.n_redundant_experts) + self.n_local_physical_experts = self.n_physical_experts // self.ep_size + + self.physical_expert_start = (self.ep_rank * + self.n_local_physical_experts) + self.physical_expert_end = (self.physical_expert_start + + self.n_local_physical_experts) + + self.experts = FusedMoE( + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + e_score_correction_bias=self.gate.e_score_correction_bias, + num_redundant_experts=self.n_redundant_experts) + + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + + self.shared_experts = DeepseekV2MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=self.experts.must_reduce_shared_expert_outputs( + ), + prefix=f"{prefix}.shared_experts", + ) + + + def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: + # NOTE: hidden_states can have either 1D or 2D shape. + orig_shape = hidden_states.shape + hidden_dim = hidden_states.shape[-1] + hidden_states = hidden_states.view(-1, hidden_dim) + if self.n_shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + router_logits, _ = self.gate(hidden_states) + final_hidden_states = self.experts(hidden_states=hidden_states, + router_logits=router_logits, + dp_pad_index=dp_pad_index, + dp_unpad_index=dp_unpad_index, + dp_pad_index_with_offset=dp_pad_index_with_offset, + dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) + if shared_output is not None: + if hidden_states.dtype != torch.float16: + final_hidden_states = final_hidden_states + shared_output + else: + # Fix FP16 overflow + # See DeepseekV2DecoderLayer for more details. + final_hidden_states = final_hidden_states + shared_output \ + * (1. 
/ self.routed_scaling_factor) + + if self.tp_size > 1: + final_hidden_states = ( + self.experts.maybe_all_reduce_tensor_model_parallel( + final_hidden_states)) + + return final_hidden_states.view(num_tokens, hidden_dim) + + + + + + + +class DeepseekV2ForCausalLM(NativeModel, SupportsPP): + packed_modules_mapping = {} + fall_back_to_pt_during_load = False + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + self.model = DeepseekV2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + self.expert_weights = [] + + self.sampler = get_sampler() + + + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.model.get_input_embeddings(input_ids) + -- Gitee From 2659c4ef8f95dd2b7ab25c34108ea3f358143177 Mon Sep 17 00:00:00 2001 From: horcam Date: Thu, 3 Jul 2025 21:25:17 +0800 Subject: [PATCH 70/76] update dsv3 moe tp DeekseekV2ForCausalLm DeepseekV2DecoderLayer --- .../{deepseek_v2_moe.py => deepseek_v2.py} | 871 +++++++++++++----- .../model_executor/models/registry.py | 2 + 2 files changed, 619 insertions(+), 254 deletions(-) rename vllm_mindspore/model_executor/models/{deepseek_v2_moe.py => deepseek_v2.py} (40%) diff --git a/vllm_mindspore/model_executor/models/deepseek_v2_moe.py b/vllm_mindspore/model_executor/models/deepseek_v2.py similarity index 40% rename from vllm_mindspore/model_executor/models/deepseek_v2_moe.py rename to vllm_mindspore/model_executor/models/deepseek_v2.py index 6b5ba6e77..406d2c215 100644 --- a/vllm_mindspore/model_executor/models/deepseek_v2_moe.py +++ b/vllm_mindspore/model_executor/models/deepseek_v2.py @@ -1,254 +1,617 @@ -# SPDX-License-Identifier: Apache-2.0 - -# Copyright 2024 The Qwen team. -# Copyright 2023 The vLLM team. -# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library. It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" -from collections.abc import Iterable -from typing import Any, Optional, Union, Dict, Tuple, List - -import numpy as np -import mindspore as ms -from mindspore import Tensor, nn, Parameter, mint -from mindspore import Tensor, nn, mutable -from mindspore.common import dtype as mstype - -from transformers import PretrainedConfig -from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, - get_dp_group) -from vllm.logger import init_logger -from vllm.model_executor.layers.quantization import QuantizationConfig -from vllm.model_executor.models.interfaces import SupportsPP -from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors - -from vllm_mindspore.attention import Attention -from vllm_mindspore.model_executor.layers.activation import SiluAndMul -from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE -from vllm_mindspore.model_executor.layers.layernorm import RMSNorm -from vllm_mindspore.model_executor.layers.linear import ( - MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, - RowParallelLinear) -from vllm_mindspore.model_executor.layers.logits_processor import ( - LogitsProcessor) -from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope -from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( - ParallelLMHead, VocabParallelEmbedding) -from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader - -from vllm_mindspore.model_executor.models.utils import ( - extract_layer_index, is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) -from vllm_mindspore.model_executor.models.model_base import NativeModel -from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, - get_sampler) -from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE - -logger = init_logger(__name__) - - -class DeepseekV2MLP(nn.Cell): - - def __init__( - self, - hidden_size: int, - intermediate_size: int, - hidden_act: str, - quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True, - prefix: str = "", - ) -> None: - super().__init__() - self.gate_up_proj = MergedColumnParallelLinear( - hidden_size, [intermediate_size] * 2, - bias=False, - quant_config=quant_config, - prefix=f"{prefix}.gate_up_proj") - self.down_proj = RowParallelLinear(intermediate_size, - hidden_size, - bias=False, - quant_config=quant_config, - reduce_results=reduce_results, - prefix=f"{prefix}.down_proj") - if hidden_act != "silu": - raise ValueError(f"Unsupported activation: {hidden_act}. " - "Only silu is supported for now.") - self.act_fn = SiluAndMul() - - def construct(self, x, dp_pad_index, dp_unpad_index, dp_unpad_index_total_with_offset): - gate_up, _ = self.gate_up_proj(x) - x = self.act_fn(gate_up) - x, _ = self.down_proj(x) - return x - - -class DeepseekV2MoE(Cell): - r""" - This is an implementation of self-attention mechanism in DeepSeek-V3. - - Args: - - **config** (Config): Model config of DeepSeek-V3. - - Inputs: - - **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensor. - - Outputs: - - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. 
- """ - - def __init__(self, config): - super(DeepseekV2MoE, self).__init__() - self.tp_size = get_tensor_model_parallel_world_size() - self.routed_scaling_factor = config.routed_scaling_factor - - # zhq: ep_group needed - # self.ep_group = get_ep_group().device_group - # self.ep_rank = self.ep_group.rank() - # self.ep_size = self.ep_group.size() - - self.n_routed_experts: int = config.n_routed_experts - self.n_shared_experts: int = config.n_shared_experts - - """zhq: needed? - if config.hidden_act != "silu": - raise ValueError(f"Unsupported activation: {config.hidden_act}. " - "Only silu is supported for now.") - """ - - - - self.gate = ReplicatedLinear(config.hidden_size, - config.n_routed_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate") - - # Load balancing settings. zhq: needed? - vllm_config = get_current_vllm_config() - parallel_config = vllm_config.parallel_config - self.enable_eplb = enable_eplb - - self.n_redundant_experts = parallel_config.num_redundant_experts - self.n_logical_experts = self.n_routed_experts - self.n_physical_experts = (self.n_logical_experts + - self.n_redundant_experts) - self.n_local_physical_experts = self.n_physical_experts // self.ep_size - - self.physical_expert_start = (self.ep_rank * - self.n_local_physical_experts) - self.physical_expert_end = (self.physical_expert_start + - self.n_local_physical_experts) - - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func=config.scoring_func, - e_score_correction_bias=self.gate.e_score_correction_bias, - num_redundant_experts=self.n_redundant_experts) - - if config.n_shared_experts is not None: - intermediate_size = (config.moe_intermediate_size * - config.n_shared_experts) - - self.shared_experts = DeepseekV2MLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs( - ), - prefix=f"{prefix}.shared_experts", - ) - - - def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, - dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: - # NOTE: hidden_states can have either 1D or 2D shape. - orig_shape = hidden_states.shape - hidden_dim = hidden_states.shape[-1] - hidden_states = hidden_states.view(-1, hidden_dim) - if self.n_shared_experts is not None: - shared_output = self.shared_experts(hidden_states) - router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts(hidden_states=hidden_states, - router_logits=router_logits, - dp_pad_index=dp_pad_index, - dp_unpad_index=dp_unpad_index, - dp_pad_index_with_offset=dp_pad_index_with_offset, - dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) - if shared_output is not None: - if hidden_states.dtype != torch.float16: - final_hidden_states = final_hidden_states + shared_output - else: - # Fix FP16 overflow - # See DeepseekV2DecoderLayer for more details. - final_hidden_states = final_hidden_states + shared_output \ - * (1. 
/ self.routed_scaling_factor) - - if self.tp_size > 1: - final_hidden_states = ( - self.experts.maybe_all_reduce_tensor_model_parallel( - final_hidden_states)) - - return final_hidden_states.view(num_tokens, hidden_dim) - - - - - - - -class DeepseekV2ForCausalLM(NativeModel, SupportsPP): - packed_modules_mapping = {} - fall_back_to_pt_during_load = False - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - config = vllm_config.model_config.hf_config - quant_config = vllm_config.quant_config - self.config = config - self.quant_config = quant_config - self.model = DeepseekV2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - self.expert_weights = [] - - self.sampler = get_sampler() - - - - def get_input_embeddings(self, input_ids: Tensor) -> Tensor: - return self.model.get_input_embeddings(input_ids) - +# SPDX-License-Identifier: Apache-2.0 + +# Copyright 2024 The Qwen team. +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" +from collections.abc import Iterable +from typing import Any, Optional, Union, Dict, Tuple, List + +import numpy as np +import mindspore as ms +from mindspore import Tensor, nn, Parameter, mint +from mindspore import Tensor, nn, mutable +from mindspore.common import dtype as mstype + +from transformers import PretrainedConfig +from vllm.config import CacheConfig, VllmConfig +from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, + get_dp_group) +from vllm.logger import init_logger +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from vllm_mindspore.attention import Attention +from vllm_mindspore.model_executor.layers.activation import SiluAndMul +from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE +from vllm_mindspore.model_executor.layers.layernorm import RMSNorm +from vllm_mindspore.model_executor.layers.linear import ( + MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, + RowParallelLinear) +from vllm_mindspore.model_executor.layers.logits_processor import ( + LogitsProcessor) +from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope +from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader + +from vllm_mindspore.model_executor.models.utils import ( + extract_layer_index, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +from vllm_mindspore.model_executor.models.model_base import NativeModel +from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, + get_sampler) +from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE + +logger = init_logger(__name__) + + +class DeepseekV2MLP(nn.Cell): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj") + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results, + prefix=f"{prefix}.down_proj") + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def construct(self, x, dp_pad_index, dp_unpad_index, dp_unpad_index_total_with_offset): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class DeepseekV2MoE(Cell): + r""" + This is an implementation of self-attention mechanism in DeepSeek-V3. + + Args: + - **config** (Config): Model config of DeepSeek-V3. + + Inputs: + - **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensor. + + Outputs: + - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. 
+ """ + + def __init__(self, config): + super(DeepseekV2MoE, self).__init__() + self.tp_size = get_tensor_model_parallel_world_size() + self.routed_scaling_factor = config.routed_scaling_factor + + # zhq: ep_group needed + # self.ep_group = get_ep_group().device_group + # self.ep_rank = self.ep_group.rank() + # self.ep_size = self.ep_group.size() + + self.n_routed_experts: int = config.n_routed_experts + self.n_shared_experts: int = config.n_shared_experts + + """zhq: needed? + if config.hidden_act != "silu": + raise ValueError(f"Unsupported activation: {config.hidden_act}. " + "Only silu is supported for now.") + """ + + + + self.gate = ReplicatedLinear(config.hidden_size, + config.n_routed_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") + + # Load balancing settings. zhq: needed? + vllm_config = get_current_vllm_config() + parallel_config = vllm_config.parallel_config + self.enable_eplb = enable_eplb + + self.n_redundant_experts = parallel_config.num_redundant_experts + self.n_logical_experts = self.n_routed_experts + self.n_physical_experts = (self.n_logical_experts + + self.n_redundant_experts) + self.n_local_physical_experts = self.n_physical_experts // self.ep_size + + self.physical_expert_start = (self.ep_rank * + self.n_local_physical_experts) + self.physical_expert_end = (self.physical_expert_start + + self.n_local_physical_experts) + + self.experts = FusedMoE( + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + e_score_correction_bias=self.gate.e_score_correction_bias, + num_redundant_experts=self.n_redundant_experts) + + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + + self.shared_experts = DeepseekV2MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=self.experts.must_reduce_shared_expert_outputs( + ), + prefix=f"{prefix}.shared_experts", + ) + + + def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: + # NOTE: hidden_states can have either 1D or 2D shape. + orig_shape = hidden_states.shape + hidden_dim = hidden_states.shape[-1] + hidden_states = hidden_states.view(-1, hidden_dim) + if self.n_shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + router_logits, _ = self.gate(hidden_states) + final_hidden_states = self.experts(hidden_states=hidden_states, + router_logits=router_logits, + dp_pad_index=dp_pad_index, + dp_unpad_index=dp_unpad_index, + dp_pad_index_with_offset=dp_pad_index_with_offset, + dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) + if shared_output is not None: + if hidden_states.dtype != torch.float16: + final_hidden_states = final_hidden_states + shared_output + else: + # Fix FP16 overflow + # See DeepseekV2DecoderLayer for more details. + final_hidden_states = final_hidden_states + shared_output \ + * (1. 
/ self.routed_scaling_factor) + + if self.tp_size > 1: + final_hidden_states = ( + self.experts.maybe_all_reduce_tensor_model_parallel( + final_hidden_states)) + + return final_hidden_states.view(num_tokens, hidden_dim) + + +class DeepseekV2FakedAttention(nn.Cell): + + def __init__( + self, + config: PretrainedConfig, + hidden_size: int, + num_heads: int, + qk_nope_head_dim: int, + qk_rope_head_dim: int, + v_head_dim: int, + q_lora_rank: int, + kv_lora_rank: int, + rope_theta: float = 10000, + rope_scaling: Optional[dict[str, Any]] = None, + max_position_embeddings: int = 8192, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = hidden_size + self.qk_nope_head_dim = qk_nope_head_dim + self.qk_rope_head_dim = qk_rope_head_dim + self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim + self.v_head_dim = v_head_dim + self.q_lora_rank = q_lora_rank + self.kv_lora_rank = kv_lora_rank + self.num_heads = num_heads + tp_size = get_tensor_model_parallel_world_size() + assert num_heads % tp_size == 0 + self.num_local_heads = num_heads // tp_size + self.scaling = self.qk_head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + def construct( + self, + positions: Tensor, + hidden_states: Tensor, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, + residual: Optional[Tensor], + dp_pad_index: Optional[bool] = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, + ) -> Tensor: + return hidden_states, residual + +class DeepseekV2DecoderLayer(nn.Cell): + + def __init__( + self, + config: PretrainedConfig, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) + layer_idx = int(prefix.split(sep='.')[-1]) + self.layer_idx = layer_idx + self.self_attn = DeepseekV2FakedAttention( + config=config, + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + qk_nope_head_dim=config.qk_nope_head_dim, + qk_rope_head_dim=config.qk_rope_head_dim, + v_head_dim=config.v_head_dim, + q_lora_rank=config.q_lora_rank + if hasattr(config, "q_lora_rank") else None, + kv_lora_rank=config.kv_lora_rank, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) + + if (config.n_routed_experts is not None + and layer_idx >= config.first_k_dense_replace + and layer_idx % config.moe_layer_freq == 0): + self.mlp = DeepseekV2MoE( + config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + enable_eplb=enable_eplb, + ) + else: + self.mlp = DeepseekV2MLP( + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = 
RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.routed_scaling_factor = config.routed_scaling_factor + + + def construct( + self, + positions: Tensor, + hidden_states: Tensor, + key_cache: Tensor, + value_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, + residual: Optional[Tensor], + dp_pad_index: Optional[bool] = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, + ) -> Tensor: + + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn(positions, hidden_states, key_cache, + value_cache, is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables) + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) + return hidden_states, residual + + +class DeepseekV2Model(nn.Cell): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.config = config + + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=f"{prefix}.embed_tokens") + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: DeepseekV2DecoderLayer( + config, + prefix, + model_config=model_config, + cache_config=cache_config, + quant_config=quant_config, + enable_eplb=enable_eplb, + ), + prefix=f"{prefix}.layers") + + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.embed_tokens(input_ids) + + def construct( + self, + input_ids: Tensor, + positions: Tensor, + key_caches: List[Tensor], + value_caches: List[Tensor], + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + dp_pad_index = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_total_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, + + ) -> Union[Tensor, IntermediateTensors]: + + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] + + for i in range(self.start_layer, self.end_layer): + layer = self.layers[i] + hidden_states, residual = layer(positions, hidden_states, + key_caches[i - self.start_layer], + value_caches[i - self.start_layer], + is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables, residual, + dp_pad_index, dp_unpad_index, + dp_pad_index_total_with_offset, + 
dp_unpad_index_total_with_offset) + + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + +class DeepseekV2ForCausalLM(NativeModel, SupportsPP): + packed_modules_mapping = {} + fall_back_to_pt_during_load = False + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + self.model = DeepseekV2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + self.expert_weights = [] + + self.sampler = get_sampler() + + + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.model.get_input_embeddings(input_ids) + + def forward( + self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + ) -> Union[Tensor, IntermediateTensors]: + hidden_states = self.model(input_ids, positions, intermediate_tensors, + inputs_embeds) + return hidden_states + + def sample(self, logits: Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def compute_logits( + self, + hidden_states: Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def load_weights(self, weights: Iterable[Tuple[str, Tensor]], + params_dict: Dict[str, Parameter]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + + # Params for weights, fp8 weight scales, fp8 activation scales + # (param_name, weight_name, expert_id, shard_id) + # zhq: needed? + expert_params_mapping = FusedMoE.make_expert_params_mapping( + ckpt_gate_proj_name="gate_proj", + ckpt_down_proj_name="down_proj", + ckpt_up_proj_name="up_proj", + num_experts=self.config.n_routed_experts, + num_redundant_experts=self.num_redundant_experts) + + params_dict = dict(self.named_parameters()) + loaded_params: set[str] = set() + + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + + spec_layer = get_spec_layer_idx_from_weight_name(self.config, name) + if spec_layer is not None: + continue # skip spec decode layers for main model + + for (param_name, weight_name, shard_id) in stacked_params_mapping: + # Skip non-stacked layers and experts (experts handled below). + if weight_name not in name: + continue + # We have mlp.experts[0].gate_proj in the checkpoint. + # Since we handle the experts below in expert_params_mapping, + # we need to skip here BEFORE we update the name, otherwise + # name will be updated to mlp.experts[0].gate_up_proj, which + # will then be updated below in expert_params_mapping + # for mlp.experts[0].gate_gate_up_proj, which breaks load. + if (("mlp.experts." in name) and name not in params_dict): + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + is_expert_weight = False + for mapping in expert_params_mapping: + param_name, weight_name, expert_id, shard_id = mapping + if weight_name not in name: + continue + # Anyway, this is an expert weight and should not be + # attempted to load as other weights later + is_expert_weight = True + + # Do not modify `name` since the loop may continue here + # Instead, create a new variable + name_mapped = name.replace(weight_name, param_name) + + if is_pp_missing_parameter(name_mapped, self): + continue + + param = params_dict[name_mapped] + weight_loader = param.weight_loader + + success = weight_loader(param, + loaded_weight, + name_mapped, + shard_id=shard_id, + expert_id=expert_id, + return_success=True) + if success: + name = name_mapped + break + else: + if is_expert_weight: + # We've checked that this is an expert weight + # However it's not mapped locally to this rank + # So we simply skip it + continue + + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + + # Remapping the name of FP8 kv-scale. + # zhq: needed? + # name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + + +class DeepseekV3ForCausalLM(DeepseekV2ForCausalLM): + pass \ No newline at end of file diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index 50dde9a41..1ce60fae6 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -31,6 +31,8 @@ _NATIVE_MODELS = { "Qwen2_5_VLForConditionalGeneration": ("qwen2_5_vl", "Qwen2_5_VLForConditionalGeneration"), "Qwen3MoeForCausalLM": ("qwen3_moe", "Qwen3MoeForCausalLM"), + "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"), + "DeepseekV3ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"), } _MINDFORMERS_MODELS = { -- Gitee From 0da33f0bbbc0a18a2ff9824bdf35e643cead0d99 Mon Sep 17 00:00:00 2001 From: w00521005 Date: Fri, 4 Jul 2025 16:15:46 +0800 Subject: [PATCH 71/76] add comm ops --- .../distributed/communication_op.py | 217 ++++++++++++++++-- 1 file changed, 192 insertions(+), 25 deletions(-) diff --git a/vllm_mindspore/distributed/communication_op.py b/vllm_mindspore/distributed/communication_op.py index a24d49595..2797c9c5b 100644 --- a/vllm_mindspore/distributed/communication_op.py +++ b/vllm_mindspore/distributed/communication_op.py @@ -20,8 +20,10 @@ from typing import Any, Dict, Optional, Union -from mindspore import Tensor, nn, ops -from mindspore.communication.comm_func import all_reduce, broadcast +from mindspore import Tensor, nn, ops, mint +from mindspore.communication import get_rank, get_group_size, GlobalComm +from mindspore.communication.comm_func import (all_reduce, broadcast, all_gather_into_tensor, + reduce_scatter_tensor, scatter_tensor) from vllm.distributed.parallel_state import ( get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, get_tp_group, get_world_group) @@ -34,6 +36,59 @@ def tensor_model_parallel_all_reduce(input_: Tensor) -> Tensor: 
    output, _ = all_reduce(input_, group=get_tp_group())
     return output
 
+def tensor_world_parallel_all_reduce(input_: Tensor) -> Tensor:
+    """All-reduce the input tensor across the world parallel group."""
+    world_size = get_group_size()
+    if world_size == 1:
+        return input_
+    output, _ = all_reduce(input_, group=get_world_group())
+    return output
+
+def tensor_model_parallel_all_gather(input_: Tensor, axis=-1) -> Tensor:
+    """All-gather the input tensor across the model parallel group."""
+    if get_tensor_model_parallel_world_size() == 1:
+        return input_
+    input_ = input_ if axis == 0 else mint.transpose(input_, 0, -1)
+    output = all_gather_into_tensor(input_, group=get_tp_group())
+    output = output if axis == 0 else mint.transpose(output, 0, -1)
+    return output
+
+def tensor_world_parallel_all_gather(input_: Tensor, axis=-1) -> Tensor:
+    """All-gather the input tensor across the world parallel group."""
+    if get_group_size() == 1:
+        return input_
+    input_ = input_ if axis == 0 else mint.transpose(input_, 0, -1)
+    output = all_gather_into_tensor(input_, group=get_world_group())
+    output = output if axis == 0 else mint.transpose(output, 0, -1)
+    return output
+
+def tensor_model_parallel_reduce_scatter(input_: Tensor) -> Tensor:
+    """Reduce-scatter the input tensor across the model parallel group."""
+    if get_tensor_model_parallel_world_size() == 1:
+        return input_
+    output = reduce_scatter_tensor(input_, group=get_tp_group())
+    return output
+
+def tensor_world_parallel_reduce_scatter(input_: Tensor) -> Tensor:
+    """Reduce-scatter the input tensor across the world parallel group."""
+    if get_group_size() == 1:
+        return input_
+    output = reduce_scatter_tensor(input_, group=get_world_group())
+    return output
+
+def tensor_model_parallel_scatter(input_: Tensor) -> Tensor:
+    """Scatter the input tensor across the model parallel group."""
+    if get_tensor_model_parallel_world_size() == 1:
+        return input_
+    output = scatter_tensor(input_, group=get_tp_group())
+    return output
+
+def tensor_world_parallel_scatter(input_: Tensor) -> Tensor:
+    """Scatter the input tensor across the world parallel group."""
+    if get_group_size() == 1:
+        return input_
+    output = scatter_tensor(input_, group=get_world_group())
+    return output
 
 def broadcast_tensor(tensor, src: int = 0):
     # broadcast tensor to the world group
@@ -49,6 +104,7 @@ def broadcast_tensor_dict(tensor_dict: Optional[Dict[Any, Union[Tensor,
 
     # return get_tp_group().broadcast_tensor_dict(tensor_dict, src)
 
+# ToDo: Remove
 class ReduceFromModelParallelRegion(nn.Cell):
     "All reduce the input from the model parallel region."
 
@@ -66,49 +122,160 @@ class ReduceFromModelParallelRegion(nn.Cell):
         return output
 
 
-class GatherFromModelParallelRegion(nn.Cell):
-    "Gather the input from model parallel region and concatenate."
+class AllGatherFromModelParallelRegion(nn.Cell):
+    """
+    Gather the input from the model parallel region and concatenate,
+    simultaneously performing a transpose operation on the input.
+    """
 
     def __init__(self):
         super().__init__()
         self.world_size = get_tensor_model_parallel_world_size()
-        self.tp_rank = get_tensor_model_parallel_rank()
         if self.world_size > 1:
             self.tp_group = get_tp_group().device_group._name
+            self.all_gather_into_tensor = ops.AllGather(group=self.tp_group)
 
-    def construct(self,
-                  input_: Tensor,
-                  dst: int = 0,
-                  dim: int = -1) -> Optional[Tensor]:
+    def construct(self, input_, axis=-1):
         # Size and dimension.
         if self.world_size == 1:
             return input_
-        output = ops.CollectiveGather(dest_rank=dst,
-                                      group=self.tp_group)(input_.transpose(
-                                          2, 1, 0))
-        if self.tp_rank != dst:
-            return ops.depend(ops.zeros_like(input_), output)
-        return output.transpose(2, 1, 0)
-
+        input_ = input_ if axis == 0 else ops.swapaxes(input_, 0, -1)
+        output = self.all_gather_into_tensor(input_)
+        output = output if axis == 0 else ops.swapaxes(output, 0, -1)
+        return output
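
To make the axis handling above concrete: MindSpore's `AllGather` concatenates along axis 0, so gathering along the last axis is done by swapping that axis to the front, gathering, and swapping back. A single-process emulation (two ranks are an assumption for illustration; `np.concatenate` stands in for the collective):

    import numpy as np

    def emulated_all_gather(shards, axis=-1):
        shards = [np.swapaxes(s, 0, -1) if axis != 0 else s for s in shards]
        out = np.concatenate(shards, axis=0)  # what AllGather does
        return np.swapaxes(out, 0, -1) if axis != 0 else out

    a, b = np.ones((4, 8)), np.zeros((4, 8))  # two rank-local shards
    assert emulated_all_gather([a, b], axis=-1).shape == (4, 16)
    assert emulated_all_gather([a, b], axis=0).shape == (8, 8)

Note that the second swap must start from the gathered tensor, not the input; that is the `output = output if axis == 0 ...` correction applied in the wrappers above.
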
 
-class AllGatherFromModelParallelRegion(nn.Cell):
-    """
-    Gather the input from world parallel region and concatenate, simultaneously perform
-    transpose operation on input.
-    """
+class ReduceScatterToModelParallelRegion(nn.Cell):
+    "Reduce scatter the input from the model parallel region."
 
     def __init__(self):
         super().__init__()
         self.world_size = get_tensor_model_parallel_world_size()
         if self.world_size > 1:
             self.tp_group = get_tp_group().device_group._name
-            self.all_gather_into_tensor = ops.AllGather(group=self.tp_group)
+            self.reduce_scatter_tensor = ops.ReduceScatter(group=self.tp_group)
 
     def construct(self, input_):
-        # Size and dimension.
         if self.world_size == 1:
             return input_
-        input_ = ops.swapaxes(input_, 0, -1)
+        output = self.reduce_scatter_tensor(input_)
+        return output
+
+class ScatterToModelParallelRegion(nn.Cell):
+    "Split the input and keep only the chunk corresponding to the rank in the model parallel region."
+
+    def __init__(self, axis=-1):
+        super().__init__()
+        self.world_size = get_tensor_model_parallel_world_size()
+        if self.world_size > 1:
+            self.rank = get_rank()
+            self.split = ops.Split(axis=axis, output_num=self.world_size)
+
+    def construct(self, input_):
+        if self.world_size == 1:
+            return input_
+        tensor_tuple = self.split(input_)
+        output = tensor_tuple[self.rank]
+        return output
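
For reference on what the wrapped collective computes: a reduce-scatter is an all-reduce whose summed result is then split, with each rank keeping one slice. A single-process emulation (world size 2 is illustrative):

    import numpy as np

    def emulated_reduce_scatter(per_rank_inputs):
        total = np.sum(per_rank_inputs, axis=0)            # reduce step
        return np.split(total, len(per_rank_inputs), 0)    # scatter step

    x0, x1 = np.full((4, 2), 1.0), np.full((4, 2), 2.0)
    out = emulated_reduce_scatter([x0, x1])
    assert out[0].shape == (2, 2) and out[0][0, 0] == 3.0
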
+
+class AttentionReduceScatter(nn.Cell):
+    r"""
+    Reduce-scatter the attention output and scatter the residual across the
+    model parallel region (used by DeepSeek-V3).
+
+    Args:
+        - **config** (Config): Model config of DeepSeek-V3.
+
+    Inputs:
+        - **hidden_state**, **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensors.
+
+    Outputs:
+        - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`.
+    """
+
+    def __init__(self, config):
+        super(AttentionReduceScatter, self).__init__()
+        self.config = config
+        self.compute_dtype = config.compute_dtype
+        self.hidden_size = config.hidden_size
+        self.model_parallel = config.parallel_config.model_parallel
+        self.moe_config = config.moe_config
+        self.is_first_iteration = True
+        self.reduce_scatter_to_tp_region = ReduceScatterToModelParallelRegion()
+        self.scatter_to_tp_region = ScatterToModelParallelRegion(axis=0)
+        self.reshape = ops.Reshape()
+
+    def padding_with_idx(self, hidden_state, x, attn_padding_idx):
+        hidden_state = ops.gather(hidden_state, attn_padding_idx, 0)
+        x = ops.gather(x, attn_padding_idx, 0)
+        return hidden_state, x
+
+    def construct(self, hidden_state, x):
+        hidden_state = self.reduce_scatter_to_tp_region(hidden_state)
+        x = self.scatter_to_tp_region(x)
+        return hidden_state, x
+
+class AllGatherFromWorldParallelRegion(nn.Cell):
+    "Gather the input from the world parallel region and concatenate."
+
+    def __init__(self):
+        super().__init__()
+        self.world_size = get_group_size()
+        if self.world_size > 1:
+            self.world_group = GlobalComm.WORLD_COMM_GROUP
+            self.all_gather_into_tensor = ops.AllGather(group=self.world_group)
+
+    def construct(self, input_, axis=-1):
+        if self.world_size == 1:
+            return input_
+        input_ = input_ if axis == 0 else ops.swapaxes(input_, 0, -1)
         output = self.all_gather_into_tensor(input_)
-        output = ops.swapaxes(output, 0, -1)
+        output = output if axis == 0 else ops.swapaxes(output, 0, -1)
         return output
+
+class ReduceFromWorldParallelRegion(nn.Cell):
+    "All reduce the input from the world parallel region."
+
+    def __init__(self):
+        super().__init__()
+        self.world_size = get_group_size()
+        if self.world_size > 1:
+            self.world_group = GlobalComm.WORLD_COMM_GROUP
+            self.all_reduce = ops.AllReduce(group=self.world_group)
+
+    def construct(self, input_):
+        if self.world_size == 1:
+            return input_
+        output = self.all_reduce(input_)
+        return output
+
+class ReduceScatterToWorldParallelRegion(nn.Cell):
+    "Reduce scatter the input from the world parallel region."
+
+    def __init__(self):
+        super().__init__()
+        self.world_size = get_group_size()
+        if self.world_size > 1:
+            self.world_group = GlobalComm.WORLD_COMM_GROUP
+            self.reduce_scatter_tensor = ops.ReduceScatter(group=self.world_group)
+
+    def construct(self, input_):
+        if self.world_size == 1:
+            return input_
+        output = self.reduce_scatter_tensor(input_)
+        return output
+
+class ScatterToWorldParallelRegion(nn.Cell):
+    "Split the input and keep only the chunk corresponding to the rank in the world parallel region."
+
+    def __init__(self, axis=-1):
+        super().__init__()
+        self.world_size = get_group_size()
+        if self.world_size > 1:
+            self.rank = get_rank()
+            self.split = ops.Split(axis=axis, output_num=self.world_size)
+
+    def construct(self, input_):
+        if self.world_size == 1:
+            return input_
+        tensor_tuple = self.split(input_)
+        output = tensor_tuple[self.rank]
+        return output
\ No newline at end of file
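
The `Scatter*ParallelRegion` cells above need no communication op at all: every rank already holds the full tensor, so each rank computes the same `ops.Split` and keeps only its own chunk. A single-process sketch of that selection (group size 4 is illustrative):

    import numpy as np

    def emulated_scatter(x, rank, group_size, axis=-1):
        return np.split(x, group_size, axis=axis)[rank]  # keep own chunk

    x = np.arange(32.0).reshape(4, 8)
    assert emulated_scatter(x, rank=1, group_size=4).shape == (4, 2)
    assert emulated_scatter(x, rank=0, group_size=4, axis=0).shape == (1, 8)
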
-- Gitee

From d4649eaf16fc45a4c85c7386ba93989402948283 Mon Sep 17 00:00:00 2001
From: horcam
Date: Sat, 5 Jul 2025 18:45:44 +0800
Subject: [PATCH 72/76] update dsv3 tp 0705

---
 install_depend_pkgs.sh                        |   1 +
 vllm_dp/install_dp_vllm.sh                    |   4 +
 vllm_dp/qwen3_moe.patch                       | 101 +++++
 .../model_executor/models/deepseek_v2.py      | 348 +++++++++++-------
 4 files changed, 316 insertions(+), 138 deletions(-)
 create mode 100644 vllm_dp/qwen3_moe.patch

diff --git a/install_depend_pkgs.sh b/install_depend_pkgs.sh
index 302cad80a..a30a8c457 100644
--- a/install_depend_pkgs.sh
+++ b/install_depend_pkgs.sh
@@ -41,6 +41,7 @@ if [ ! -d "$vllm_dir" ]; then
     git clone https://github.com/vllm-project/vllm.git -b v0.8.3 "$vllm_dir"
     cd "$vllm_dir" || { echo "Failed to git clone vllm!"; exit 1; }
     git apply $script_dir/vllm_dp/dp_scale_out.patch
+    git apply $script_dir/vllm_dp/qwen3_moe.patch
 else
     echo "The $vllm_dir folder already exists and will not be re-downloaded."
     cd "$vllm_dir" || { echo "Failed to git clone vllm!"; exit 1; }
diff --git a/vllm_dp/install_dp_vllm.sh b/vllm_dp/install_dp_vllm.sh
index ee02ea9f3..0e7f66869 100644
--- a/vllm_dp/install_dp_vllm.sh
+++ b/vllm_dp/install_dp_vllm.sh
@@ -33,8 +33,12 @@ fi
 git clone https://github.com/vllm-project/vllm.git -b ${vllm_tag} --depth 1 ${vllm_source_dir}
 cd ${vllm_source_dir}
 
+echo "git apply ${script_dir}/dp_scale_out.patch"
 git apply "${script_dir}/dp_scale_out.patch"
 
+echo "git apply ${script_dir}/qwen3_moe.patch"
+git apply "${script_dir}/qwen3_moe.patch"
+
 export VLLM_TARGET_DEVICE=empty
 pip install .
 
diff --git a/vllm_dp/qwen3_moe.patch b/vllm_dp/qwen3_moe.patch
new file mode 100644
index 000000000..0cf37408c
--- /dev/null
+++ b/vllm_dp/qwen3_moe.patch
@@ -0,0 +1,101 @@
+diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py
+index fa493fe..a849904 100644
+--- a/vllm/distributed/parallel_state.py
++++ b/vllm/distributed/parallel_state.py
+@@ -703,6 +703,21 @@ class GroupCoordinator:
+             self.mq_broadcaster = None
+ 
+ 
++    def dispatch(
++            self, hidden_states: torch.Tensor,
++            router_logits: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
++        if self.device_communicator is not None:
++            return self.device_communicator.dispatch(hidden_states,
++                                                     router_logits)
++        else:
++            return hidden_states, router_logits
++
++    def combine(self, hidden_states) -> torch.Tensor:
++        if self.device_communicator is not None:
++            return self.device_communicator.combine(hidden_states)
++        else:
++            return hidden_states
++
+ _WORLD: Optional[GroupCoordinator] = None
+ 
+ 
+@@ -761,6 +776,14 @@ def get_dp_group() -> GroupCoordinator:
+     return _DP
+ 
+ 
++_EP: Optional[GroupCoordinator] = None
++
++
++def get_ep_group() -> GroupCoordinator:
++    assert _EP is not None, ("expert parallel group is not initialized")
++    return _EP
++
++
+ def get_pp_group() -> GroupCoordinator:
+     assert _PP is not None, (
+         "pipeline model parallel group is not initialized")
+@@ -954,10 +977,23 @@ def initialize_model_parallel(
+                                    backend,
+                                    group_name="dp")
+ 
++
++    global _EP
++    assert _EP is None, ("expert parallel group is already initialized")
++    group_ranks = all_ranks.transpose(1, 2).reshape(
++        -1, data_parallel_size * tensor_model_parallel_size).unbind(0)
++    group_ranks = [x.tolist() for x in group_ranks]
++    _EP = init_model_parallel_group(group_ranks,
++                                    get_world_group().local_rank,
++                                    backend,
++                                    group_name="ep")
++
++
+     logger.info(
+         "rank %s in world size %s is assigned as "
+-        "DP rank %s, PP rank %s, TP rank %s", rank, world_size,
+-        _DP.rank_in_group, _PP.rank_in_group, _TP.rank_in_group)
++        "DP rank %s, PP rank %s, TP rank %s, EP rank %s", rank, world_size,
++        _DP.rank_in_group, _PP.rank_in_group, _TP.rank_in_group,
++        _EP.rank_in_group)
+ 
+ 
+ def ensure_kv_transfer_initialized(vllm_config: "VllmConfig") -> None:
+@@ -1068,6 +1104,10 @@ def destroy_model_parallel():
+         _DP.destroy()
+         _DP = None
+ 
++    global _EP
++    if _EP:
++        _EP.destroy()
++        _EP = None
+ 
+ def destroy_distributed_environment():
+     global _WORLD
+diff --git a/vllm/envs.py b/vllm/envs.py
+index 6067f5b..9d9e4ff 100644
+--- a/vllm/envs.py
++++ b/vllm/envs.py
+@@ -106,6 +106,7 @@ if TYPE_CHECKING:
+     VLLM_TPU_DISABLE_TOPK_TOPP_OPTIMIZATION: bool = False
+     VLLM_TPU_BUCKET_PADDING_GAP: int = 0
+     VLLM_USE_DEEP_GEMM: bool = False
++    VLLM_MOE_DP_CHUNK_SIZE: int = 256
+ 
+ 
+ def get_default_cache_root():
+@@ -142,6 +143,10 @@ environment_variables: dict[str, Callable[[], Any]] = {
+     "VLLM_TARGET_DEVICE":
+     lambda: os.getenv("VLLM_TARGET_DEVICE",
"cuda"), + ++ "VLLM_MOE_DP_CHUNK_SIZE": ++ lambda: int(os.getenv("VLLM_MOE_DP_CHUNK_SIZE", "256")), ++ ++ + # Maximum number of compilation jobs to run in parallel. + # By default this is the number of CPUs + "MAX_JOBS": diff --git a/vllm_mindspore/model_executor/models/deepseek_v2.py b/vllm_mindspore/model_executor/models/deepseek_v2.py index 406d2c215..b78e1ab95 100644 --- a/vllm_mindspore/model_executor/models/deepseek_v2.py +++ b/vllm_mindspore/model_executor/models/deepseek_v2.py @@ -22,8 +22,15 @@ # limitations under the License. """Inference-only Qwen3MoE model compatible with HuggingFace weights.""" from collections.abc import Iterable +from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, + Union) from typing import Any, Optional, Union, Dict, Tuple, List +if TYPE_CHECKING: + from transformers import DeepseekV3Config +else: + DeepseekV3Config = None + import numpy as np import mindspore as ms from mindspore import Tensor, nn, Parameter, mint @@ -31,9 +38,9 @@ from mindspore import Tensor, nn, mutable from mindspore.common import dtype as mstype from transformers import PretrainedConfig -from vllm.config import CacheConfig, VllmConfig +from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, - get_dp_group) + get_dp_group, get_ep_group) from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.models.interfaces import SupportsPP @@ -93,14 +100,16 @@ class DeepseekV2MLP(nn.Cell): "Only silu is supported for now.") self.act_fn = SiluAndMul() - def construct(self, x, dp_pad_index, dp_unpad_index, dp_unpad_index_total_with_offset): + def construct(self, x, dp_pad_index, dp_unpad_index, dp_pad_index_with_offset, + dp_unpad_index_total_with_offset): + # zhq: TODO gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x -class DeepseekV2MoE(Cell): +class DeepseekV2MoE(nn.Cell): r""" This is an implementation of self-attention mechanism in DeepSeek-V3. @@ -114,15 +123,19 @@ class DeepseekV2MoE(Cell): - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. """ - def __init__(self, config): + def __init__(self, config, quant_config, prefix): super(DeepseekV2MoE, self).__init__() + self.config = config + self.quant_config = quant_config + self.prefix = prefix + self.tp_size = get_tensor_model_parallel_world_size() self.routed_scaling_factor = config.routed_scaling_factor # zhq: ep_group needed - # self.ep_group = get_ep_group().device_group - # self.ep_rank = self.ep_group.rank() - # self.ep_size = self.ep_group.size() + self.ep_group = get_ep_group().device_group + self.ep_rank = self.ep_group.rank() + self.ep_size = self.ep_group.size() self.n_routed_experts: int = config.n_routed_experts self.n_shared_experts: int = config.n_shared_experts @@ -140,12 +153,20 @@ class DeepseekV2MoE(Cell): bias=False, quant_config=None, prefix=f"{prefix}.gate") + self.gate.e_score_correction_bias = 0.1 # zhq: TODO # Load balancing settings. zhq: needed? 
- vllm_config = get_current_vllm_config() - parallel_config = vllm_config.parallel_config - self.enable_eplb = enable_eplb + logger.warning( + config + ) + + parallel_config = get_current_vllm_config().parallel_config + parallel_config.num_redundant_experts = 0 + logger.warning( + parallel_config + ) + self.n_redundant_experts = parallel_config.num_redundant_experts self.n_logical_experts = self.n_routed_experts self.n_physical_experts = (self.n_logical_experts + @@ -195,7 +216,12 @@ class DeepseekV2MoE(Cell): hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) if self.n_shared_experts is not None: - shared_output = self.shared_experts(hidden_states) + shared_output = self.shared_experts(hidden_states, + dp_pad_index, + dp_unpad_index, + dp_pad_index_with_offset, + dp_unpad_index_total_with_offset + ) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states=hidden_states, router_logits=router_logits, @@ -224,54 +250,14 @@ class DeepseekV2FakedAttention(nn.Cell): def __init__( self, - config: PretrainedConfig, - hidden_size: int, - num_heads: int, - qk_nope_head_dim: int, - qk_rope_head_dim: int, - v_head_dim: int, - q_lora_rank: int, - kv_lora_rank: int, - rope_theta: float = 10000, - rope_scaling: Optional[dict[str, Any]] = None, - max_position_embeddings: int = 8192, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = "", + **kwargs ) -> None: super().__init__() - self.hidden_size = hidden_size - self.qk_nope_head_dim = qk_nope_head_dim - self.qk_rope_head_dim = qk_rope_head_dim - self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim - self.v_head_dim = v_head_dim - self.q_lora_rank = q_lora_rank - self.kv_lora_rank = kv_lora_rank - self.num_heads = num_heads - tp_size = get_tensor_model_parallel_world_size() - assert num_heads % tp_size == 0 - self.num_local_heads = num_heads // tp_size - self.scaling = self.qk_head_dim**-0.5 - self.rope_theta = rope_theta - self.max_position_embeddings = max_position_embeddings def construct( self, - positions: Tensor, - hidden_states: Tensor, - key_cache: Tensor, - value_cache: Tensor, - is_prefill: bool, - slot_mapping: Tensor, - attn_mask: Tensor, - batch_valid_length: Tensor, - q_seq_lens: Tensor, - block_tables: Tensor, - residual: Optional[Tensor], - dp_pad_index: Optional[bool] = None, - dp_unpad_index: Optional[Tensor] = None, - dp_pad_index_with_offset: Optional[Tensor] = None, - dp_unpad_index_total_with_offset: Optional[Tensor] = None, + hidden_states, + residual ) -> Tensor: return hidden_states, residual @@ -279,7 +265,7 @@ class DeepseekV2DecoderLayer(nn.Cell): def __init__( self, - config: PretrainedConfig, + config: DeepseekV3Config, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", @@ -292,23 +278,7 @@ class DeepseekV2DecoderLayer(nn.Cell): 8192) layer_idx = int(prefix.split(sep='.')[-1]) self.layer_idx = layer_idx - self.self_attn = DeepseekV2FakedAttention( - config=config, - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - qk_nope_head_dim=config.qk_nope_head_dim, - qk_rope_head_dim=config.qk_rope_head_dim, - v_head_dim=config.v_head_dim, - q_lora_rank=config.q_lora_rank - if hasattr(config, "q_lora_rank") else None, - kv_lora_rank=config.kv_lora_rank, - rope_theta=rope_theta, - rope_scaling=rope_scaling, - max_position_embeddings=max_position_embeddings, - cache_config=cache_config, - quant_config=quant_config, - 
prefix=f"{prefix}.self_attn", - ) + self.self_attn = DeepseekV2FakedAttention() if (config.n_routed_experts is not None and layer_idx >= config.first_k_dense_replace @@ -317,7 +287,6 @@ class DeepseekV2DecoderLayer(nn.Cell): config=config, quant_config=quant_config, prefix=f"{prefix}.mlp", - enable_eplb=enable_eplb, ) else: self.mlp = DeepseekV2MLP( @@ -360,10 +329,7 @@ class DeepseekV2DecoderLayer(nn.Cell): else: hidden_states, residual = self.input_layernorm( hidden_states, residual) - hidden_states = self.self_attn(positions, hidden_states, key_cache, - value_cache, is_prefill, slot_mapping, - attn_mask, batch_valid_length, - q_seq_lens, block_tables) + hidden_states = self.self_attn(hidden_states, residual) # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) @@ -394,14 +360,13 @@ class DeepseekV2Model(nn.Cell): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: DeepseekV2DecoderLayer( - config, - prefix, - model_config=model_config, + config=config, cache_config=cache_config, quant_config=quant_config, - enable_eplb=enable_eplb, + prefix=prefix, ), - prefix=f"{prefix}.layers") + prefix=f"{prefix}.layers", + ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( @@ -432,9 +397,15 @@ class DeepseekV2Model(nn.Cell): ) -> Union[Tensor, IntermediateTensors]: - assert intermediate_tensors is not None - hidden_states = intermediate_tensors["hidden_states"] - residual = intermediate_tensors["residual"] + # assert intermediate_tensors is not None + # hidden_states = intermediate_tensors["hidden_states"] + # residual = intermediate_tensors["residual"] + + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None for i in range(self.start_layer, self.end_layer): layer = self.layers[i] @@ -450,58 +421,6 @@ class DeepseekV2Model(nn.Cell): hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - -class DeepseekV2ForCausalLM(NativeModel, SupportsPP): - packed_modules_mapping = {} - fall_back_to_pt_during_load = False - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - config = vllm_config.model_config.hf_config - quant_config = vllm_config.quant_config - self.config = config - self.quant_config = quant_config - self.model = DeepseekV2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - self.expert_weights = [] - - self.sampler = get_sampler() - - - - def get_input_embeddings(self, input_ids: Tensor) -> Tensor: - return self.model.get_input_embeddings(input_ids) - - def forward( - self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[Tensor] = None, - ) -> Union[Tensor, IntermediateTensors]: - hidden_states = self.model(input_ids, positions, intermediate_tensors, - inputs_embeds) - return hidden_states - - def sample(self, logits: Tensor, - sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens - - def compute_logits( - self, - hidden_states: Tensor, - sampling_metadata: 
SamplingMetadata,
-    ) -> Optional[Tensor]:
-        logits = self.logits_processor(self.lm_head, hidden_states,
-                                       sampling_metadata)
-        return logits
 
     def load_weights(self, weights: Iterable[Tuple[str, Tensor]],
                      params_dict: Dict[str, Parameter]):
@@ -612,6 +531,159 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP):
             weight_loader(param, loaded_weight)
             loaded_params.add(name)
 
+class DeepseekV2ForCausalLM(NativeModel, SupportsPP):
+    packed_modules_mapping = {}
+    fall_back_to_pt_during_load = False
+
+    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
+        super().__init__(vllm_config=vllm_config, prefix=prefix)
+        config = vllm_config.model_config.hf_config
+        quant_config = vllm_config.quant_config
+        self.config = config
+        self.quant_config = quant_config
+        self.model = DeepseekV2Model(vllm_config=vllm_config,
+                                     prefix=maybe_prefix(prefix, "model"))
+        self.lm_head = ParallelLMHead(config.vocab_size,
+                                      config.hidden_size,
+                                      quant_config=quant_config)
+        self.logits_processor = LogitsProcessor(config.vocab_size)
+        self.make_empty_intermediate_tensors = (
+            self.model.make_empty_intermediate_tensors)
+        self.expert_weights = []
+
+        self.sampler = get_sampler()
+
+        self.common_preprocess(vllm_config, prefix)
+
+        self.dp_pad_input = False
+
+        self.enable_expert_parallel = False  # zhq: TODO
+        # if get_dp_group().world_size > 1 and not self.parallel_config.enable_expert_parallel:
+
+        if get_dp_group().world_size > 1 and not self.enable_expert_parallel:
+            self.dp_pad_input = True
+            self.dp_group = get_dp_group().device_group._name
+            self.dp_world_size = get_dp_group().world_size
+            self.dp_rank = get_dp_group().rank_in_group
+
+
+    def get_input_embeddings(self, input_ids: Tensor) -> Tensor:
+        return self.model.get_input_embeddings(input_ids)
+
+    def forward(
+        self,
+        input_ids: Tensor,
+        positions: Tensor,
+        intermediate_tensors: Optional[IntermediateTensors] = None,
+        inputs_embeds: Optional[Tensor] = None,
+        **kwargs
+    ) -> Union[Tensor, IntermediateTensors]:
+        hidden_states = self.exec_model(input_ids, positions, intermediate_tensors,
+                                        inputs_embeds)
+        return hidden_states
+
+    def sample(self, logits: Tensor,
+               sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
+        next_tokens = self.sampler(logits, sampling_metadata)
+        return next_tokens
+
+    def compute_logits(
+        self,
+        hidden_states: Tensor,
+        sampling_metadata: SamplingMetadata,
+    ) -> Optional[Tensor]:
+        logits = self.logits_processor(self.lm_head, hidden_states,
+                                       sampling_metadata)
+        return logits
+
+    def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]:
+        # params_dict = self.get_params_dict()  # zhq: TODO
+        # self.model.load_weights(weights, params_dict)  # zhq: TODO
+        pass
+
+    def exec_model(self,
+                   input_ids: Tensor,
+                   positions: Tensor,
+                   intermediate_tensors: IntermediateTensors = None,
+                   inputs_embeds: Tensor = None,
+                   **kwargs):
+        model_inputs, is_prefill = self.prepare_inputs(input_ids, positions,
+                                                       intermediate_tensors,
+                                                       inputs_embeds)
+
+        if self.prev_prefill != is_prefill and self.is_graph_mode:
+            self.set_model_inputs(input_ids, positions, intermediate_tensors,
+                                  inputs_embeds, is_prefill)
+        self.prev_prefill = is_prefill
+
+        # for dummy_attention_metadata
+        if is_prefill and not self.set_flags:
+            self.set_flags = True
+
+        if self.run_model is None:
+            self.run_model = ms.jit(
+                function=self.model,  # type: ignore[attr-defined]
+                jit_level='O0'
+            ) if self.is_graph_mode else self.model  # type: ignore[attr-defined]
+
+        if self.dp_pad_input:
+            # If DP is enabled without EP, pad every rank's input to the same
+            # length so the tokens can be gathered (worked example below).
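+            # Worked example (hypothetical sizes): with dp_world_size=2 and
+            # per-rank token counts [3, 5], max_token_num=5, so rank 0 pads
+            # 2 tokens. On dp_rank=0: dp_unpad_index=[0, 1, 2] and
+            # dp_pad_index=[0, 1, 2, 0, 0]. With cumsum [3, 8] the global
+            # index tensors become
+            #   dp_pad_index_total_with_offset   = [0, 1, 2, 0, 0, 3, 4, 5, 6, 7]
+            #   dp_unpad_index_total_with_offset = [0, 1, 2, 5, 6, 7, 8, 9]
+            # i.e. the first rebuilds the padded layout from the unpadded
+            # global token list, the second picks the real tokens back out of
+            # the padded concatenation.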
+            token_num_total = mint.empty((self.dp_world_size, 1), dtype=ms.int32)
+            send_tensor = ms.Tensor([[input_ids.shape[0]]], dtype=ms.int32)
+            mint.distributed.all_gather_into_tensor(token_num_total, send_tensor,
+                                                    group=self.dp_group)
+            token_num_total = token_num_total.reshape(-1)
+            # tokens_cumulative = mint.cumsum(token_num_total, dim=0)
+            # start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item()
+            # end = tokens_cumulative[self.dp_rank].item()
+            # end2 = tokens_cumulative[-1].item() - end
+            # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32)
+            token_num_total = token_num_total.asnumpy()
+            token_num_total_cumsum = np.cumsum(token_num_total)
+            max_token_num = token_num_total.max()
+            total_pad_num = max_token_num - token_num_total
+            this_pad_num = total_pad_num[self.dp_rank]
+
+            dp_unpad_index = ms.Tensor(np.arange(token_num_total[self.dp_rank]), dtype=ms.int32)
+            dp_pad_index = ms.Tensor(np.pad(dp_unpad_index, (0, this_pad_num)), dtype=ms.int32)
+
+            # dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank]))
+            #                                   for rank in range(self.dp_world_size)]
+            dp_pad_index_total_with_offset = [np.pad(np.arange(0 if rank == 0 else token_num_total_cumsum[rank - 1],
+                                                               token_num_total_cumsum[rank]), (0, total_pad_num[rank]))
+                                              for rank in range(self.dp_world_size)]
+
+            dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0)
+            dp_pad_index_total_with_offset = ms.Tensor(dp_pad_index_total_with_offset, dtype=mstype.int32)
+
+
+            dp_unpad_index_total_with_offset = [np.arange(token_num_total[rank]) + rank * max_token_num
+                                                for rank in range(self.dp_world_size)]
+            dp_unpad_index_total_with_offset = np.concatenate(dp_unpad_index_total_with_offset, axis=0)
+            dp_unpad_index_total_with_offset = ms.Tensor(dp_unpad_index_total_with_offset, dtype=mstype.int32)
+
+
+        model_output = self.run_model(  # type: ignore[misc]
+            input_ids=model_inputs["input_ids"],
+            positions=model_inputs["position_ids"],
+            key_caches=model_inputs["key_cache"],
+            value_caches=model_inputs["value_cache"],
+            is_prefill=is_prefill,
+            slot_mapping=model_inputs["slot_mapping"],
+            attn_mask=model_inputs["attention_mask"],
+            batch_valid_length=model_inputs["batch_valid_length"],
+            q_seq_lens=model_inputs["q_seq_lens"],
+            block_tables=model_inputs["block_tables"],
+            intermediate_tensors=model_inputs["intermediate_tensors"],
+            inputs_embeds=model_inputs["inputs_embeds"],
+            dp_pad_index=dp_pad_index if self.dp_pad_input else None,
+            dp_unpad_index=dp_unpad_index if self.dp_pad_input else None,
+            dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None,
+            dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset if self.dp_pad_input else None
+        )
+
+        return model_output
+
 class DeepseekV3ForCausalLM(DeepseekV2ForCausalLM):
     pass
\ No newline at end of file
-- Gitee
From c1df25de9891bab9c4c2f8f0190107859f049929 Mon Sep 17 00:00:00 2001
From: huandong
Date: Tue, 8 Jul 2025 12:06:37 +0800
Subject: [PATCH 73/76] deepseekv3 mla and load attn weights

---
 .../model_executor/layers/rotary_embedding.py | 117 +++++
 .../model_executor/models/deepseek_v2.py      | 439 +++++++++++++++---
 .../model_executor/models/model_base.py       |  47 +-
 3 files changed, 524 insertions(+), 79 deletions(-)

diff --git a/vllm_mindspore/model_executor/layers/rotary_embedding.py b/vllm_mindspore/model_executor/layers/rotary_embedding.py
index 747023347..1a638051d 100644
--- a/vllm_mindspore/model_executor/layers/rotary_embedding.py
+++ 
b/vllm_mindspore/model_executor/layers/rotary_embedding.py @@ -603,6 +603,110 @@ class InferMRotaryEmbedding(InferRotaryEmbedding): batch_valid_length) +def _yarn_get_mscale(scale: float = 1) -> float: + if scale <= 1: + return 1.0 + return 0.1 * math.log(scale) + 1.0 + + +def _yarn_find_correction_dim(num_rotations: int, + dim: int, + base: float = 10000, + max_position_embeddings: int = 2048) -> float: + return (dim * math.log(max_position_embeddings / + (num_rotations * 2 * math.pi))) / (2 * + math.log(base)) + + +# Find dim range bounds based on rotations +def _yarn_find_correction_range( + low_rot: int, + high_rot: int, + dim: int, + base: float = 10000, + max_position_embeddings: int = 2048) -> Tuple[int, int]: + low = math.floor( + _yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings)) + high = math.ceil( + _yarn_find_correction_dim(high_rot, dim, base, + max_position_embeddings)) + return max(low, 0), min(high, dim - 1) # Clamp values just in case + + +def _yarn_linear_ramp_mask(low: float, high: float, dim: int, + dtype: np.dtype) -> np.ndarray: + if low == high: + high += 0.001 # Prevent singularity + + linear_func = (np.arange(dim, dtype=dtype) - low) / (high - low) + ramp_func = np.clip(linear_func, 0, 1) + return ramp_func + + +class InferYaRNScalingRotaryEmbedding(InferRotaryEmbedding): + + def __init__( + self, + head_size: int, + rotary_dim: int, + max_position_embeddings: int, + base: int, + is_neox_style: bool, + scaling_factor: float, + dtype, + *, + extrapolation_factor: float = 1, + attn_factor: float = 1, + beta_fast: int = 32, + beta_slow: int = 1, + ) -> None: + self.scaling_factor = scaling_factor + self.extrapolation_factor = extrapolation_factor + self.attn_factor = attn_factor + self.beta_fast = beta_fast + self.beta_slow = beta_slow + # Get n-d magnitude scaling corrected for interpolation + self.mscale = float( + _yarn_get_mscale(self.scaling_factor) * attn_factor) + super().__init__(head_size, rotary_dim, max_position_embeddings, base, + is_neox_style, dtype) + + def _compute_inv_freq(self, scaling_factor: float) -> Tensor: + pos_freqs = self.base**( + np.arange(0, self.rotary_dim, 2, dtype=np.float32) / + self.rotary_dim) + inv_freq_extrapolation = 1.0 / pos_freqs + inv_freq_interpolation = 1.0 / (scaling_factor * pos_freqs) + + low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, + self.rotary_dim, self.base, + self.max_position_embeddings) + # Get n-d rotational scaling corrected for extrapolation + inv_freq_mask = ( + 1 - _yarn_linear_ramp_mask( + low, + high, + self.rotary_dim // 2, + dtype=np.float32 # type: ignore[arg-type] + )) * self.extrapolation_factor + inv_freq = inv_freq_interpolation * ( + 1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask + return inv_freq + + def _compute_cos_sin_cache(self) -> Tuple[Tensor, Tensor]: + freqs = self._compute_inv_freq(self.scaling_factor) + t = np.arange(self.max_position_embeddings * + self.scaling_factor).astype(np.float32) + self.freqs = Tensor(freqs.reshape(1, 1, 1, -1), dtype=self.dtype) + freqs = np.outer(t, freqs) # (max_position_embedding, head_dim // 2) + emb = np.concatenate((freqs, freqs), axis=-1) + freqs_cos = np.cos(emb) * self.mscale # (seq_len, head_dim) + freqs_sin = np.sin(emb) * self.mscale # (seq_len, head_dim) + freqs_cos = Tensor(freqs_cos, dtype=self.dtype) + freqs_sin = Tensor(freqs_sin, dtype=self.dtype) + return freqs_cos, freqs_sin + + _ROPE_DICT: Dict[Tuple, Union[InferRotaryEmbedding, RotaryEmbedding]] = {} @@ -671,6 +775,19 @@ def 
get_rope( ) else: raise NotImplementedError + elif scaling_type == "yarn": + scaling_factor = rope_scaling["factor"] + original_max_position = rope_scaling[ + "original_max_position_embeddings"] + extra_kwargs = { + k: v + for k, v in rope_scaling.items() + if k in ("extrapolation_factor", "attn_factor", "beta_fast", + "beta_slow") + } + rotary_emb = InferYaRNScalingRotaryEmbedding( + head_size, rotary_dim, original_max_position, base, + is_neox_style, scaling_factor, dtype, **extra_kwargs) else: raise NotImplementedError diff --git a/vllm_mindspore/model_executor/models/deepseek_v2.py b/vllm_mindspore/model_executor/models/deepseek_v2.py index b78e1ab95..c5345bb96 100644 --- a/vllm_mindspore/model_executor/models/deepseek_v2.py +++ b/vllm_mindspore/model_executor/models/deepseek_v2.py @@ -31,9 +31,10 @@ if TYPE_CHECKING: else: DeepseekV3Config = None +import math import numpy as np import mindspore as ms -from mindspore import Tensor, nn, Parameter, mint +from mindspore import Tensor, nn, Parameter, mint, ops from mindspore import Tensor, nn, mutable from mindspore.common import dtype as mstype @@ -53,7 +54,7 @@ from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE from vllm_mindspore.model_executor.layers.layernorm import RMSNorm from vllm_mindspore.model_executor.layers.linear import ( MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, - RowParallelLinear) + ColumnParallelLinear, RowParallelLinear) from vllm_mindspore.model_executor.layers.logits_processor import ( LogitsProcessor) from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope @@ -72,7 +73,7 @@ from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE logger = init_logger(__name__) -class DeepseekV2MLP(nn.Cell): +class DeepseekV3MLP(nn.Cell): def __init__( self, @@ -109,7 +110,7 @@ class DeepseekV2MLP(nn.Cell): return x -class DeepseekV2MoE(nn.Cell): +class DeepseekV3MoE(nn.Cell): r""" This is an implementation of self-attention mechanism in DeepSeek-V3. @@ -124,7 +125,7 @@ class DeepseekV2MoE(nn.Cell): """ def __init__(self, config, quant_config, prefix): - super(DeepseekV2MoE, self).__init__() + super(DeepseekV3MoE, self).__init__() self.config = config self.quant_config = quant_config self.prefix = prefix @@ -146,7 +147,7 @@ class DeepseekV2MoE(nn.Cell): "Only silu is supported for now.") """ - + self.gate = ReplicatedLinear(config.hidden_size, config.n_routed_experts, @@ -159,14 +160,14 @@ class DeepseekV2MoE(nn.Cell): logger.warning( config ) - + parallel_config = get_current_vllm_config().parallel_config parallel_config.num_redundant_experts = 0 logger.warning( parallel_config ) - + self.n_redundant_experts = parallel_config.num_redundant_experts self.n_logical_experts = self.n_routed_experts self.n_physical_experts = (self.n_logical_experts + @@ -198,7 +199,7 @@ class DeepseekV2MoE(nn.Cell): intermediate_size = (config.moe_intermediate_size * config.n_shared_experts) - self.shared_experts = DeepseekV2MLP( + self.shared_experts = DeepseekV3MLP( hidden_size=config.hidden_size, intermediate_size=intermediate_size, hidden_act=config.hidden_act, @@ -234,7 +235,7 @@ class DeepseekV2MoE(nn.Cell): final_hidden_states = final_hidden_states + shared_output else: # Fix FP16 overflow - # See DeepseekV2DecoderLayer for more details. + # See DeepseekV3DecoderLayer for more details. final_hidden_states = final_hidden_states + shared_output \ * (1. 
/ self.routed_scaling_factor) @@ -246,22 +247,221 @@ class DeepseekV2MoE(nn.Cell): return final_hidden_states.view(num_tokens, hidden_dim) -class DeepseekV2FakedAttention(nn.Cell): - +class DeepseekV3Attention(nn.Cell): def __init__( self, - **kwargs + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[dict[str, Any]] = None, + max_position_embeddings: int = 8192, + head_dim: Optional[int] = None, + kv_lora_rank: int =512, + q_lora_rank: int =1536, + qk_rope_head_dim: int =64, + v_head_dim: int =128, + qk_nope_head_dim: int =128, + rms_norm_eps: float = 1e-06, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() - + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + + self.head_dim = head_dim or (hidden_size // self.total_num_heads) + self.kv_lora_rank = kv_lora_rank # 512 + self.q_lora_rank = q_lora_rank # 1536 + self.qk_rope_head_dim = qk_rope_head_dim # 64 + self.qk_nope_head_dim = qk_nope_head_dim # 128 + self.v_head_dim = v_head_dim # 128 + self.q_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim # 192 = 128 + 64 + + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + self.rotary_emb = get_rope( + qk_rope_head_dim, # 64 + rotary_dim=qk_rope_head_dim, # 64 + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + + input_layout = "TH" + scale = 1. / math.sqrt(self.q_head_dim) + pre_tokens = 2147483647 + next_tokens = 2147483647 + + self.reshape_and_cache = ops.auto_generate.ReshapeAndCache() + self.flash_attention = ops.operations.nn_ops.FlashAttentionScore(head_num=num_heads, + scale_value=scale, + pre_tokens=pre_tokens, + next_tokens=next_tokens, + input_layout=input_layout) + self.paged_attention = ops.auto_generate.PagedAttention(head_num=self.num_heads, + scale_value=scale, + kv_head_num=1, + mla_v_dim=self.kv_lora_rank) + + self.q_a_proj = ReplicatedLinear( + self.hidden_size, # 7168 + self.q_lora_rank, # 1536 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.q_a_proj" + ) + + self.q_a_layernorm = RMSNorm(self.q_lora_rank, rms_norm_eps) + self.q_b_proj = ColumnParallelLinear( + self.q_lora_rank, # 1536 + self.total_num_heads * self.q_head_dim, # 128 * 192 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.q_b_proj" + ) + + # 1. kv_a_proj_with_mqa: kv latent vector; 2. 
kv_a_layernorm: latent vector of kv normalization
+        self.kv_a_proj_with_mqa = ReplicatedLinear(
+            self.hidden_size,  # 7168
+            self.kv_lora_rank + self.qk_rope_head_dim,  # 576 = 512 + 64
+            bias=False,
+            quant_config=quant_config,
+            return_bias=False,
+            prefix=f"{prefix}.kv_a_proj_with_mqa"
+        )
+        self.kv_a_layernorm = RMSNorm(self.kv_lora_rank, rms_norm_eps)
+        self.kv_b_proj_k = ColumnParallelLinear(
+            self.kv_lora_rank,  # 512
+            self.total_num_heads * self.qk_nope_head_dim,  # 128 * 128
+            bias=False,
+            quant_config=quant_config,
+            return_bias=False,
+            prefix=f"{prefix}.kv_b_proj_k"
+        )
+
+        self.kv_b_proj_v = ColumnParallelLinear(
+            self.kv_lora_rank,  # 512
+            self.total_num_heads * self.v_head_dim,  # 128 * 128
+            bias=False,
+            quant_config=quant_config,
+            return_bias=False,
+            prefix=f"{prefix}.kv_b_proj_v"
+        )
+
+        self.o_proj = RowParallelLinear(self.total_num_heads * self.v_head_dim,
+                                        hidden_size,
+                                        bias=False,
+                                        quant_config=quant_config,
+                                        prefix=f"{prefix}.o_proj")
+
+        self.reshape = ops.Reshape()
+        self.tile_kv = ops.Tile()
+        self.dim_slice_4d = ops.Slice()
+        self.kpe_concat = ops.Concat(1)
+        self.pe_concat = ops.Concat(2)
+        self.qabsorb_k_matmul = ops.BatchMatMul()
+        self.outabsorb_v_matmul = ops.BatchMatMul(transpose_b=True)
+
     def construct(
         self,
-        hidden_states,
-        residual
+        positions: Tensor,
+        hidden_states: Tensor,
+        key_cache: Tensor,
+        is_prefill: bool,
+        slot_mapping: Tensor,
+        attn_mask: Tensor,
+        batch_valid_length: Tensor,
+        q_seq_lens: Tensor,
+        block_tables: Tensor,
     ) -> Tensor:
-        return hidden_states, residual
+        # calculate q
+        q = self.q_a_proj(hidden_states)  # (t, 7168) -> (t, 1536)
+        norm_q = self.q_a_layernorm(q)
+        q = self.q_b_proj(norm_q)  # (t, 1536) -> (t, head * 192)
+        q = self.reshape(q, (-1, self.num_heads, self.q_head_dim))  # (t, 1536) -> (t, head, 192)
+
+        # calculate k(v)
+        latent_kv_all = self.kv_a_proj_with_mqa(hidden_states)  # (t, 7168) -> (t, 576)
+        latent_kv, k_pe = mint.split(latent_kv_all, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)  # (t, 576) -> (t, 512), (t, 64)
+        i_kv = self.kv_a_layernorm(latent_kv)
+
+        # q, k rope
+        q_nope, q_pe = mint.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)  # (t, head, 192) -> (t, head, 128), (t, head, 64)
+        q_pe = self.reshape(q_pe, (-1, self.num_heads * self.qk_rope_head_dim))  # (t, head, 64) -> (t, head * 64)
+        q_pe, k_pe = self.rotary_emb(positions, q_pe, k_pe, batch_valid_length, is_prefill)
+        q_pe = self.reshape(q_pe, (-1, self.num_heads, self.qk_rope_head_dim))  # (t, head * 64) -> (t, head, 64)
+
+        # k reshape_and_cache
+        key_states_cache = mint.cat((i_kv, k_pe), 1)  # (t, 512) (t, 64) -> (t, 576)
+        key_states_cache = key_states_cache.contiguous()  # in pynative mode, key_states_cache needs to be contiguous
+        key_out = self.reshape_and_cache(key_states_cache, None, key_cache, None, slot_mapping)
+        q_nope = ops.depend(q_nope, key_out)
+
+        if is_prefill:
+            # q
+            query_states = mint.cat((q_nope, q_pe), 2)  # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # k
+            k_pe = self.reshape(k_pe, (-1, 1, self.qk_rope_head_dim))  # (t, 1, 64)
+            k_pe = self.tile_kv(k_pe, (1, self.num_heads, 1))  # (t, head, 64)
+            o_k_nope = self.kv_b_proj_k(i_kv)  # (t, 512) (512, head * 128) -> (t, head * 128)
+            k_nope = self.reshape(o_k_nope, (-1, self.num_heads, self.qk_nope_head_dim))
+            key_states = self.pe_concat((k_nope, k_pe))  # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # v
+            o_v = self.kv_b_proj_v(i_kv)  # (t, 512) (512, head * 128) -> (t, head * 128)
+            value_states = self.reshape(o_v, (-1, self.num_heads, self.v_head_dim))  # (t, head, 128)
+            # Only needed because FlashAttention requires k and v to share a head dim; v itself stays (t, head, 128).
+            value_states = self.pe_concat((value_states, k_pe))  # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # attention
+            query_states = self.reshape(query_states, (-1, self.num_heads * self.q_head_dim))
+            key_states = self.reshape(key_states, (-1, self.num_heads * self.q_head_dim))
+            value_states = self.reshape(value_states, (-1, self.num_heads * self.q_head_dim))
+            _, _, _, context_layer = self.flash_attention(query_states, key_states, value_states, None, None, None, attn_mask,
+                                                          None, actual_seq_qlen=batch_valid_length,
+                                                          actual_seq_kvlen=batch_valid_length)  # (t, head, 128)
+            context_layer = context_layer.view(-1, self.num_heads, self.q_head_dim)
+            context_layer = self.dim_slice_4d(context_layer, (0, 0, 0), (-1, self.num_heads, self.v_head_dim))  # slice 192->128
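+        # The decode path below applies the MLA weight-absorption trick:
+        # rather than materialising per-token k/v, kv_b_proj_k is folded into
+        # the query ((t, head, 128) x (head, 128, 512) -> (t, head, 512)) so
+        # PagedAttention can run directly on the 576-dim latent cache
+        # (512 latent + 64 rope), and kv_b_proj_v is applied afterwards to map
+        # the (t, head, 512) context back to (t, head, 128).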
+        else:
+            # q, k_absorb
+            q_absorb = self.kv_b_proj_k.weight.view(self.num_heads, self.qk_nope_head_dim, self.kv_lora_rank)
+            q_nope = self.qabsorb_k_matmul(q_nope.transpose(1, 0, 2), q_absorb).transpose(1, 0, 2)  # (head, t, 128) (head, 128, 512) -> (head, t, 512) -> (t, head, 512)
+            query_states = self.pe_concat((q_nope, q_pe))  # (t, head, 512) (t, head, 64) -> (t, head, 576)
+            query_states = self.reshape(query_states, (-1, self.num_heads * (self.kv_lora_rank + self.qk_rope_head_dim)))  # 2-D
+
+            # attention
+            context_layer = self.paged_attention(query_states, key_cache, key_cache, block_tables, batch_valid_length,
+                                                 None, None, attn_mask, q_seq_lens)  # will slice out -> 512
+            context_layer = context_layer.view(-1, self.num_heads, self.kv_lora_rank)  # (t, head, 512)
+
+            # out, v_absorb
+            out_absorb = self.kv_b_proj_v.weight.view(self.num_heads, self.v_head_dim, self.kv_lora_rank)
+            context_layer = self.outabsorb_v_matmul(context_layer.transpose(1, 0, 2), out_absorb).transpose(1, 0, 2)  # (head, t, 512) (head, 128, 512) -> (head, t, 128) -> (t, head, 128)
 
-class DeepseekV2DecoderLayer(nn.Cell):
+        attn_out = context_layer.view(-1, self.num_heads * self.v_head_dim)  # (t, head, 128)
+        output, _ = self.o_proj(attn_out)  # wo (t, head, 128) (head*128, 7168) -> (t, 7168)
+        return output
+
+class DeepseekV3DecoderLayer(nn.Cell):
 
     def __init__(
         self,
@@ -278,18 +478,35 @@ class DeepseekV2DecoderLayer(nn.Cell):
                                          8192)
         layer_idx = int(prefix.split(sep='.')[-1])
         self.layer_idx = layer_idx
-        self.self_attn = DeepseekV2FakedAttention()
+        self.self_attn = DeepseekV3Attention(
+            hidden_size=self.hidden_size,
+            num_heads=config.num_attention_heads,
+            num_kv_heads=config.num_key_value_heads,
+            rope_theta=rope_theta,
+            rope_scaling=rope_scaling,
+            max_position_embeddings=max_position_embeddings,
+            head_dim=None,
+            kv_lora_rank=config.kv_lora_rank,
+            q_lora_rank=config.q_lora_rank if hasattr(config, "q_lora_rank") else None,
+            qk_nope_head_dim=config.qk_nope_head_dim,
+            qk_rope_head_dim=config.qk_rope_head_dim,
+            v_head_dim=config.v_head_dim,
+            rms_norm_eps=config.rms_norm_eps,
+            cache_config=cache_config,
+            quant_config=quant_config,
+            prefix=f"{prefix}.self_attn",
+        )
 
         if (config.n_routed_experts is not None
                 and layer_idx >= config.first_k_dense_replace
                 and layer_idx % config.moe_layer_freq == 0):
-            self.mlp = DeepseekV2MoE(
+            self.mlp = DeepseekV3MoE(
                 config=config,
                 quant_config=quant_config,
                 prefix=f"{prefix}.mlp",
             )
         else:
-            self.mlp = DeepseekV2MLP(
+            self.mlp = DeepseekV3MLP(
                 hidden_size=config.hidden_size,
                 intermediate_size=config.intermediate_size,
                 hidden_act=config.hidden_act,
@@ -302,13 +519,12 @@ class
DeepseekV2DecoderLayer(nn.Cell): eps=config.rms_norm_eps) self.routed_scaling_factor = config.routed_scaling_factor - + def construct( self, positions: Tensor, hidden_states: Tensor, key_cache: Tensor, - value_cache: Tensor, is_prefill: bool, slot_mapping: Tensor, attn_mask: Tensor, @@ -329,7 +545,10 @@ class DeepseekV2DecoderLayer(nn.Cell): else: hidden_states, residual = self.input_layernorm( hidden_states, residual) - hidden_states = self.self_attn(hidden_states, residual) + hidden_states = self.self_attn(positions, hidden_states, key_cache, + is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables) # Fully Connected hidden_states, residual = self.post_attention_layernorm( hidden_states, residual) @@ -338,7 +557,7 @@ class DeepseekV2DecoderLayer(nn.Cell): return hidden_states, residual -class DeepseekV2Model(nn.Cell): +class DeepseekV3Model(nn.Cell): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -356,10 +575,10 @@ class DeepseekV2Model(nn.Cell): config.hidden_size, quant_config=quant_config, prefix=f"{prefix}.embed_tokens") - + self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: DeepseekV2DecoderLayer( + lambda prefix: DeepseekV3DecoderLayer( config=config, cache_config=cache_config, quant_config=quant_config, @@ -367,21 +586,20 @@ class DeepseekV2Model(nn.Cell): ), prefix=f"{prefix}.layers", ) - + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) - + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: return self.embed_tokens(input_ids) - + def construct( self, input_ids: Tensor, positions: Tensor, key_caches: List[Tensor], - value_caches: List[Tensor], is_prefill: bool, slot_mapping: Tensor, attn_mask: Tensor, @@ -397,38 +615,39 @@ class DeepseekV2Model(nn.Cell): ) -> Union[Tensor, IntermediateTensors]: - # assert intermediate_tensors is not None - # hidden_states = intermediate_tensors["hidden_states"] - # residual = intermediate_tensors["residual"] - - if inputs_embeds is not None: - hidden_states = inputs_embeds + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] for i in range(self.start_layer, self.end_layer): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, key_caches[i - self.start_layer], - value_caches[i - self.start_layer], is_prefill, slot_mapping, attn_mask, batch_valid_length, q_seq_lens, block_tables, residual, dp_pad_index, dp_unpad_index, dp_pad_index_total_with_offset, dp_unpad_index_total_with_offset) - + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - + def load_weights(self, weights: Iterable[Tuple[str, Tensor]], params_dict: Dict[str, Parameter]): stacked_params_mapping = [ # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), ("gate_up_proj", "gate_proj", 0), ("gate_up_proj", 
"up_proj", 1), ] @@ -442,17 +661,33 @@ class DeepseekV2Model(nn.Cell): ckpt_up_proj_name="up_proj", num_experts=self.config.n_routed_experts, num_redundant_experts=self.num_redundant_experts) - - params_dict = dict(self.named_parameters()) + loaded_params: set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue - spec_layer = get_spec_layer_idx_from_weight_name(self.config, name) - if spec_layer is not None: - continue # skip spec decode layers for main model + if "kv_b_proj" in name and name not in params_dict: + k_name = name.replace("kv_b_proj", "kv_b_proj_k") + v_name = name.replace("kv_b_proj", "kv_b_proj_v") + + loaded_weight = loaded_weight.reshape(self.config.num_attention_heads, self.config.qk_nope_head_dim + self.config.v_head_dim, -1) + k_weight = loaded_weight[:, :self.config.qk_nope_head_dim, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) + v_weight = loaded_weight[:, self.config.qk_nope_head_dim:, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) + + k_param = params_dict[k_name] + v_param = params_dict[v_name] + k_param.weight_loader(k_param, k_weight) + v_param.weight_loader(v_param, v_weight) + loaded_params.add(k_name) + loaded_params.add(v_name) + continue + + # TODO + # spec_layer = get_spec_layer_idx_from_weight_name(self.config, name) + # if spec_layer is not None: + # continue # skip spec decode layers for main model for (param_name, weight_name, shard_id) in stacked_params_mapping: # Skip non-stacked layers and experts (experts handled below). @@ -492,7 +727,7 @@ class DeepseekV2Model(nn.Cell): if is_pp_missing_parameter(name_mapped, self): continue - + param = params_dict[name_mapped] weight_loader = param.weight_loader @@ -515,7 +750,7 @@ class DeepseekV2Model(nn.Cell): # Skip loading extra bias for GPTQ models. if name.endswith(".bias") and name not in params_dict: continue - + # Remapping the name of FP8 kv-scale. # zhq: needed? 
# name = maybe_remap_kv_scale_name(name, params_dict) @@ -530,8 +765,9 @@ class DeepseekV2Model(nn.Cell): default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) + return loaded_params -class DeepseekV2ForCausalLM(NativeModel, SupportsPP): +class DeepseekV3ForCausalLM(NativeModel, SupportsPP): packed_modules_mapping = {} fall_back_to_pt_during_load = False @@ -541,7 +777,7 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): quant_config = vllm_config.quant_config self.config = config self.quant_config = quant_config - self.model = DeepseekV2Model(vllm_config=vllm_config, + self.model = DeepseekV3Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, @@ -553,7 +789,7 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): self.sampler = get_sampler() - self.common_preprocess(vllm_config, prefix) + self.common_preprocess(vllm_config, use_mla=True, prefix=prefix) self.dp_pad_input = False @@ -586,7 +822,7 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - + def compute_logits( self, hidden_states: Tensor, @@ -595,12 +831,11 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): logits = self.logits_processor(self.lm_head, hidden_states, sampling_metadata) return logits - - def load_weights(self, weights: Iterable[Tuple[str, Tensor]]) -> Set[str]: - # params_dict = self.get_params_dict() # zhq: TODO - # self.model.load_weights(weights, params_dict) # zhq: TODO - pass - + + def load_weights(self, weights: Iterable[tuple[str,Tensor]]) -> set[str]: + params_dict = self.get_params_dict() + return self.model.load_weights(weights, params_dict) + def exec_model(self, input_ids: Tensor, positions: Tensor, @@ -609,7 +844,7 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): **kwargs): model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, intermediate_tensors, - inputs_embeds) + inputs_embeds, use_mla=True) if self.prev_prefill != is_prefill and self.is_graph_mode: self.set_model_inputs(input_ids, positions, intermediate_tensors, @@ -667,7 +902,6 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): input_ids=model_inputs["input_ids"], positions=model_inputs["position_ids"], key_caches=model_inputs["key_cache"], - value_caches=model_inputs["value_cache"], is_prefill=is_prefill, slot_mapping=model_inputs["slot_mapping"], attn_mask=model_inputs["attention_mask"], @@ -685,5 +919,78 @@ class DeepseekV2ForCausalLM(NativeModel, SupportsPP): return model_output -class DeepseekV3ForCausalLM(DeepseekV2ForCausalLM): - pass \ No newline at end of file + def set_model_inputs(self, input_ids, position_ids, intermediate_tensors, + inputs_embeds, is_prefill): + if input_ids is None: + dyn_input_ids = None + else: + dyn_input_ids = ms.Tensor(shape=[None] * input_ids.ndim, + dtype=mstype.int32) + + if position_ids is None: + dyn_position_ids = None + else: + dyn_position_ids = ms.Tensor(shape=[None] * position_ids.ndim, + dtype=mstype.int32) + + if inputs_embeds is None: + dyn_inputs_embeds = None + else: + dyn_inputs_embeds = ms.Tensor(shape=[None] * inputs_embeds.ndim, + dtype=inputs_embeds.dtype) + + if intermediate_tensors is None: + dyn_intermediate_tensors = None + else: + dyn_intermediate_tensors = ms.Tensor( + shape=[None] * intermediate_tensors.ndim, + dtype=intermediate_tensors.dtype) + + block_size = 
self.cache_config.block_size + num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) + head_size = self.model_config.get_head_size() + kv_cache_shape = (None, block_size, num_kv_heads, head_size) + + kv_cache_dtype = self.model_config.dtype if self.cache_config.cache_dtype == "auto" \ + else self.cache_config.cache_dtype + if kv_cache_dtype in STR_DTYPE_TO_MS_DTYPE: + kv_cache_dtype = STR_DTYPE_TO_MS_DTYPE[kv_cache_dtype] + + num_layers = self.model_config.get_num_layers(self.parallel_config) + + dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) + dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) + + dyn_slot_mapping = Tensor(shape=[None], dtype=mstype.int32) + dynamic_attention_mask = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) + dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) + dyn_dp_pad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_unpad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_pad_index_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dp_unpad_index_total_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + + + self.model.set_inputs( + dyn_input_ids, + dyn_position_ids, + dyn_key_caches, # type: ignore[attr-defined] + is_prefill, + dyn_slot_mapping, + dynamic_attention_mask, + dyn_batch_valid_length, + dyn_q_seq_lens, + dyn_block_tables, + dyn_intermediate_tensors, + dyn_inputs_embeds, + dyn_dp_pad_index, + dyn_dp_unpad_index, + dyn_dp_pad_index_with_offset, + dp_unpad_index_total_with_offset) + + dynamic_hidden_states = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + self.lm_head.set_inputs( + dynamic_hidden_states) # type: ignore[attr-defined] \ No newline at end of file diff --git a/vllm_mindspore/model_executor/models/model_base.py b/vllm_mindspore/model_executor/models/model_base.py index d2db9794d..3fc1744ed 100644 --- a/vllm_mindspore/model_executor/models/model_base.py +++ b/vllm_mindspore/model_executor/models/model_base.py @@ -34,7 +34,7 @@ import mindspore as ms from mindspore import Tensor, nn, mutable from mindspore.common import dtype as mstype -from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask +from vllm_mindspore.model_executor.models.attention_mask import LowerTriangularMask, MLALowerTriangularMask from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE from vllm_mindspore.v1.attention.backends.ms_attn import MsAttentionMetadata @@ -224,6 +224,15 @@ class MsModelBase: value_cache.append(v_cache) return mutable(key_cache), mutable(value_cache) + def get_kvcache(self): + key_cache = [] + forward_context = get_forward_context() + for i in range(self.config.num_hidden_layers): + k_cache = self.kv_caches[i].kv_cache[ # type: ignore[attr-defined] + forward_context.virtual_engine][0] + key_cache.append(k_cache) + return mutable(key_cache) + @abstractmethod def compute_logits( self, @@ -275,12 +284,15 @@ class MsModelBase: max_context_lens=0 if not self.set_flags else 1, query_start_loc=None) - def prepare_base_inputs(self, input_ids, positions): + def prepare_base_inputs(self, input_ids, positions, use_mla=False): attn_metadata = get_forward_context().attn_metadata if attn_metadata is None: attn_metadata = self._dummy_attention_metadata( input_ids, 
positions) - key_cache, value_cache = self.get_kvcache() + if use_mla: + key_cache = self.get_kvcache() + else: + key_cache, value_cache = self.get_kvcache() if not envs.VLLM_USE_V1: # V0 seq_lens = attn_metadata.seq_lens @@ -323,7 +335,8 @@ class MsModelBase: model_inputs["q_seq_lens"] = q_seq_lens model_inputs["attention_mask"] = attention_mask model_inputs["key_cache"] = key_cache - model_inputs["value_cache"] = value_cache + if not use_mla: + model_inputs["value_cache"] = value_cache return model_inputs, is_prefill @@ -340,18 +353,26 @@ class NativeModel(MsModelBase): self.prev_prefill = False self.run_model = None - def common_preprocess(self, vllm_config, prefix=""): + def common_preprocess(self, vllm_config, use_mla=False, prefix=""): self.set_modules({ "model": self.model, "lm_head": self.lm_head }) # type: ignore[attr-defined] - self.casual_mask = LowerTriangularMask( - dtype=self.model_config.dtype, - max_model_len=self.model_config.max_model_len) - self.kv_caches = [ - AttentionWrapper() for i in range(self.config.num_hidden_layers) - ] + if use_mla: + self.casual_mask = MLALowerTriangularMask( + dtype=self.model_config.dtype, + max_model_len=self.model_config.max_model_len) + self.kv_caches = [ + MLAAttentionWrapper() for i in range(self.config.num_hidden_layers) + ] + else: + self.casual_mask = LowerTriangularMask( + dtype=self.model_config.dtype, + max_model_len=self.model_config.max_model_len) + self.kv_caches = [ + AttentionWrapper() for i in range(self.config.num_hidden_layers) + ] compilation_config = vllm_config.compilation_config if prefix in compilation_config.static_forward_context: @@ -431,9 +452,9 @@ class NativeModel(MsModelBase): dynamic_hidden_states) # type: ignore[attr-defined] def prepare_inputs(self, input_ids, positions, intermediate_tensors, - inputs_embeds): + inputs_embeds, use_mla=False): model_inputs, is_prefill = self.prepare_base_inputs( - input_ids, positions) + input_ids, positions, use_mla=use_mla) # for multimodal model model_inputs["intermediate_tensors"] = intermediate_tensors -- Gitee From c2e54c192cfa174ad676edd7d74b8139ab5a24ae Mon Sep 17 00:00:00 2001 From: horcam Date: Mon, 14 Jul 2025 16:59:29 +0800 Subject: [PATCH 74/76] fix file from CRLF to LF --- .../model_executor/models/deepseek_v2.py | 1990 ++++++++--------- 1 file changed, 995 insertions(+), 995 deletions(-) diff --git a/vllm_mindspore/model_executor/models/deepseek_v2.py b/vllm_mindspore/model_executor/models/deepseek_v2.py index c5345bb96..9b5170218 100644 --- a/vllm_mindspore/model_executor/models/deepseek_v2.py +++ b/vllm_mindspore/model_executor/models/deepseek_v2.py @@ -1,996 +1,996 @@ -# SPDX-License-Identifier: Apache-2.0 - -# Copyright 2024 The Qwen team. -# Copyright 2023 The vLLM team. -# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library. It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" -from collections.abc import Iterable -from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, - Union) -from typing import Any, Optional, Union, Dict, Tuple, List - -if TYPE_CHECKING: - from transformers import DeepseekV3Config -else: - DeepseekV3Config = None - -import math -import numpy as np -import mindspore as ms -from mindspore import Tensor, nn, Parameter, mint, ops -from mindspore import Tensor, nn, mutable -from mindspore.common import dtype as mstype - -from transformers import PretrainedConfig -from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config -from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, - get_dp_group, get_ep_group) -from vllm.logger import init_logger -from vllm.model_executor.layers.quantization import QuantizationConfig -from vllm.model_executor.models.interfaces import SupportsPP -from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors - -from vllm_mindspore.attention import Attention -from vllm_mindspore.model_executor.layers.activation import SiluAndMul -from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE -from vllm_mindspore.model_executor.layers.layernorm import RMSNorm -from vllm_mindspore.model_executor.layers.linear import ( - MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, - ColumnParallelLinear, RowParallelLinear) -from vllm_mindspore.model_executor.layers.logits_processor import ( - LogitsProcessor) -from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope -from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( - ParallelLMHead, VocabParallelEmbedding) -from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader - -from vllm_mindspore.model_executor.models.utils import ( - extract_layer_index, is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) -from vllm_mindspore.model_executor.models.model_base import NativeModel -from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, - get_sampler) -from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE - -logger = init_logger(__name__) - - -class DeepseekV3MLP(nn.Cell): - - def __init__( - self, - hidden_size: int, - intermediate_size: int, - hidden_act: str, - quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True, - prefix: str = "", - ) -> None: - super().__init__() - self.gate_up_proj = MergedColumnParallelLinear( - hidden_size, [intermediate_size] * 2, - bias=False, - quant_config=quant_config, - prefix=f"{prefix}.gate_up_proj") - self.down_proj = RowParallelLinear(intermediate_size, - hidden_size, - bias=False, - quant_config=quant_config, - reduce_results=reduce_results, - prefix=f"{prefix}.down_proj") - if hidden_act != "silu": - raise ValueError(f"Unsupported activation: {hidden_act}. 
" - "Only silu is supported for now.") - self.act_fn = SiluAndMul() - - def construct(self, x, dp_pad_index, dp_unpad_index, dp_pad_index_with_offset, - dp_unpad_index_total_with_offset): - # zhq: TODO - gate_up, _ = self.gate_up_proj(x) - x = self.act_fn(gate_up) - x, _ = self.down_proj(x) - return x - - -class DeepseekV3MoE(nn.Cell): - r""" - This is an implementation of self-attention mechanism in DeepSeek-V3. - - Args: - - **config** (Config): Model config of DeepSeek-V3. - - Inputs: - - **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensor. - - Outputs: - - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. - """ - - def __init__(self, config, quant_config, prefix): - super(DeepseekV3MoE, self).__init__() - self.config = config - self.quant_config = quant_config - self.prefix = prefix - - self.tp_size = get_tensor_model_parallel_world_size() - self.routed_scaling_factor = config.routed_scaling_factor - - # zhq: ep_group needed - self.ep_group = get_ep_group().device_group - self.ep_rank = self.ep_group.rank() - self.ep_size = self.ep_group.size() - - self.n_routed_experts: int = config.n_routed_experts - self.n_shared_experts: int = config.n_shared_experts - - """zhq: needed? - if config.hidden_act != "silu": - raise ValueError(f"Unsupported activation: {config.hidden_act}. " - "Only silu is supported for now.") - """ - - - - self.gate = ReplicatedLinear(config.hidden_size, - config.n_routed_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate") - self.gate.e_score_correction_bias = 0.1 # zhq: TODO - - # Load balancing settings. zhq: needed? - logger.warning( - config - ) - - parallel_config = get_current_vllm_config().parallel_config - parallel_config.num_redundant_experts = 0 - - logger.warning( - parallel_config - ) - - self.n_redundant_experts = parallel_config.num_redundant_experts - self.n_logical_experts = self.n_routed_experts - self.n_physical_experts = (self.n_logical_experts + - self.n_redundant_experts) - self.n_local_physical_experts = self.n_physical_experts // self.ep_size - - self.physical_expert_start = (self.ep_rank * - self.n_local_physical_experts) - self.physical_expert_end = (self.physical_expert_start + - self.n_local_physical_experts) - - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func=config.scoring_func, - e_score_correction_bias=self.gate.e_score_correction_bias, - num_redundant_experts=self.n_redundant_experts) - - if config.n_shared_experts is not None: - intermediate_size = (config.moe_intermediate_size * - config.n_shared_experts) - - self.shared_experts = DeepseekV3MLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs( - ), - prefix=f"{prefix}.shared_experts", - ) - - - def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, - dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: - # NOTE: hidden_states can have either 1D or 2D shape. 
- orig_shape = hidden_states.shape - hidden_dim = hidden_states.shape[-1] - hidden_states = hidden_states.view(-1, hidden_dim) - if self.n_shared_experts is not None: - shared_output = self.shared_experts(hidden_states, - dp_pad_index, - dp_unpad_index, - dp_pad_index_with_offset, - dp_unpad_index_total_with_offset - ) - router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts(hidden_states=hidden_states, - router_logits=router_logits, - dp_pad_index=dp_pad_index, - dp_unpad_index=dp_unpad_index, - dp_pad_index_with_offset=dp_pad_index_with_offset, - dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) - if shared_output is not None: - if hidden_states.dtype != torch.float16: - final_hidden_states = final_hidden_states + shared_output - else: - # Fix FP16 overflow - # See DeepseekV3DecoderLayer for more details. - final_hidden_states = final_hidden_states + shared_output \ - * (1. / self.routed_scaling_factor) - - if self.tp_size > 1: - final_hidden_states = ( - self.experts.maybe_all_reduce_tensor_model_parallel( - final_hidden_states)) - - return final_hidden_states.view(num_tokens, hidden_dim) - - -class DeepseekV3Attention(nn.Cell): - def __init__( - self, - hidden_size: int, - num_heads: int, - num_kv_heads: int, - rope_theta: float = 10000, - rope_scaling: Optional[dict[str, Any]] = None, - max_position_embeddings: int = 8192, - head_dim: Optional[int] = None, - kv_lora_rank: int =512, - q_lora_rank: int =1536, - qk_rope_head_dim: int =64, - v_head_dim: int =128, - qk_nope_head_dim: int =128, - rms_norm_eps: float = 1e-06, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = hidden_size - tp_size = get_tensor_model_parallel_world_size() - self.total_num_heads = num_heads - assert self.total_num_heads % tp_size == 0 - self.num_heads = self.total_num_heads // tp_size - self.total_num_kv_heads = num_kv_heads - if self.total_num_kv_heads >= tp_size: - # Number of KV heads is greater than TP size, so we partition - # the KV heads across multiple tensor parallel GPUs. - assert self.total_num_kv_heads % tp_size == 0 - else: - # Number of KV heads is less than TP size, so we replicate - # the KV heads across multiple tensor parallel GPUs. - assert tp_size % self.total_num_kv_heads == 0 - - self.head_dim = head_dim or (hidden_size // self.total_num_heads) - self.kv_lora_rank = kv_lora_rank # 512 - self.q_lora_rank = q_lora_rank # 1536 - self.qk_rope_head_dim = qk_rope_head_dim # 64 - self.qk_nope_head_dim = qk_nope_head_dim # 128 - self.v_head_dim = v_head_dim # 128 - self.q_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim # 192 = 128 + 64 - - self.scaling = self.head_dim**-0.5 - self.rope_theta = rope_theta - self.max_position_embeddings = max_position_embeddings - - self.rotary_emb = get_rope( - qk_rope_head_dim, # 64 - rotary_dim=qk_rope_head_dim, # 64 - max_position=max_position_embeddings, - base=rope_theta, - rope_scaling=rope_scaling, - ) - - input_layout = "TH" - scale = 1. 
/ math.sqrt(self.q_head_dim) - pre_tokens = 2147483647 - next_tokens = 2147483647 - - self.reshape_and_cache = ops.auto_generate.ReshapeAndCache() - self.flash_attention = ops.operations.nn_ops.FlashAttentionScore(head_num=num_heads, - scale_value=scale, - pre_tokens=pre_tokens, - next_tokens=next_tokens, - input_layout=input_layout) - self.paged_attention = ops.auto_generate.PagedAttention(head_num=self.num_heads, - scale_value=scale, - kv_head_num=1, - mla_v_dim=self.kv_lora_rank) - - self.q_a_proj = ReplicatedLinear( - self.hidden_size, # 7168 - self.q_lora_rank, # 1536 - bias=False, - quant_config=quant_config, - return_bias=False, - prefix=f"{prefix}.q_a_proj" - ) - - self.q_a_layernorm = RMSNorm(self.q_lora_rank, rms_norm_eps) - self.q_b_proj = ColumnParallelLinear( - self.q_lora_rank, # 1536 - self.total_num_heads * self.q_head_dim, # 128 * 192 - bias=False, - quant_config=quant_config, - return_bias=False, - prefix=f"{prefix}.q_b_proj" - ) - - # 1. kv_a_proj_with_mqa: kv latent vector; 2. kv_a_layernorm: latent vector of kv normalization - self.kv_a_proj_with_mqa = ReplicatedLinear( - self.hidden_size, # 7168 - self.kv_lora_rank + self.qk_rope_head_dim, # 576 = 512 + 64 - bias=False, - quant_config=quant_config, - return_bias=False, - prefix=f"{prefix}.kv_a_proj_with_mqa" - ) - self.kv_a_layernorm = RMSNorm(self.kv_lora_rank, rms_norm_eps) - self.kv_b_proj_k = ColumnParallelLinear( - self.kv_lora_rank, # 512 - self.total_num_heads * self.qk_nope_head_dim, # 128 * 128 - bias=False, - quant_config=quant_config, - return_bias=False, - prefix=f"{prefix}.kv_b_proj_k" - ) - - self.kv_b_proj_v = ColumnParallelLinear( - self.kv_lora_rank, # 512 - self.total_num_heads * self.v_head_dim, # 128 * 128 - bias=False, - quant_config=quant_config, - return_bias=False, - prefix=f"{prefix}.kv_b_proj_v" - ) - - self.o_proj = RowParallelLinear(self.total_num_heads * self.v_head_dim, - hidden_size, - bias=False, - quant_config=quant_config, - prefix=f"{prefix}.o_proj") - - self.reshape = ops.Reshape() - self.tile_kv = ops.Tile() - self.dim_slice_4d = ops.Slice() - self.kpe_concat = ops.Concat(1) - self.pe_concat = ops.Concat(2) - self.qabsorb_k_matmul = ops.BatchMatMul() - self.outabsorb_v_matmul = ops.BatchMatMul(transpose_b=True) - - def construct( - self, - positions: Tensor, - hidden_states: Tensor, - key_cache: Tensor, - is_prefill: bool, - slot_mapping: Tensor, - attn_mask: Tensor, - batch_valid_length: Tensor, - q_seq_lens: Tensor, - block_tables: Tensor, - ) -> Tensor: - # calculate q - q = self.q_a_proj(hidden_states) # (t, 7168) -> (t, 1536) - norm_q = self.q_a_layernorm(q) - q = self.q_b_proj(norm_q) # (t, 1536) -> (t, head * 192) - q = self.reshape(q, (-1, self.num_heads, self.q_head_dim)) # (t, 1536) -> (t, head, 192) - - # calculate k(v) - latent_kv_all = self.kv_a_proj_with_mqa(hidden_states) # (t, 7168) -> (t, 576) - latent_kv, k_pe = mint.split(latent_kv_all, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) # (t, 576) -> (t, 512), (t, 64) - i_kv = self.kv_a_layernorm(latent_kv) - - # q, k rope - q_nope, q_pe = mint.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) # (t, head, 192) -> (t, head, 128), (t, head, 64) - q_pe = self.reshape(q_pe, (-1, self.num_heads * self.qk_rope_head_dim)) # (t, head, 64) -> (t, head * 64) - q_pe, k_pe = self.rotary_emb(positions, q_pe, k_pe, batch_valid_length, is_prefill) - q_pe = self.reshape(q_pe, (-1, self.num_heads, self.qk_rope_head_dim)) # (t, head * 64) -> (t, head, 64) - - # k reshape_and_cache - key_states_cache = 
mint.cat((i_kv, k_pe), 1) # (t, 512) (t, 64) -> (t, 576) - key_states_cache = key_states_cache.contiguous() # for pynaitve key_states_cache need contiguous - key_out = self.reshape_and_cache(key_states_cache, None, key_cache, None, slot_mapping) - q_nope = ops.depend(q_nope, key_out) - - if is_prefill: - # q - query_states = mint.cat((q_nope, q_pe), 2) # (t, head, 128), (t, head, 64) -> (t, head, 192) - - # k - k_pe = self.reshape(k_pe, (-1, 1, self.qk_rope_head_dim)) # (t, 1, 64) - k_pe = self.tile_kv(k_pe, (1, self.num_heads, 1)) # (t, head, 64) - o_k_nope = self.kv_b_proj_k(i_kv) # (t, 512) (512, head * 128) -> (t, head * 128) - k_nope = self.reshape(o_k_nope, (-1, self.num_heads, self.qk_nope_head_dim)) - key_states = self.pe_concat((k_nope, k_pe)) # (t, head, 128), (t, head, 64) -> (t, head, 192) - - # v - o_v = self.kv_b_proj_v(i_kv) # (t, 512) (512, head * 128) -> (t, head * 128) - value_states = self.reshape(o_v, (-1, self.num_heads, self.v_head_dim)) # (t, head, 128) - # It's not necessary. Just fa is not support k != v. V just (t, head, 128) - value_states = self.pe_concat((value_states, k_pe)) # (t, head, 128), (t, head, 64) -> (t, head, 192) - - # attention - query_states = self.reshape(query_states, (-1, self.num_heads * self.q_head_dim)) - key_states = self.reshape(key_states, (-1, self.num_heads * self.q_head_dim)) - value_states = self.reshape(value_states, (-1, self.num_heads * self.q_head_dim)) - _, _, _, context_layer = self.flash_attention(query_states, key_states, value_states, None, None, None, attn_mask, - None, actual_seq_qlen=batch_valid_length, - actual_seq_kvlen=batch_valid_length) # (t, head, 128) - context_layer = context_layer.view(-1, self.num_heads, self.q_head_dim) - context_layer = self.dim_slice_4d(context_layer, (0, 0, 0), (-1, self.num_heads, self.v_head_dim)) # slice 192->128 - else: - # q, k_absorb - q_absorb = self.kv_b_proj_k.weight.view(self.num_heads, self.qk_nope_head_dim, self.kv_lora_rank) - q_nope = self.qabsorb_k_matmul(q_nope.transpose(1, 0, 2), q_absorb).transpose(1, 0, 2) # (head, t, 128) (head, 128, 512) -> (head, t, 512) -> (t, head, 512) - query_states = self.pe_concat((q_nope, q_pe)) # (t, head, 512) (t, head, 64) -> (t, head, 576) - query_states = self.reshape(query_states, (-1, self.n_local_heads * (self.kv_lora_rank + self.qk_rope_head_dim))) # 2维 - - # attention - context_layer = self.paged_attention(query_states, key_cache, key_cache, block_tables, batch_valid_length, - None, None, attn_mask, q_seq_lens) # will slice out -> 512 - context_layer = context_layer.view(-1, self.num_heads, self.kv_lora_rank) # (t, head, 512) - - # out, v_absorb - out_absorb = self.kv_b_proj_v.weight.view(self.num_heads, self.v_head_dim, self.kv_lora_rank) - context_layer = self.outabsorb_v_matmul(context_layer.transpose(1, 0, 2), out_absorb).transpose(1, 0, 2) # (head, t, 512) (head, 128, 512) -> (head, t, 128) ->(t, head, 128) - - attn_out = context_layer.view(-1, self.num_heads * self.v_head_dim) # (t, head, 128) - output, _ = self.o_proj(attn_out) # wo (t, head, 128) (head*128, 7168) -> (t, 7168) - return output - -class DeepseekV3DecoderLayer(nn.Cell): - - def __init__( - self, - config: DeepseekV3Config, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = config.hidden_size - rope_theta = getattr(config, "rope_theta", 10000) - rope_scaling = getattr(config, "rope_scaling", None) - max_position_embeddings = getattr(config, 
"max_position_embeddings", - 8192) - layer_idx = int(prefix.split(sep='.')[-1]) - self.layer_idx = layer_idx - self.self_attn = DeepseekV3Attention( - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - num_kv_heads=config.num_key_value_heads, - rope_theta=rope_theta, - rope_scaling=rope_scaling, - max_position_embeddings=max_position_embeddings, - head_dim=None, - kv_lora_rank=config.kv_lora_rank, - q_lora_rank=config.q_lora_rank if hasattr(config, "q_lora_rank") else None, - qk_nope_head_dim=config.qk_nope_head_dim, - qk_rope_head_dim=config.qk_rope_head_dim, - v_head_dim=config.v_head_dim, - rms_norm_eps=config.rms_norm_eps, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - - if (config.n_routed_experts is not None - and layer_idx >= config.first_k_dense_replace - and layer_idx % config.moe_layer_freq == 0): - self.mlp = DeepseekV3MoE( - config=config, - quant_config=quant_config, - prefix=f"{prefix}.mlp", - ) - else: - self.mlp = DeepseekV3MLP( - hidden_size=config.hidden_size, - intermediate_size=config.intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - prefix=f"{prefix}.mlp", - ) - self.input_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - self.post_attention_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - self.routed_scaling_factor = config.routed_scaling_factor - - - def construct( - self, - positions: Tensor, - hidden_states: Tensor, - key_cache: Tensor, - is_prefill: bool, - slot_mapping: Tensor, - attn_mask: Tensor, - batch_valid_length: Tensor, - q_seq_lens: Tensor, - block_tables: Tensor, - residual: Optional[Tensor], - dp_pad_index: Optional[bool] = None, - dp_unpad_index: Optional[Tensor] = None, - dp_pad_index_with_offset: Optional[Tensor] = None, - dp_unpad_index_total_with_offset: Optional[Tensor] = None, - ) -> Tensor: - - # Self Attention - if residual is None: - residual = hidden_states - hidden_states = self.input_layernorm(hidden_states) - else: - hidden_states, residual = self.input_layernorm( - hidden_states, residual) - hidden_states = self.self_attn(positions, hidden_states, key_cache, - is_prefill, slot_mapping, - attn_mask, batch_valid_length, - q_seq_lens, block_tables) - # Fully Connected - hidden_states, residual = self.post_attention_layernorm( - hidden_states, residual) - hidden_states = self.mlp(hidden_states, dp_pad_index, dp_unpad_index, - dp_pad_index_with_offset, dp_unpad_index_total_with_offset) - return hidden_states, residual - - -class DeepseekV3Model(nn.Cell): - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - - self.padding_idx = config.pad_token_id - self.vocab_size = config.vocab_size - self.config = config - - self.embed_tokens = VocabParallelEmbedding( - config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=f"{prefix}.embed_tokens") - - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: DeepseekV3DecoderLayer( - config=config, - cache_config=cache_config, - quant_config=quant_config, - prefix=prefix, - ), - prefix=f"{prefix}.layers", - ) - - self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.make_empty_intermediate_tensors = ( - make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size)) - - def 
get_input_embeddings(self, input_ids: Tensor) -> Tensor: - return self.embed_tokens(input_ids) - - def construct( - self, - input_ids: Tensor, - positions: Tensor, - key_caches: List[Tensor], - is_prefill: bool, - slot_mapping: Tensor, - attn_mask: Tensor, - batch_valid_length: Tensor, - q_seq_lens: Tensor, - block_tables: Tensor, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[Tensor] = None, - dp_pad_index = None, - dp_unpad_index: Optional[Tensor] = None, - dp_pad_index_total_with_offset: Optional[Tensor] = None, - dp_unpad_index_total_with_offset: Optional[Tensor] = None, - - ) -> Union[Tensor, IntermediateTensors]: - - if get_pp_group().is_first_rank: - if inputs_embeds is not None: - hidden_states = inputs_embeds - else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None - else: - assert intermediate_tensors is not None - hidden_states = intermediate_tensors["hidden_states"] - residual = intermediate_tensors["residual"] - - for i in range(self.start_layer, self.end_layer): - layer = self.layers[i] - hidden_states, residual = layer(positions, hidden_states, - key_caches[i - self.start_layer], - is_prefill, slot_mapping, - attn_mask, batch_valid_length, - q_seq_lens, block_tables, residual, - dp_pad_index, dp_unpad_index, - dp_pad_index_total_with_offset, - dp_unpad_index_total_with_offset) - if not get_pp_group().is_last_rank: - return IntermediateTensors({ - "hidden_states": hidden_states, - "residual": residual - }) - hidden_states, _ = self.norm(hidden_states, residual) - return hidden_states - - def load_weights(self, weights: Iterable[Tuple[str, Tensor]], - params_dict: Dict[str, Parameter]): - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - - # Params for weights, fp8 weight scales, fp8 activation scales - # (param_name, weight_name, expert_id, shard_id) - # zhq: needed? - expert_params_mapping = FusedMoE.make_expert_params_mapping( - ckpt_gate_proj_name="gate_proj", - ckpt_down_proj_name="down_proj", - ckpt_up_proj_name="up_proj", - num_experts=self.config.n_routed_experts, - num_redundant_experts=self.num_redundant_experts) - - loaded_params: set[str] = set() - - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - - if "kv_b_proj" in name and name not in params_dict: - k_name = name.replace("kv_b_proj", "kv_b_proj_k") - v_name = name.replace("kv_b_proj", "kv_b_proj_v") - - loaded_weight = loaded_weight.reshape(self.config.num_attention_heads, self.config.qk_nope_head_dim + self.config.v_head_dim, -1) - k_weight = loaded_weight[:, :self.config.qk_nope_head_dim, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) - v_weight = loaded_weight[:, self.config.qk_nope_head_dim:, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) - - k_param = params_dict[k_name] - v_param = params_dict[v_name] - k_param.weight_loader(k_param, k_weight) - v_param.weight_loader(v_param, v_weight) - loaded_params.add(k_name) - loaded_params.add(v_name) - continue - - # TODO - # spec_layer = get_spec_layer_idx_from_weight_name(self.config, name) - # if spec_layer is not None: - # continue # skip spec decode layers for main model - - for (param_name, weight_name, shard_id) in stacked_params_mapping: - # Skip non-stacked layers and experts (experts handled below). - if weight_name not in name: - continue - # We have mlp.experts[0].gate_proj in the checkpoint. 
- # Since we handle the experts below in expert_params_mapping, - # we need to skip here BEFORE we update the name, otherwise - # name will be updated to mlp.experts[0].gate_up_proj, which - # will then be updated below in expert_params_mapping - # for mlp.experts[0].gate_gate_up_proj, which breaks load. - if (("mlp.experts." in name) and name not in params_dict): - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - is_expert_weight = False - for mapping in expert_params_mapping: - param_name, weight_name, expert_id, shard_id = mapping - if weight_name not in name: - continue - # Anyway, this is an expert weight and should not be - # attempted to load as other weights later - is_expert_weight = True - - # Do not modify `name` since the loop may continue here - # Instead, create a new variable - name_mapped = name.replace(weight_name, param_name) - - if is_pp_missing_parameter(name_mapped, self): - continue - - param = params_dict[name_mapped] - weight_loader = param.weight_loader - - success = weight_loader(param, - loaded_weight, - name_mapped, - shard_id=shard_id, - expert_id=expert_id, - return_success=True) - if success: - name = name_mapped - break - else: - if is_expert_weight: - # We've checked that this is an expert weight - # However it's not mapped locally to this rank - # So we simply skip it - continue - - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - - # Remapping the name of FP8 kv-scale. - # zhq: needed? 
- # name = maybe_remap_kv_scale_name(name, params_dict) - if name is None: - continue - - if is_pp_missing_parameter(name, self): - continue - - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params - -class DeepseekV3ForCausalLM(NativeModel, SupportsPP): - packed_modules_mapping = {} - fall_back_to_pt_during_load = False - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - config = vllm_config.model_config.hf_config - quant_config = vllm_config.quant_config - self.config = config - self.quant_config = quant_config - self.model = DeepseekV3Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - self.expert_weights = [] - - self.sampler = get_sampler() - - self.common_preprocess(vllm_config, use_mla=True, prefix=prefix) - - self.dp_pad_input = False - - self.enable_expert_parallel = False # zhq: TODO - # if get_dp_group().world_size > 1 and not self.parallel_config.enable_expert_parallel: - - if get_dp_group().world_size > 1 and not self.enable_expert_parallel: - self.dp_pad_input = True - self.dp_group = get_dp_group().device_group._name - self.dp_world_size = get_dp_group().world_size - self.dp_rank = get_dp_group().rank_in_group - - - def get_input_embeddings(self, input_ids: Tensor) -> Tensor: - return self.model.get_input_embeddings(input_ids) - - def forward( - self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[Tensor] = None, - **kwargs - ) -> Union[Tensor, IntermediateTensors]: - hidden_states = self.exec_model(input_ids, positions, intermediate_tensors, - inputs_embeds) - return hidden_states - - def sample(self, logits: Tensor, - sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens - - def compute_logits( - self, - hidden_states: Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits - - def load_weights(self, weights: Iterable[tuple[str,Tensor]]) -> set[str]: - params_dict = self.get_params_dict() - return self.model.load_weights(weights, params_dict) - - def exec_model(self, - input_ids: Tensor, - positions: Tensor, - intermediate_tensors: IntermediateTensors = None, - inputs_embeds: Tensor = None, - **kwargs): - model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, - intermediate_tensors, - inputs_embeds, use_mla=True) - - if self.prev_prefill != is_prefill and self.is_graph_mode: - self.set_model_inputs(input_ids, positions, intermediate_tensors, - inputs_embeds, is_prefill) - self.prev_prefill = is_prefill - - # for dummy_attention_metadata - if is_prefill and not self.set_flags: - self.set_flags = True - - if self.run_model is None: - self.run_model = ms.jit( - function=self.model, # type: ignore[attr-defined] - jit_level='O0' - ) if self.is_graph_mode else self.model # type: ignore[attr-defined] - - if self.dp_pad_input: - # if dp and not ep, should pad input to gather. 
- token_num_total = mint.empty((self.dp_world_size, 1), dtype=ms.int32) - send_tensor = ms.Tensor([[input_ids.shape[0]]], dtype=ms.int32) - mint.distributed.all_gather_into_tensor(token_num_total, send_tensor, - group=self.dp_group) - token_num_total = token_num_total.reshape(-1) - # tokens_cumulative = mint.cumsum(token_num_total, dim=0) - # start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() - # end = tokens_cumulative[self.dp_rank].item() - # end2 = tokens_cumulative[-1].item() - end - # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) - token_num_total = token_num_total.asnumpy() - token_num_total_cumsum = np.cumsum(token_num_total) - max_token_num = token_num_total.max() - total_pad_num = max_token_num - token_num_total - this_pad_num = total_pad_num[self.dp_rank] - - dp_unpad_index = ms.Tensor(np.arange(token_num_total[self.dp_rank]), dtype=ms.int32) - dp_pad_index = ms.Tensor(np.pad(dp_unpad_index, (0, this_pad_num)), dtype=ms.int32) - - # dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) - # for rank in range(self.dp_world_size)] - dp_pad_index_total_with_offset = [np.pad(np.arange(0 if rank == 0 else token_num_total_cumsum[rank - 1], - token_num_total_cumsum[rank]), (0, total_pad_num[rank])) - for rank in range(self.dp_world_size)] - - dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0) - dp_pad_index_total_with_offset = ms.Tensor(dp_pad_index_total_with_offset, dtype=mstype.int32) - - - dp_unpad_index_total_with_offset = [np.arange(token_num_total[rank]) + rank * max_token_num - for rank in range(self.dp_world_size)] - dp_unpad_index_total_with_offset = np.concatenate(dp_unpad_index_total_with_offset, axis=0) - dp_unpad_index_total_with_offset = ms.Tensor(dp_unpad_index_total_with_offset, dtype=mstype.int32) - - - model_output = self.run_model( # type: ignore[misc] - input_ids=model_inputs["input_ids"], - positions=model_inputs["position_ids"], - key_caches=model_inputs["key_cache"], - is_prefill=is_prefill, - slot_mapping=model_inputs["slot_mapping"], - attn_mask=model_inputs["attention_mask"], - batch_valid_length=model_inputs["batch_valid_length"], - q_seq_lens=model_inputs["q_seq_lens"], - block_tables=model_inputs["block_tables"], - intermediate_tensors=model_inputs["intermediate_tensors"], - inputs_embeds=model_inputs["inputs_embeds"], - dp_pad_index=dp_pad_index if self.dp_pad_input else None, - dp_unpad_index=dp_unpad_index if self.dp_pad_input else None, - dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None, - dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset if self.dp_pad_input else None - ) - - return model_output - - - def set_model_inputs(self, input_ids, position_ids, intermediate_tensors, - inputs_embeds, is_prefill): - if input_ids is None: - dyn_input_ids = None - else: - dyn_input_ids = ms.Tensor(shape=[None] * input_ids.ndim, - dtype=mstype.int32) - - if position_ids is None: - dyn_position_ids = None - else: - dyn_position_ids = ms.Tensor(shape=[None] * position_ids.ndim, - dtype=mstype.int32) - - if inputs_embeds is None: - dyn_inputs_embeds = None - else: - dyn_inputs_embeds = ms.Tensor(shape=[None] * inputs_embeds.ndim, - dtype=inputs_embeds.dtype) - - if intermediate_tensors is None: - dyn_intermediate_tensors = None - else: - dyn_intermediate_tensors = ms.Tensor( - shape=[None] * intermediate_tensors.ndim, - dtype=intermediate_tensors.dtype) - - block_size = 
self.cache_config.block_size - num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) - head_size = self.model_config.get_head_size() - kv_cache_shape = (None, block_size, num_kv_heads, head_size) - - kv_cache_dtype = self.model_config.dtype if self.cache_config.cache_dtype == "auto" \ - else self.cache_config.cache_dtype - if kv_cache_dtype in STR_DTYPE_TO_MS_DTYPE: - kv_cache_dtype = STR_DTYPE_TO_MS_DTYPE[kv_cache_dtype] - - num_layers = self.model_config.get_num_layers(self.parallel_config) - - dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) - dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) - - dyn_slot_mapping = Tensor(shape=[None], dtype=mstype.int32) - dynamic_attention_mask = Tensor(shape=[None, None], - dtype=self.model_config.dtype) - dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) - dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) - dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) - dyn_dp_pad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None - dyn_dp_unpad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None - dyn_dp_pad_index_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None - dp_unpad_index_total_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None - - - self.model.set_inputs( - dyn_input_ids, - dyn_position_ids, - dyn_key_caches, # type: ignore[attr-defined] - is_prefill, - dyn_slot_mapping, - dynamic_attention_mask, - dyn_batch_valid_length, - dyn_q_seq_lens, - dyn_block_tables, - dyn_intermediate_tensors, - dyn_inputs_embeds, - dyn_dp_pad_index, - dyn_dp_unpad_index, - dyn_dp_pad_index_with_offset, - dp_unpad_index_total_with_offset) - - dynamic_hidden_states = Tensor(shape=[None, None], - dtype=self.model_config.dtype) - self.lm_head.set_inputs( +# SPDX-License-Identifier: Apache-2.0 + +# Copyright 2024 The Qwen team. +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only Qwen3MoE model compatible with HuggingFace weights.""" +from collections.abc import Iterable +from typing import (TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, + Union) +from typing import Any, Optional, Union, Dict, Tuple, List + +if TYPE_CHECKING: + from transformers import DeepseekV3Config +else: + DeepseekV3Config = None + +import math +import numpy as np +import mindspore as ms +from mindspore import Tensor, nn, Parameter, mint, ops +from mindspore import Tensor, nn, mutable +from mindspore.common import dtype as mstype + +from transformers import PretrainedConfig +from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config +from vllm.distributed import (get_pp_group, get_tensor_model_parallel_world_size, + get_dp_group, get_ep_group) +from vllm.logger import init_logger +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from vllm_mindspore.attention import Attention +from vllm_mindspore.model_executor.layers.activation import SiluAndMul +from vllm_mindspore.model_executor.layers.fused_moe import FusedMoE +from vllm_mindspore.model_executor.layers.layernorm import RMSNorm +from vllm_mindspore.model_executor.layers.linear import ( + MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, + ColumnParallelLinear, RowParallelLinear) +from vllm_mindspore.model_executor.layers.logits_processor import ( + LogitsProcessor) +from vllm_mindspore.model_executor.layers.rotary_embedding import get_rope +from vllm_mindspore.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm_mindspore.model_executor.model_loader.weight_utils import default_weight_loader + +from vllm_mindspore.model_executor.models.utils import ( + extract_layer_index, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +from vllm_mindspore.model_executor.models.model_base import NativeModel +from vllm_mindspore.model_executor.layers.sampler import (SamplerOutput, + get_sampler) +from vllm_mindspore.utils import STR_DTYPE_TO_MS_DTYPE + +logger = init_logger(__name__) + + +class DeepseekV3MLP(nn.Cell): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj") + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results, + prefix=f"{prefix}.down_proj") + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def construct(self, x, dp_pad_index, dp_unpad_index, dp_pad_index_with_offset, + dp_unpad_index_total_with_offset): + # zhq: TODO + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class DeepseekV3MoE(nn.Cell): + r""" + This is an implementation of self-attention mechanism in DeepSeek-V3. + + Args: + - **config** (Config): Model config of DeepSeek-V3. 
+ + Inputs: + - **x** (Tensor): Should be `[batch, seq_length, hidden_size]`. Float tensor. + + Outputs: + - **output** (Tensor): The output of this layer after mapping. The shape is `[batch, seq_length, hidden_size]`. + """ + + def __init__(self, config, quant_config, prefix): + super(DeepseekV3MoE, self).__init__() + self.config = config + self.quant_config = quant_config + self.prefix = prefix + + self.tp_size = get_tensor_model_parallel_world_size() + self.routed_scaling_factor = config.routed_scaling_factor + + # zhq: ep_group needed + self.ep_group = get_ep_group().device_group + self.ep_rank = self.ep_group.rank() + self.ep_size = self.ep_group.size() + + self.n_routed_experts: int = config.n_routed_experts + self.n_shared_experts: int = config.n_shared_experts + + """zhq: needed? + if config.hidden_act != "silu": + raise ValueError(f"Unsupported activation: {config.hidden_act}. " + "Only silu is supported for now.") + """ + + + + self.gate = ReplicatedLinear(config.hidden_size, + config.n_routed_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") + self.gate.e_score_correction_bias = 0.1 # zhq: TODO + + # Load balancing settings. zhq: needed? + logger.warning( + config + ) + + parallel_config = get_current_vllm_config().parallel_config + parallel_config.num_redundant_experts = 0 + + logger.warning( + parallel_config + ) + + self.n_redundant_experts = parallel_config.num_redundant_experts + self.n_logical_experts = self.n_routed_experts + self.n_physical_experts = (self.n_logical_experts + + self.n_redundant_experts) + self.n_local_physical_experts = self.n_physical_experts // self.ep_size + + self.physical_expert_start = (self.ep_rank * + self.n_local_physical_experts) + self.physical_expert_end = (self.physical_expert_start + + self.n_local_physical_experts) + + self.experts = FusedMoE( + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + e_score_correction_bias=self.gate.e_score_correction_bias, + num_redundant_experts=self.n_redundant_experts) + + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + + self.shared_experts = DeepseekV3MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=self.experts.must_reduce_shared_expert_outputs( + ), + prefix=f"{prefix}.shared_experts", + ) + + + def construct(self, hidden_states: Tensor, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) -> Tensor: + # NOTE: hidden_states can have either 1D or 2D shape. 
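+        # Both the routed experts and the optional shared experts consume the
+        # flattened (tokens, hidden_dim) view built below; their outputs are
+        # summed before the optional TP all-reduce. The dp_* index tensors are
+        # only meaningful when data-parallel padding is enabled (see
+        # DeepseekV3ForCausalLM.exec_model).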
+        orig_shape = hidden_states.shape
+        hidden_dim = hidden_states.shape[-1]
+        hidden_states = hidden_states.view(-1, hidden_dim)
+        shared_output = None  # shared experts may be absent
+        if self.n_shared_experts is not None:
+            shared_output = self.shared_experts(hidden_states,
+                                                dp_pad_index,
+                                                dp_unpad_index,
+                                                dp_pad_index_with_offset,
+                                                dp_unpad_index_total_with_offset
+                                                )
+        router_logits, _ = self.gate(hidden_states)
+        final_hidden_states = self.experts(hidden_states=hidden_states,
+                                           router_logits=router_logits,
+                                           dp_pad_index=dp_pad_index,
+                                           dp_unpad_index=dp_unpad_index,
+                                           dp_pad_index_with_offset=dp_pad_index_with_offset,
+                                           dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset)
+        if shared_output is not None:
+            if hidden_states.dtype != mstype.float16:
+                final_hidden_states = final_hidden_states + shared_output
+            else:
+                # Fix FP16 overflow
+                # See DeepseekV3DecoderLayer for more details.
+                final_hidden_states = final_hidden_states + shared_output \
+                    * (1. / self.routed_scaling_factor)
+
+        if self.tp_size > 1:
+            final_hidden_states = (
+                self.experts.maybe_all_reduce_tensor_model_parallel(
+                    final_hidden_states))
+
+        return final_hidden_states.view(orig_shape)
+
+
+class DeepseekV3Attention(nn.Cell):
+
+    def __init__(
+        self,
+        hidden_size: int,
+        num_heads: int,
+        num_kv_heads: int,
+        rope_theta: float = 10000,
+        rope_scaling: Optional[dict[str, Any]] = None,
+        max_position_embeddings: int = 8192,
+        head_dim: Optional[int] = None,
+        kv_lora_rank: int = 512,
+        q_lora_rank: int = 1536,
+        qk_rope_head_dim: int = 64,
+        v_head_dim: int = 128,
+        qk_nope_head_dim: int = 128,
+        rms_norm_eps: float = 1e-06,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+    ) -> None:
+        super().__init__()
+        self.hidden_size = hidden_size
+        tp_size = get_tensor_model_parallel_world_size()
+        self.total_num_heads = num_heads
+        assert self.total_num_heads % tp_size == 0
+        self.num_heads = self.total_num_heads // tp_size
+        self.total_num_kv_heads = num_kv_heads
+        if self.total_num_kv_heads >= tp_size:
+            # Number of KV heads is greater than TP size, so we partition
+            # the KV heads across multiple tensor parallel GPUs.
+            assert self.total_num_kv_heads % tp_size == 0
+        else:
+            # Number of KV heads is less than TP size, so we replicate
+            # the KV heads across multiple tensor parallel GPUs.
+            assert tp_size % self.total_num_kv_heads == 0
+
+        self.head_dim = head_dim or (hidden_size // self.total_num_heads)
+        self.kv_lora_rank = kv_lora_rank # 512
+        self.q_lora_rank = q_lora_rank # 1536
+        self.qk_rope_head_dim = qk_rope_head_dim # 64
+        self.qk_nope_head_dim = qk_nope_head_dim # 128
+        self.v_head_dim = v_head_dim # 128
+        self.q_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim # 192 = 128 + 64
+
+        self.scaling = self.head_dim**-0.5
+        self.rope_theta = rope_theta
+        self.max_position_embeddings = max_position_embeddings
+
+        self.rotary_emb = get_rope(
+            qk_rope_head_dim, # 64
+            rotary_dim=qk_rope_head_dim, # 64
+            max_position=max_position_embeddings,
+            base=rope_theta,
+            rope_scaling=rope_scaling,
+        )
+
+        input_layout = "TH"
+        scale = 1.
/ math.sqrt(self.q_head_dim) + pre_tokens = 2147483647 + next_tokens = 2147483647 + + self.reshape_and_cache = ops.auto_generate.ReshapeAndCache() + self.flash_attention = ops.operations.nn_ops.FlashAttentionScore(head_num=num_heads, + scale_value=scale, + pre_tokens=pre_tokens, + next_tokens=next_tokens, + input_layout=input_layout) + self.paged_attention = ops.auto_generate.PagedAttention(head_num=self.num_heads, + scale_value=scale, + kv_head_num=1, + mla_v_dim=self.kv_lora_rank) + + self.q_a_proj = ReplicatedLinear( + self.hidden_size, # 7168 + self.q_lora_rank, # 1536 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.q_a_proj" + ) + + self.q_a_layernorm = RMSNorm(self.q_lora_rank, rms_norm_eps) + self.q_b_proj = ColumnParallelLinear( + self.q_lora_rank, # 1536 + self.total_num_heads * self.q_head_dim, # 128 * 192 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.q_b_proj" + ) + + # 1. kv_a_proj_with_mqa: kv latent vector; 2. kv_a_layernorm: latent vector of kv normalization + self.kv_a_proj_with_mqa = ReplicatedLinear( + self.hidden_size, # 7168 + self.kv_lora_rank + self.qk_rope_head_dim, # 576 = 512 + 64 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.kv_a_proj_with_mqa" + ) + self.kv_a_layernorm = RMSNorm(self.kv_lora_rank, rms_norm_eps) + self.kv_b_proj_k = ColumnParallelLinear( + self.kv_lora_rank, # 512 + self.total_num_heads * self.qk_nope_head_dim, # 128 * 128 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.kv_b_proj_k" + ) + + self.kv_b_proj_v = ColumnParallelLinear( + self.kv_lora_rank, # 512 + self.total_num_heads * self.v_head_dim, # 128 * 128 + bias=False, + quant_config=quant_config, + return_bias=False, + prefix=f"{prefix}.kv_b_proj_v" + ) + + self.o_proj = RowParallelLinear(self.total_num_heads * self.v_head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.o_proj") + + self.reshape = ops.Reshape() + self.tile_kv = ops.Tile() + self.dim_slice_4d = ops.Slice() + self.kpe_concat = ops.Concat(1) + self.pe_concat = ops.Concat(2) + self.qabsorb_k_matmul = ops.BatchMatMul() + self.outabsorb_v_matmul = ops.BatchMatMul(transpose_b=True) + + def construct( + self, + positions: Tensor, + hidden_states: Tensor, + key_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, + ) -> Tensor: + # calculate q + q = self.q_a_proj(hidden_states) # (t, 7168) -> (t, 1536) + norm_q = self.q_a_layernorm(q) + q = self.q_b_proj(norm_q) # (t, 1536) -> (t, head * 192) + q = self.reshape(q, (-1, self.num_heads, self.q_head_dim)) # (t, 1536) -> (t, head, 192) + + # calculate k(v) + latent_kv_all = self.kv_a_proj_with_mqa(hidden_states) # (t, 7168) -> (t, 576) + latent_kv, k_pe = mint.split(latent_kv_all, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) # (t, 576) -> (t, 512), (t, 64) + i_kv = self.kv_a_layernorm(latent_kv) + + # q, k rope + q_nope, q_pe = mint.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) # (t, head, 192) -> (t, head, 128), (t, head, 64) + q_pe = self.reshape(q_pe, (-1, self.num_heads * self.qk_rope_head_dim)) # (t, head, 64) -> (t, head * 64) + q_pe, k_pe = self.rotary_emb(positions, q_pe, k_pe, batch_valid_length, is_prefill) + q_pe = self.reshape(q_pe, (-1, self.num_heads, self.qk_rope_head_dim)) # (t, head * 64) -> (t, head, 64) + + # k reshape_and_cache + key_states_cache = 
mint.cat((i_kv, k_pe), 1) # (t, 512) (t, 64) -> (t, 576)
+        key_states_cache = key_states_cache.contiguous() # PyNative mode requires key_states_cache to be contiguous
+        key_out = self.reshape_and_cache(key_states_cache, None, key_cache, None, slot_mapping)
+        q_nope = ops.depend(q_nope, key_out)
+
+        if is_prefill:
+            # q
+            query_states = mint.cat((q_nope, q_pe), 2) # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # k
+            k_pe = self.reshape(k_pe, (-1, 1, self.qk_rope_head_dim)) # (t, 1, 64)
+            k_pe = self.tile_kv(k_pe, (1, self.num_heads, 1)) # (t, head, 64)
+            o_k_nope = self.kv_b_proj_k(i_kv) # (t, 512) (512, head * 128) -> (t, head * 128)
+            k_nope = self.reshape(o_k_nope, (-1, self.num_heads, self.qk_nope_head_dim))
+            key_states = self.pe_concat((k_nope, k_pe)) # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # v
+            o_v = self.kv_b_proj_v(i_kv) # (t, 512) (512, head * 128) -> (t, head * 128)
+            value_states = self.reshape(o_v, (-1, self.num_heads, self.v_head_dim)) # (t, head, 128)
+            # Not strictly necessary: FlashAttentionScore does not support k and v of
+            # different widths, so v is padded from (t, head, 128) to (t, head, 192).
+            value_states = self.pe_concat((value_states, k_pe)) # (t, head, 128), (t, head, 64) -> (t, head, 192)
+
+            # attention
+            query_states = self.reshape(query_states, (-1, self.num_heads * self.q_head_dim))
+            key_states = self.reshape(key_states, (-1, self.num_heads * self.q_head_dim))
+            value_states = self.reshape(value_states, (-1, self.num_heads * self.q_head_dim))
+            _, _, _, context_layer = self.flash_attention(query_states, key_states, value_states, None, None, None, attn_mask,
+                                                          None, actual_seq_qlen=batch_valid_length,
+                                                          actual_seq_kvlen=batch_valid_length) # (t, head * 192)
+            context_layer = context_layer.view(-1, self.num_heads, self.q_head_dim)
+            context_layer = self.dim_slice_4d(context_layer, (0, 0, 0), (-1, self.num_heads, self.v_head_dim)) # slice 192 -> 128
+        else:
+            # q, k_absorb
+            q_absorb = self.kv_b_proj_k.weight.view(self.num_heads, self.qk_nope_head_dim, self.kv_lora_rank)
+            q_nope = self.qabsorb_k_matmul(q_nope.transpose(1, 0, 2), q_absorb).transpose(1, 0, 2) # (head, t, 128) (head, 128, 512) -> (head, t, 512) -> (t, head, 512)
+            query_states = self.pe_concat((q_nope, q_pe)) # (t, head, 512) (t, head, 64) -> (t, head, 576)
+            query_states = self.reshape(query_states, (-1, self.num_heads * (self.kv_lora_rank + self.qk_rope_head_dim))) # flatten to 2D for the "TH" layout
+
+            # attention
+            context_layer = self.paged_attention(query_states, key_cache, key_cache, block_tables, batch_valid_length,
+                                                 None, None, attn_mask, q_seq_lens) # will slice out -> 512
+            context_layer = context_layer.view(-1, self.num_heads, self.kv_lora_rank) # (t, head, 512)
+
+            # out, v_absorb
+            out_absorb = self.kv_b_proj_v.weight.view(self.num_heads, self.v_head_dim, self.kv_lora_rank)
+            context_layer = self.outabsorb_v_matmul(context_layer.transpose(1, 0, 2), out_absorb).transpose(1, 0, 2) # (head, t, 512) (head, 128, 512) -> (head, t, 128) -> (t, head, 128)
+
+        attn_out = context_layer.view(-1, self.num_heads * self.v_head_dim) # (t, head, 128)
+        output, _ = self.o_proj(attn_out) # wo (t, head, 128) (head*128, 7168) -> (t, 7168)
+        return output
+
+class DeepseekV3DecoderLayer(nn.Cell):
+
+    def __init__(
+        self,
+        config: DeepseekV3Config,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+    ) -> None:
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        rope_theta = getattr(config, "rope_theta", 10000)
+        rope_scaling = getattr(config, "rope_scaling", None)
+        max_position_embeddings = getattr(config,
"max_position_embeddings", + 8192) + layer_idx = int(prefix.split(sep='.')[-1]) + self.layer_idx = layer_idx + self.self_attn = DeepseekV3Attention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + head_dim=None, + kv_lora_rank=config.kv_lora_rank, + q_lora_rank=config.q_lora_rank if hasattr(config, "q_lora_rank") else None, + qk_nope_head_dim=config.qk_nope_head_dim, + qk_rope_head_dim=config.qk_rope_head_dim, + v_head_dim=config.v_head_dim, + rms_norm_eps=config.rms_norm_eps, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) + + if (config.n_routed_experts is not None + and layer_idx >= config.first_k_dense_replace + and layer_idx % config.moe_layer_freq == 0): + self.mlp = DeepseekV3MoE( + config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + ) + else: + self.mlp = DeepseekV3MLP( + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.routed_scaling_factor = config.routed_scaling_factor + + + def construct( + self, + positions: Tensor, + hidden_states: Tensor, + key_cache: Tensor, + is_prefill: bool, + slot_mapping: Tensor, + attn_mask: Tensor, + batch_valid_length: Tensor, + q_seq_lens: Tensor, + block_tables: Tensor, + residual: Optional[Tensor], + dp_pad_index: Optional[bool] = None, + dp_unpad_index: Optional[Tensor] = None, + dp_pad_index_with_offset: Optional[Tensor] = None, + dp_unpad_index_total_with_offset: Optional[Tensor] = None, + ) -> Tensor: + + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn(positions, hidden_states, key_cache, + is_prefill, slot_mapping, + attn_mask, batch_valid_length, + q_seq_lens, block_tables) + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states, dp_pad_index, dp_unpad_index, + dp_pad_index_with_offset, dp_unpad_index_total_with_offset) + return hidden_states, residual + + +class DeepseekV3Model(nn.Cell): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.config = config + + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=f"{prefix}.embed_tokens") + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: DeepseekV3DecoderLayer( + config=config, + cache_config=cache_config, + quant_config=quant_config, + prefix=prefix, + ), + prefix=f"{prefix}.layers", + ) + + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + + def 
get_input_embeddings(self, input_ids: Tensor) -> Tensor:
+        return self.embed_tokens(input_ids)
+
+    def construct(
+        self,
+        input_ids: Tensor,
+        positions: Tensor,
+        key_caches: List[Tensor],
+        is_prefill: bool,
+        slot_mapping: Tensor,
+        attn_mask: Tensor,
+        batch_valid_length: Tensor,
+        q_seq_lens: Tensor,
+        block_tables: Tensor,
+        intermediate_tensors: Optional[IntermediateTensors] = None,
+        inputs_embeds: Optional[Tensor] = None,
+        dp_pad_index: Optional[Tensor] = None,
+        dp_unpad_index: Optional[Tensor] = None,
+        dp_pad_index_total_with_offset: Optional[Tensor] = None,
+        dp_unpad_index_total_with_offset: Optional[Tensor] = None,
+    ) -> Union[Tensor, IntermediateTensors]:
+
+        if get_pp_group().is_first_rank:
+            if inputs_embeds is not None:
+                hidden_states = inputs_embeds
+            else:
+                hidden_states = self.get_input_embeddings(input_ids)
+            residual = None
+        else:
+            assert intermediate_tensors is not None
+            hidden_states = intermediate_tensors["hidden_states"]
+            residual = intermediate_tensors["residual"]
+
+        for i in range(self.start_layer, self.end_layer):
+            layer = self.layers[i]
+            hidden_states, residual = layer(positions, hidden_states,
+                                            key_caches[i - self.start_layer],
+                                            is_prefill, slot_mapping,
+                                            attn_mask, batch_valid_length,
+                                            q_seq_lens, block_tables, residual,
+                                            dp_pad_index, dp_unpad_index,
+                                            dp_pad_index_total_with_offset,
+                                            dp_unpad_index_total_with_offset)
+        if not get_pp_group().is_last_rank:
+            return IntermediateTensors({
+                "hidden_states": hidden_states,
+                "residual": residual
+            })
+        hidden_states, _ = self.norm(hidden_states, residual)
+        return hidden_states
+
+    def load_weights(self, weights: Iterable[Tuple[str, Tensor]],
+                     params_dict: Dict[str, Parameter]):
+        stacked_params_mapping = [
+            # (param_name, shard_name, shard_id)
+            ("gate_up_proj", "gate_proj", 0),
+            ("gate_up_proj", "up_proj", 1),
+        ]
+
+        # Params for weights, fp8 weight scales, fp8 activation scales
+        # (param_name, weight_name, expert_id, shard_id)
+        # zhq: needed?
+        expert_params_mapping = FusedMoE.make_expert_params_mapping(
+            ckpt_gate_proj_name="gate_proj",
+            ckpt_down_proj_name="down_proj",
+            ckpt_up_proj_name="up_proj",
+            num_experts=self.config.n_routed_experts,
+            num_redundant_experts=0)  # redundant experts are disabled (see DeepseekV3MoE)
+
+        loaded_params: set[str] = set()
+
+        for name, loaded_weight in weights:
+            if "rotary_emb.inv_freq" in name:
+                continue
+
+            if "kv_b_proj" in name and name not in params_dict:
+                # Split the checkpoint's fused kv_b_proj into the separate
+                # kv_b_proj_k / kv_b_proj_v parameters used by this model.
+                k_name = name.replace("kv_b_proj", "kv_b_proj_k")
+                v_name = name.replace("kv_b_proj", "kv_b_proj_v")
+
+                loaded_weight = loaded_weight.reshape(self.config.num_attention_heads, self.config.qk_nope_head_dim + self.config.v_head_dim, -1)
+                k_weight = loaded_weight[:, :self.config.qk_nope_head_dim, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1)
+                v_weight = loaded_weight[:, self.config.qk_nope_head_dim:, :].reshape(self.config.num_attention_heads * self.config.v_head_dim, -1)
+
+                k_param = params_dict[k_name]
+                v_param = params_dict[v_name]
+                k_param.weight_loader(k_param, k_weight)
+                v_param.weight_loader(v_param, v_weight)
+                loaded_params.add(k_name)
+                loaded_params.add(v_name)
+                continue
+
+            # TODO
+            # spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
+            # if spec_layer is not None:
+            #     continue # skip spec decode layers for main model
+
+            for (param_name, weight_name, shard_id) in stacked_params_mapping:
+                # Skip non-stacked layers and experts (experts handled below).
+                if weight_name not in name:
+                    continue
+                # We have mlp.experts[0].gate_proj in the checkpoint.
+ # Since we handle the experts below in expert_params_mapping, + # we need to skip here BEFORE we update the name, otherwise + # name will be updated to mlp.experts[0].gate_up_proj, which + # will then be updated below in expert_params_mapping + # for mlp.experts[0].gate_gate_up_proj, which breaks load. + if (("mlp.experts." in name) and name not in params_dict): + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + is_expert_weight = False + for mapping in expert_params_mapping: + param_name, weight_name, expert_id, shard_id = mapping + if weight_name not in name: + continue + # Anyway, this is an expert weight and should not be + # attempted to load as other weights later + is_expert_weight = True + + # Do not modify `name` since the loop may continue here + # Instead, create a new variable + name_mapped = name.replace(weight_name, param_name) + + if is_pp_missing_parameter(name_mapped, self): + continue + + param = params_dict[name_mapped] + weight_loader = param.weight_loader + + success = weight_loader(param, + loaded_weight, + name_mapped, + shard_id=shard_id, + expert_id=expert_id, + return_success=True) + if success: + name = name_mapped + break + else: + if is_expert_weight: + # We've checked that this is an expert weight + # However it's not mapped locally to this rank + # So we simply skip it + continue + + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + + # Remapping the name of FP8 kv-scale. + # zhq: needed? 
+ # name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + +class DeepseekV3ForCausalLM(NativeModel, SupportsPP): + packed_modules_mapping = {} + fall_back_to_pt_during_load = False + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__(vllm_config=vllm_config, prefix=prefix) + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + self.model = DeepseekV3Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + self.expert_weights = [] + + self.sampler = get_sampler() + + self.common_preprocess(vllm_config, use_mla=True, prefix=prefix) + + self.dp_pad_input = False + + self.enable_expert_parallel = False # zhq: TODO + # if get_dp_group().world_size > 1 and not self.parallel_config.enable_expert_parallel: + + if get_dp_group().world_size > 1 and not self.enable_expert_parallel: + self.dp_pad_input = True + self.dp_group = get_dp_group().device_group._name + self.dp_world_size = get_dp_group().world_size + self.dp_rank = get_dp_group().rank_in_group + + + def get_input_embeddings(self, input_ids: Tensor) -> Tensor: + return self.model.get_input_embeddings(input_ids) + + def forward( + self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[Tensor] = None, + **kwargs + ) -> Union[Tensor, IntermediateTensors]: + hidden_states = self.exec_model(input_ids, positions, intermediate_tensors, + inputs_embeds) + return hidden_states + + def sample(self, logits: Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def compute_logits( + self, + hidden_states: Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def load_weights(self, weights: Iterable[tuple[str,Tensor]]) -> set[str]: + params_dict = self.get_params_dict() + return self.model.load_weights(weights, params_dict) + + def exec_model(self, + input_ids: Tensor, + positions: Tensor, + intermediate_tensors: IntermediateTensors = None, + inputs_embeds: Tensor = None, + **kwargs): + model_inputs, is_prefill = self.prepare_inputs(input_ids, positions, + intermediate_tensors, + inputs_embeds, use_mla=True) + + if self.prev_prefill != is_prefill and self.is_graph_mode: + self.set_model_inputs(input_ids, positions, intermediate_tensors, + inputs_embeds, is_prefill) + self.prev_prefill = is_prefill + + # for dummy_attention_metadata + if is_prefill and not self.set_flags: + self.set_flags = True + + if self.run_model is None: + self.run_model = ms.jit( + function=self.model, # type: ignore[attr-defined] + jit_level='O0' + ) if self.is_graph_mode else self.model # type: ignore[attr-defined] + + if self.dp_pad_input: + # if dp and not ep, should pad input to gather. 
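+            # Ranks in the data-parallel group may hold different numbers of
+            # tokens, so each rank first all-gathers every rank's token count,
+            # then pads its own batch up to the maximum count; the index
+            # tensors built below describe how to pad locally and how to
+            # recover each rank's unpadded tokens after the gather.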
+ token_num_total = mint.empty((self.dp_world_size, 1), dtype=ms.int32) + send_tensor = ms.Tensor([[input_ids.shape[0]]], dtype=ms.int32) + mint.distributed.all_gather_into_tensor(token_num_total, send_tensor, + group=self.dp_group) + token_num_total = token_num_total.reshape(-1) + # tokens_cumulative = mint.cumsum(token_num_total, dim=0) + # start = 0 if self.dp_rank == 0 else tokens_cumulative[self.dp_rank - 1].item() + # end = tokens_cumulative[self.dp_rank].item() + # end2 = tokens_cumulative[-1].item() - end + # dp_pad_index = ms.Tensor([0, 0, start, end2], dtype=ms.int32) + token_num_total = token_num_total.asnumpy() + token_num_total_cumsum = np.cumsum(token_num_total) + max_token_num = token_num_total.max() + total_pad_num = max_token_num - token_num_total + this_pad_num = total_pad_num[self.dp_rank] + + dp_unpad_index = ms.Tensor(np.arange(token_num_total[self.dp_rank]), dtype=ms.int32) + dp_pad_index = ms.Tensor(np.pad(dp_unpad_index, (0, this_pad_num)), dtype=ms.int32) + + # dp_pad_index_total_with_offset = [np.pad(np.arange(token_num_total[rank]), (0, total_pad_num[rank])) + # for rank in range(self.dp_world_size)] + dp_pad_index_total_with_offset = [np.pad(np.arange(0 if rank == 0 else token_num_total_cumsum[rank - 1], + token_num_total_cumsum[rank]), (0, total_pad_num[rank])) + for rank in range(self.dp_world_size)] + + dp_pad_index_total_with_offset = np.concatenate(dp_pad_index_total_with_offset, axis=0) + dp_pad_index_total_with_offset = ms.Tensor(dp_pad_index_total_with_offset, dtype=mstype.int32) + + + dp_unpad_index_total_with_offset = [np.arange(token_num_total[rank]) + rank * max_token_num + for rank in range(self.dp_world_size)] + dp_unpad_index_total_with_offset = np.concatenate(dp_unpad_index_total_with_offset, axis=0) + dp_unpad_index_total_with_offset = ms.Tensor(dp_unpad_index_total_with_offset, dtype=mstype.int32) + + + model_output = self.run_model( # type: ignore[misc] + input_ids=model_inputs["input_ids"], + positions=model_inputs["position_ids"], + key_caches=model_inputs["key_cache"], + is_prefill=is_prefill, + slot_mapping=model_inputs["slot_mapping"], + attn_mask=model_inputs["attention_mask"], + batch_valid_length=model_inputs["batch_valid_length"], + q_seq_lens=model_inputs["q_seq_lens"], + block_tables=model_inputs["block_tables"], + intermediate_tensors=model_inputs["intermediate_tensors"], + inputs_embeds=model_inputs["inputs_embeds"], + dp_pad_index=dp_pad_index if self.dp_pad_input else None, + dp_unpad_index=dp_unpad_index if self.dp_pad_input else None, + dp_pad_index_total_with_offset=dp_pad_index_total_with_offset if self.dp_pad_input else None, + dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset if self.dp_pad_input else None + ) + + return model_output + + + def set_model_inputs(self, input_ids, position_ids, intermediate_tensors, + inputs_embeds, is_prefill): + if input_ids is None: + dyn_input_ids = None + else: + dyn_input_ids = ms.Tensor(shape=[None] * input_ids.ndim, + dtype=mstype.int32) + + if position_ids is None: + dyn_position_ids = None + else: + dyn_position_ids = ms.Tensor(shape=[None] * position_ids.ndim, + dtype=mstype.int32) + + if inputs_embeds is None: + dyn_inputs_embeds = None + else: + dyn_inputs_embeds = ms.Tensor(shape=[None] * inputs_embeds.ndim, + dtype=inputs_embeds.dtype) + + if intermediate_tensors is None: + dyn_intermediate_tensors = None + else: + dyn_intermediate_tensors = ms.Tensor( + shape=[None] * intermediate_tensors.ndim, + dtype=intermediate_tensors.dtype) + + block_size = 
self.cache_config.block_size + num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config) + head_size = self.model_config.get_head_size() + kv_cache_shape = (None, block_size, num_kv_heads, head_size) + + kv_cache_dtype = self.model_config.dtype if self.cache_config.cache_dtype == "auto" \ + else self.cache_config.cache_dtype + if kv_cache_dtype in STR_DTYPE_TO_MS_DTYPE: + kv_cache_dtype = STR_DTYPE_TO_MS_DTYPE[kv_cache_dtype] + + num_layers = self.model_config.get_num_layers(self.parallel_config) + + dyn_key_cache = Tensor(shape=kv_cache_shape, dtype=kv_cache_dtype) + dyn_key_caches = mutable([dyn_key_cache for _ in range(num_layers)]) + + dyn_slot_mapping = Tensor(shape=[None], dtype=mstype.int32) + dynamic_attention_mask = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + dyn_batch_valid_length = Tensor(shape=[None], dtype=mstype.int32) + dyn_q_seq_lens = Tensor(shape=[None], dtype=mstype.int32) + dyn_block_tables = Tensor(shape=[None, None], dtype=mstype.int32) + dyn_dp_pad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_unpad_index = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dyn_dp_pad_index_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + dp_unpad_index_total_with_offset = Tensor(shape=[None], dtype=mstype.int32) if self.dp_pad_input else None + + + self.model.set_inputs( + dyn_input_ids, + dyn_position_ids, + dyn_key_caches, # type: ignore[attr-defined] + is_prefill, + dyn_slot_mapping, + dynamic_attention_mask, + dyn_batch_valid_length, + dyn_q_seq_lens, + dyn_block_tables, + dyn_intermediate_tensors, + dyn_inputs_embeds, + dyn_dp_pad_index, + dyn_dp_unpad_index, + dyn_dp_pad_index_with_offset, + dp_unpad_index_total_with_offset) + + dynamic_hidden_states = Tensor(shape=[None, None], + dtype=self.model_config.dtype) + self.lm_head.set_inputs( dynamic_hidden_states) # type: ignore[attr-defined] \ No newline at end of file -- Gitee From 97bd005dd79bbdc5c6bc6e6788f160b98eaee201 Mon Sep 17 00:00:00 2001 From: horcam Date: Mon, 14 Jul 2025 19:26:20 +0800 Subject: [PATCH 75/76] rename v2->v3 --- .../model_executor/models/{deepseek_v2.py => deepseek_v3.py} | 0 vllm_mindspore/model_executor/models/registry.py | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename vllm_mindspore/model_executor/models/{deepseek_v2.py => deepseek_v3.py} (100%) diff --git a/vllm_mindspore/model_executor/models/deepseek_v2.py b/vllm_mindspore/model_executor/models/deepseek_v3.py similarity index 100% rename from vllm_mindspore/model_executor/models/deepseek_v2.py rename to vllm_mindspore/model_executor/models/deepseek_v3.py diff --git a/vllm_mindspore/model_executor/models/registry.py b/vllm_mindspore/model_executor/models/registry.py index 1ce60fae6..11571d637 100644 --- a/vllm_mindspore/model_executor/models/registry.py +++ b/vllm_mindspore/model_executor/models/registry.py @@ -31,8 +31,8 @@ _NATIVE_MODELS = { "Qwen2_5_VLForConditionalGeneration": ("qwen2_5_vl", "Qwen2_5_VLForConditionalGeneration"), "Qwen3MoeForCausalLM": ("qwen3_moe", "Qwen3MoeForCausalLM"), - "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"), - "DeepseekV3ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"), + "DeepseekV2ForCausalLM": ("deepseek_v3", "DeepseekV2ForCausalLM"), + "DeepseekV3ForCausalLM": ("deepseek_v3", "DeepseekV3ForCausalLM"), } _MINDFORMERS_MODELS = { -- Gitee From af898eadc37bb6cc0db477cb08b9d06242d811ec Mon Sep 17 00:00:00 2001 From: horcam Date: Tue, 15 Jul 
2025 11:20:17 +0800 Subject: [PATCH 76/76] merge-mla-and-moe --- .../layers/fused_moe/fused_moe.py | 6 + .../layers/fused_moe/fused_moe2.py | 167 +++++++++++++++--- .../model_executor/layers/fused_moe/layer.py | 51 +++--- .../model_executor/models/deepseek_v3.py | 58 ++++-- 4 files changed, 208 insertions(+), 74 deletions(-) diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py index a7b3bf7da..e4c6c01d5 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe.py @@ -5,8 +5,12 @@ from mindspore.ops.auto_generate import (GroupedMatmulV4, FusedAddTopKDiv, MoeInitRoutingV2, MoeTokenUnpermute) +from mindspore.ops import operations as P import mindspore as ms +import mindspore.common.dtype as mstype from vllm.distributed.parallel_state import get_ep_group, get_dp_group +from vllm.logger import init_logger +logger = init_logger(__name__) def fused_topk( hidden_states: Tensor, @@ -40,10 +44,12 @@ def grouped_topk( e_score_correction_bias: Optional[Tensor] = None ) -> tuple[Tensor, Tensor]: fused_add_topk_div = FusedAddTopKDiv() + cast = P.Cast() assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") scoring_type = 0 # sigmoid topk_in_group = 2 + gating_output = cast(gating_output, mstype.float32) topk_weights, topk_ids = fused_add_topk_div( gating_output, e_score_correction_bias, diff --git a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py index f3c441461..53230bce8 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/fused_moe2.py @@ -1,12 +1,17 @@ from typing import Optional - +import numpy as np from mindspore import Tensor, mint, ops, nn from mindspore.ops.auto_generate import (GroupedMatmulV4, FusedAddTopKDiv, MoeInitRoutingV2, MoeTokenUnpermute) import mindspore as ms -from vllm.distributed.parallel_state import get_ep_group, get_dp_group +from vllm.distributed.parallel_state import (get_ep_group, get_tp_group, get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.logger import init_logger + +logger = init_logger(__name__) + def fused_topk( hidden_states: Tensor, @@ -60,12 +65,65 @@ def grouped_topk( class FusedExperts(nn.Cell): - def __init__(self): + def __init__(self, moe_config): super().__init__() self.group_matmul_ops = GroupedMatmulV4() self.moe_init_routing_op = MoeInitRoutingV2() self.moe_token_unpermute = MoeTokenUnpermute() + self.pure_tp = False + self.pure_ep = False + + if moe_config.moe_parallel_config.ep_size > 1 and \ + moe_config.moe_parallel_config.tp_size == 1: + # pure ep + self.pure_ep = True + self.tp_rank = get_tensor_model_parallel_rank() + self.tp_world_size = get_tensor_model_parallel_world_size() + ep_size = moe_config.moe_parallel_config.ep_size + self.ep_size = ep_size + self.ep_group = get_ep_group().device_group._name + experts_num = moe_config.num_experts + experts_num_map = [(experts_num // ep_size) + for _ in range(ep_size - 1)] + experts_num_map.append(experts_num - ((experts_num // ep_size) * (ep_size - 1))) + # self.experts_num_map = ms.Tensor(expert_num_map, dtype=ms.int64) + experts_num_map_np = np.array(experts_num_map, dtype=np.int64) + experts_num_map_cu_np = np.cumsum(experts_num_map_np, dtype=np.int64) + self.experts_num_map_cu_index = ms.Tensor(experts_num_map_cu_np - 1, 
dtype=ms.int64) + + if self.tp_rank == 0: + self.send_experts_num_map = ms.Tensor(experts_num_map, dtype=ms.int64) + else: + self.send_experts_num_map = mint.zeros(ep_size, dtype=ms.int64) + + tp_world_size = get_tensor_model_parallel_world_size() + recv_num_map_list = [] + for i in range(self.ep_size): + if i % tp_world_size == 0: + recv_num_map_list.append(moe_config.num_local_experts) + else: + recv_num_map_list.append(0) + self.recv_experts_num_map = ms.Tensor(recv_num_map_list, dtype=ms.int64) + self.local_expert_num = moe_config.num_local_experts + + self.prepend_tensor = ms.Tensor([0], dtype=ms.int64) + + self.hidden_size = moe_config.hidden_dim + self.all_to_all_v_across_ep_with_block_size = ops.AlltoAllV(block_size=self.hidden_size, + group=self.ep_group) + self.all_to_all_v_across_ep = ops.AlltoAllV(group=self.ep_group) + self.even_list = [1 for _ in range(ep_size)] + + self.tp_group = get_tp_group().device_group._name + self.broadcast_to_tensor_parallel_region = ops.Broadcast(0, group=self.tp_group) + + self.dummy_token = mint.zeros((1, self.hidden_size), dtype=moe_config.in_dtype) + + if moe_config.moe_parallel_config.ep_size == 1 and \ + moe_config.moe_parallel_config.tp_size >= 1: + self.pure_tp = True + def construct(self, hidden_states: Tensor, w1: Tensor, @@ -79,28 +137,21 @@ class FusedExperts(nn.Cell): tp_size: int = 1, ep_size: int = 0) -> Tensor: - if tp_size >= 1: - # no ep, pure tp - if ep_size == 1: - hidden_states = self._run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, - activation, global_num_experts, - apply_router_weight_on_input) - # ep_size > 1 : pure ep or tp + ep - else: - # pure ep - if tp_size == 1: - hidden_states = self._run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + if self.pure_tp: + hidden_states = self._run_tp_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + elif self.pure_ep: + hidden_states = self._run_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, + activation, global_num_experts, + apply_router_weight_on_input) + else: + hidden_states = self._run_tp_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, activation, global_num_experts, apply_router_weight_on_input) - # tp_size > 1 : tp + ep - else: - hidden_states = self._run_tp_ep_moe(hidden_states, w1, w2, topk_ids, topk_weights, - activation, global_num_experts, - apply_router_weight_on_input) return hidden_states - def _gate_activation(self, gate, activation): if activation == "silu": return mint.nn.functional.silu(gate) @@ -109,9 +160,6 @@ class FusedExperts(nn.Cell): else: raise ValueError(f"Unsupported activation function: {activation}") - - - def _group_matmul(self, hidden_states, weight, group_list): return self.group_matmul_ops([hidden_states], [weight], None, None, None, None, None, None, @@ -126,11 +174,74 @@ class FusedExperts(nn.Cell): activation, global_num_experts, apply_router_weight_on_input): - hidden_states = self._group_matmul(hidden_states, w1, topk_ids) - hidden_states = self._gate_activation(hidden_states, activation) - hidden_states = self._group_matmul(hidden_states, w2, topk_ids) - return hidden_states + topk_weights = topk_weights.astype(hidden_states.dtype) + topk_ids = topk_ids.astype(ms.int32) + sorted_input_tensor, unsort_map, group_list, _ = \ + self.moe_init_routing_op( + hidden_states, + topk_ids, + active_num=0, + expert_capacity=0, + expert_num=global_num_experts, + drop_pad_mode=0, + expert_tokens_count_or_cumsum_flag=2, + expert_tokens_before_capacity_flag=True) 
+ + # group_list = group_list.reshape(1, -1) + + if self.tp_rank == 0: + group_list_cumsum = mint.cumsum(group_list, 0, dtype=ms.int64) + # expert index = [3, 7, 11, 15] (self.ep_group_size,) + # compute how much tensor data each rank sends to the other ranks + send_list = group_list_cumsum[self.experts_num_map_cu_index] # [20, 30, 40, 50] + send_list = mint.diff(send_list, prepend=self.prepend_tensor) + else: + send_list = mint.zeros(self.ep_size, dtype=ms.int64) # [0, 0, 0, 0] + + group_list = group_list.astype(ms.int64) + + # recv_list = self.all_to_all_across_ep(send_list) + recv_list = self.all_to_all_v_across_ep(send_list, self.even_list, self.even_list) + # recv_list [20, 40, 60, 70] + local_input_tensor = self.all_to_all_v_across_ep_with_block_size(sorted_input_tensor.reshape(-1), + send_list, + recv_list) + + local_group_list = self.all_to_all_v_across_ep(group_list, + self.send_experts_num_map, + self.recv_experts_num_map) + local_group_list = local_group_list.reshape(-1, self.local_expert_num) + local_group_list = local_group_list.sum(dim=0) + + recv_tokens = recv_list.sum() + if recv_tokens > 0: + local_input_tensor = local_input_tensor.reshape(-1, self.hidden_size) + gate_hidden_out = self._group_matmul(local_input_tensor, mint.transpose(w1, -1, -2), local_group_list) + gate, hidden = mint.split(gate_hidden_out, + (w1.shape[1] // 2, w1.shape[1] // 2), -1) + gate = self._gate_activation(gate, activation) + hidden = mint.mul(hidden, gate) + expert_output = self._group_matmul(hidden, mint.transpose(w2, -1, -2), local_group_list) + expert_output = mint.nan_to_num(expert_output, 0, 0, 0) + else: + expert_output = self.dummy_token + expert_output = self.all_to_all_v_across_ep_with_block_size(expert_output.reshape(-1), + recv_list, + send_list) + if self.tp_rank == 0: + expert_output = expert_output.reshape(-1, self.hidden_size) + moe_output = self.moe_token_unpermute(permuted_tokens=expert_output, + sorted_indices=unsort_map, + probs=topk_weights, + padded_mode=False, + restore_shape=None) + if self.tp_world_size > 0: + if self.tp_rank == 0: + moe_output = self.broadcast_to_tensor_parallel_region((moe_output,))[0] + else: + moe_output = self.broadcast_to_tensor_parallel_region((hidden_states,))[0] + return moe_output def _run_tp_moe(self, hidden_states, @@ -183,4 +294,4 @@ global_num_experts, apply_router_weight_on_input): raise NotImplementedError( - "TP + EP MoE is not implemented yet. Please use pure TP or pure EP MoE instead.") + "TP + EP MoE is not implemented yet. 
Please use pure TP or pure EP MoE instead.") \ No newline at end of file diff --git a/vllm_mindspore/model_executor/layers/fused_moe/layer.py b/vllm_mindspore/model_executor/layers/fused_moe/layer.py index cfcb53793..7ef59eaf9 100644 --- a/vllm_mindspore/model_executor/layers/fused_moe/layer.py +++ b/vllm_mindspore/model_executor/layers/fused_moe/layer.py @@ -6,8 +6,6 @@ from dataclasses import dataclass from enum import Enum from typing import Callable, Optional -import torch -from torch.nn.parameter import UninitializedParameter import vllm.envs as envs from vllm.config import ParallelConfig, get_current_vllm_config @@ -15,24 +13,18 @@ from vllm.distributed import (get_dp_group, get_ep_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, tensor_model_parallel_all_reduce) -from vllm.forward_context import ForwardContext, get_forward_context + from vllm.logger import init_logger -from vllm.model_executor.custom_op import CustomOp -from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( - is_rocm_aiter_moe_enabled) + from vllm.model_executor.layers.quantization.base_config import QuantizationConfig from vllm.model_executor.utils import set_weight_attrs -from vllm.platforms import current_platform -from vllm.platforms.interface import CpuArchEnum -from vllm.utils import direct_register_custom_op -# from vllm.model_executor.layers.fused_moe.layer import FusedMoEParallelConfig + from vllm.model_executor.layers.fused_moe.layer import (determine_expert_map, FusedMoeWeightScaleSupported, FusedMoEMethodBase, - #MoEConfig, ) - +from vllm_mindspore.model_executor.layers.fused_moe.fused_moe2 import FusedExperts from vllm_mindspore.model_executor.layers.fused_moe.fused_moe import (fused_topk, grouped_topk, @@ -191,8 +183,8 @@ class MoEConfig: num_local_experts: int moe_parallel_config: FusedMoEParallelConfig - in_dtype: torch.dtype # The activation type. - quant_dtype: torch.dtype = None + in_dtype: ms.dtype # The activation type. + quant_dtype: ms.dtype = None # TODO: add more quantization params, blocked, per-token, etc. 
block_size: int = 128 @@ -248,30 +240,30 @@ class MoEConfig: class FusedMoEMethodBase(QuantizeMethodBase): @abstractmethod - def create_weights(self, layer: torch.nn.Module, num_experts: int, + def create_weights(self, layer: nn.Cell, num_experts: int, hidden_size: int, intermediate_size_per_partition: int, - params_dtype: torch.dtype, **extra_weight_attrs): + params_dtype, **extra_weight_attrs): raise NotImplementedError @abstractmethod def apply( self, - layer: torch.nn.Module, - x: torch.Tensor, - router_logits: torch.Tensor, + layer: nn.Cell, + x: Tensor, + router_logits: Tensor, top_k: int, renormalize: bool, use_grouped_topk: bool = False, topk_group: Optional[int] = None, num_expert_group: Optional[int] = None, global_num_experts: int = -1, - expert_map: Optional[torch.Tensor] = None, + expert_map: Optional[Tensor] = None, custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, + e_score_correction_bias: Optional[Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", - ) -> torch.Tensor: + ) -> Tensor: raise NotImplementedError class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): @@ -279,7 +271,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): def __init__(self, moe: MoEConfig): super().__init__() - self.fused_experts = fused_experts # type: ignore + self.fused_experts = FusedExperts(moe) # type: ignore self.moe = moe def create_weights(self, layer: nn.Cell, num_experts: int, @@ -378,11 +370,11 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, nn.Cell): topk_weights=topk_weights, topk_ids=topk_ids, activation=activation, - global_num_experts=global_num_experts, + global_num_experts=256, # zhq: TODO apply_router_weight_on_input=apply_router_weight_on_input, expert_map=expert_map, - tp_size=self.moe.tp_size, - ep_size=self.moe.ep_size, + # tp_size=self.moe.tp_size, # zhq: TODO + # ep_size=self.moe.ep_size, # zhq: TODO ) @@ -517,7 +509,8 @@ class FusedMoE(nn.Cell): self.quant_method = quant_method moe_quant_params = { - "num_experts": self.local_num_experts, + # "num_experts": self.local_num_experts, # zhq: TODO + "num_experts": 256, "hidden_size": hidden_size, "intermediate_size_per_partition": self.intermediate_size_per_partition, @@ -704,7 +697,7 @@ class FusedMoE(nn.Cell): tp_rank=tp_rank, load_full=load_full_w3) - def weight_loader(self, param: torch.nn.Parameter, + def weight_loader(self, param: Parameter, loaded_weight: Tensor, weight_name: str, shard_id: str, expert_id: int) -> None: @@ -776,7 +769,7 @@ class FusedMoE(nn.Cell): custom_routing_function: Optional[Callable] = None, scoring_func: str = "softmax", e_score_correction_bias: Optional[Tensor] = None, - indices_type: Optional[torch.dtype] = None): + indices_type=None): # DeekSeekv2 uses grouped_top_k if use_grouped_topk: diff --git a/vllm_mindspore/model_executor/models/deepseek_v3.py b/vllm_mindspore/model_executor/models/deepseek_v3.py index 9b5170218..8c22b20b4 100644 --- a/vllm_mindspore/model_executor/models/deepseek_v3.py +++ b/vllm_mindspore/model_executor/models/deepseek_v3.py @@ -154,7 +154,7 @@ class DeepseekV3MoE(nn.Cell): bias=False, quant_config=None, prefix=f"{prefix}.gate") - self.gate.e_score_correction_bias = 0.1 # zhq: TODO + self.gate.e_score_correction_bias = ms.Tensor(np.zeros([32]), dtype=mstype.float32) # zhq: TODO # Load balancing settings. zhq: needed? 
logger.warning( @@ -162,7 +162,7 @@ ) parallel_config = get_current_vllm_config().parallel_config - parallel_config.num_redundant_experts = 0 + parallel_config.num_redundant_experts = 1 # zhq: TODO logger.warning( parallel_config @@ -231,7 +231,7 @@ dp_pad_index_with_offset=dp_pad_index_with_offset, dp_unpad_index_total_with_offset=dp_unpad_index_total_with_offset) if shared_output is not None: - if hidden_states.dtype != torch.float16: + if hidden_states.dtype != mstype.float16: final_hidden_states = final_hidden_states + shared_output else: # Fix FP16 overflow @@ -244,7 +244,16 @@ self.experts.maybe_all_reduce_tensor_model_parallel( final_hidden_states)) - return final_hidden_states.view(num_tokens, hidden_dim) + return final_hidden_states.view(orig_shape) + + +class DeepseekV3FakedAttention(nn.Cell): + + def __init__(self, **kwargs) -> None: + super().__init__() + + def construct(self, hidden_states) -> Tensor: + return hidden_states class DeepseekV3Attention(nn.Cell): @@ -446,7 +455,7 @@ q_absorb = self.kv_b_proj_k.weight.view(self.num_heads, self.qk_nope_head_dim, self.kv_lora_rank) q_nope = self.qabsorb_k_matmul(q_nope.transpose(1, 0, 2), q_absorb).transpose(1, 0, 2) # (head, t, 128) (head, 128, 512) -> (head, t, 512) -> (t, head, 512) query_states = self.pe_concat((q_nope, q_pe)) # (t, head, 512) (t, head, 64) -> (t, head, 576) - query_states = self.reshape(query_states, (-1, self.n_local_heads * (self.kv_lora_rank + self.qk_rope_head_dim))) # 2-D + query_states = self.reshape(query_states, (-1, self.num_heads * (self.kv_lora_rank + self.qk_rope_head_dim))) # 2-D # attention context_layer = self.paged_attention(query_states, key_cache, key_cache, block_tables, batch_valid_length, @@ -659,11 +668,13 @@ ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", - num_experts=self.config.n_routed_experts, - num_redundant_experts=self.num_redundant_experts) + num_experts=self.config.n_routed_experts) loaded_params: set[str] = set() + for k in params_dict.keys(): + logger.warning(f"params_dict:{k}") + for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -676,8 +687,11 @@ k_weight = loaded_weight[:, :self.config.qk_nope_head_dim, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) v_weight = loaded_weight[:, self.config.qk_nope_head_dim:, :].reshape(self.config.num_attention_heads * self.config.qk_nope_head_dim, -1) + if k_name not in params_dict.keys() or v_name not in params_dict.keys(): + continue k_param = params_dict[k_name] v_param = params_dict[v_name] + k_param.weight_loader(k_param, k_weight) v_param.weight_loader(v_param, v_weight) loaded_params.add(k_name) @@ -707,9 +721,12 @@ continue if is_pp_missing_parameter(name, self): continue + if name not in params_dict: + continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) + loaded_params.add(name) break else: is_expert_weight = False @@ -728,18 +745,21 @@ if is_pp_missing_parameter(name_mapped, self): continue + if name_mapped not in params_dict.keys(): + continue + param = params_dict[name_mapped] weight_loader = param.weight_loader - success = weight_loader(param, - loaded_weight, - name_mapped, - shard_id=shard_id, - 
expert_id=expert_id, - return_success=True) - if success: - name = name_mapped - break + weight_loader(param, + loaded_weight, + name_mapped, + shard_id=shard_id, + expert_id=expert_id) + logger.warning( + f"Replace: weight_name:{weight_name} => param_name:{param_name}, Get Result: {name} => {name_mapped}") + loaded_params.add(name_mapped) + break else: if is_expert_weight: # We've checked that this is an expert weight @@ -760,11 +780,15 @@ class DeepseekV3Model(nn.Cell): if is_pp_missing_parameter(name, self): continue + if name not in params_dict.keys(): + logger.warning(f"name_mapped: {name} not found in params_dict") + continue + param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) - loaded_params.add(name) + loaded_params.add(name) return loaded_params class DeepseekV3ForCausalLM(NativeModel, SupportsPP): -- Gitee
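
For reference, the exec_model diff above builds four index tensors that pad every data-parallel rank's tokens to a common length before the all-gather (when DP is enabled without expert parallelism) and recover the original token order afterwards. The sketch below re-expresses that index arithmetic in plain NumPy so it can be sanity-checked without MindSpore or a multi-device job; the function name build_dp_pad_indices and the example token counts are illustrative and not part of the patch.

import numpy as np

def build_dp_pad_indices(token_num_total, dp_rank):
    # token_num_total: per-rank token counts gathered across the DP group,
    # as produced by the all_gather_into_tensor call in exec_model.
    token_num_total = np.asarray(token_num_total, dtype=np.int64)
    dp_world_size = token_num_total.shape[0]
    token_num_total_cumsum = np.cumsum(token_num_total)
    max_token_num = token_num_total.max()
    total_pad_num = max_token_num - token_num_total

    # Identity over this rank's tokens, zero-padded so every rank
    # contributes exactly max_token_num rows to the all-gather.
    dp_unpad_index = np.arange(token_num_total[dp_rank])
    dp_pad_index = np.pad(dp_unpad_index, (0, total_pad_num[dp_rank]))

    # For each rank: offsets of its tokens in the concatenated unpadded
    # batch, padded per rank to max_token_num (the pad/gather mapping).
    dp_pad_index_total_with_offset = np.concatenate([
        np.pad(np.arange(0 if r == 0 else token_num_total_cumsum[r - 1],
                         token_num_total_cumsum[r]), (0, total_pad_num[r]))
        for r in range(dp_world_size)])

    # Positions of the real tokens inside the padded, gathered tensor of
    # dp_world_size * max_token_num rows (the inverse, unpad mapping).
    dp_unpad_index_total_with_offset = np.concatenate([
        np.arange(token_num_total[r]) + r * max_token_num
        for r in range(dp_world_size)])

    return (dp_pad_index, dp_unpad_index,
            dp_pad_index_total_with_offset, dp_unpad_index_total_with_offset)

# Three DP ranks holding 2, 4 and 3 tokens; rank 0 pads two dummy rows.
pad_idx, unpad_idx, pad_total, unpad_total = build_dp_pad_indices([2, 4, 3], dp_rank=0)
print(pad_idx)      # [0 1 0 0]
print(pad_total)    # [0 1 0 0 2 3 4 5 6 7 8 0]
print(unpad_total)  # [0 1 4 5 6 7 8 9 10]

Because every rank contributes max_token_num rows, the gathered tensor has a static per-rank stride, which is what lets dp_unpad_index_total_with_offset strip the padding with a single gather after the collective.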