From 55b4e2807277c1de3c99cbe19e24edb5d659a48b Mon Sep 17 00:00:00 2001
From: xiaoqiang
Date: Wed, 8 Jun 2022 06:06:30 +0000
Subject: [PATCH 01/11] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20Dense3DRegression=5F?=
 =?UTF-8?q?ID1066=5Ffor=5FTensorFlow?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/.keep

diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/.keep b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/.keep
new file mode 100644
index 000000000..e69de29bb
-- 
Gitee

From eba9d1f00df140754c4b7366370710e4310da835 Mon Sep 17 00:00:00 2001
From: xiaoqiang
Date: Wed, 8 Jun 2022 06:08:33 +0000
Subject: [PATCH 02/11] add TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/readme.md.

---
 .../readme.md | 40 +++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/readme.md

diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/readme.md b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/readme.md
new file mode 100644
index 000000000..7cf15b11c
--- /dev/null
+++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/readme.md
@@ -0,0 +1,40 @@
+# Dense 3D Regression for Hand Pose Estimation
+
+This repository contains a TensorFlow implementation of the [paper](https://arxiv.org/abs/1711.08996). It is developed and tested on Debian GNU/Linux 8 64-bit.
+
+## Requirements:
+- python 2.7
+- tensorflow == 1.3
+- [tfplot](https://github.com/wookayin/tensorflow-plot) (for visualization on tf summary files)
+- matplotlib >= 2.0.2
+- numpy
+- opencv >= 2.4 (optional, for cpu visualization)
+
+## Data Preparations:
+Download the datasets, create soft links for them to [exp/data](./exp/data), and run `python data/${dataset}.py` to create the TFRecord files. Details are [here](./exp/data).
+
+## Usage:
+Both training and testing functions are provided by `model/hourglass_um_crop_tiny.py`. Here is an example:
+```bash
+python model/hourglass_um_crop_tiny.py --dataset 'icvl' --batch_size 40 --num_stack 2 --fea_num 128 --debug_level 2 --is_train True
+```
+where the hyperparameter configuration is explained in the source Python files.
+
+## Results:
+We provide the estimation results of the proposed method for [ICVL](./exp/result/icvl.txt), [NYU](./exp/result/nyu.txt), [MSRA15](./exp/result/msra.txt). They are xyz coordinates in mm; the 2D projection method is the function _xyz2uvd_ from [here](data/util.py#L23). Check [here](https://github.com/xinghaochen/awesome-hand-pose-estimation/tree/master/evaluation) for comparison to other methods. Thanks to @xinghaochen for providing the comparison.
+
+## Pretrained Models:
+Run the scripts below to download and install the trained models for the corresponding datasets. $ROOT denotes the root path of this project.
+```bash
+cd $ROOT
+./exp/scripts/fetch_icvl_models.sh
+./exp/scripts/fetch_msra_models.sh
+./exp/scripts/fetch_nyu_models.sh
+```
+To perform testing, simply run
+```
+python model/hourglass_um_crop_tiny.py --dataset 'icvl' --batch_size 3 --num_stack 2 --num_fea 128 --debug_level 2 --is_train False
+python model/hourglass_um_crop_tiny.py --dataset 'nyu' --batch_size 3 --num_stack 2 --num_fea 128 --debug_level 2 --is_train False
+python model/hourglass_um_crop_tiny.py --dataset 'msra' --pid 0 --batch_size 3 --num_stack 2 --num_fea 128 --debug_level 2 --is_train False
+```
+where, for the msra dataset, `pid` indicates which person to test on. In the [testing function](data/hourglass_um_crop_tiny.py#L23), the third argument indicates which model (by its training step) will be restored. A step of -1 refers to our pre-trained model.
-- 
Gitee

From 0a300808c78efc33ceeac3bd9d83fce576909a00 Mon Sep 17 00:00:00 2001
From: xiaoqiang
Date: Wed, 8 Jun 2022 06:09:42 +0000
Subject: [PATCH 03/11] add TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/gpu_config.py.

---
 .../gpu_config.py | 41 +++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/gpu_config.py

diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/gpu_config.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/gpu_config.py
new file mode 100644
index 000000000..0e3d0280c
--- /dev/null
+++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/gpu_config.py
@@ -0,0 +1,41 @@
+from __future__ import print_function, division, absolute_import
+import os
+import commands
+
+# check the job id
+gpu_lock_path = '/tmp/lock-gpu*/info.txt'
+lock_str = commands.getstatusoutput('cat %s'%gpu_lock_path)
+lock_str = lock_str[1]
+lock_str = lock_str.split('\n')
+
+
+# on gpu server, use the gpu for tensorflow
+if 'SGE_GPU' in os.environ:
+    gpulist = []
+    for line in lock_str:
+        if line.find('wanc') == -1:
+            continue
+        line = line.split(' ')
+        job_idx = int(line[7])
+        gpu_idx = int(line[1])
+        gpulist.append((gpu_idx, job_idx))
+    gpulist = sorted(gpulist, key=lambda x:x[1])
+    gpu_idx,job_idx = gpulist[-1]
+
+    gpu_list = [gpu_idx]
+    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(gpu) for gpu in gpu_list)
+    print('use GPU for tensorflow')
+else:
+    os.environ['CUDA_VISIBLE_DEVICES'] = ''
+    gpu_list = []
+    print('\x1b[0;31;47m use CPU for tensorflow \x1b[0m')
+
+num_gpus = len(gpu_list)
+print('available gpu list, ', gpu_list)
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+import tensorflow as tf
+config = tf.ConfigProto()
+config.allow_soft_placement = True
+config.gpu_options.allow_growth = True
-- 
Gitee

From e2e6eaf4e5b7eb065f5a32dc86f913ecea3b5183 Mon Sep 17 00:00:00 2001
From: xiaoqiang
Date: Wed, 8 Jun 2022 06:12:57 +0000
Subject: [PATCH 04/11] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20data?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../graph/Dense3DRegression_ID1066_for_TensorFlow/data/.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/.keep

diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/.keep b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/.keep
new file mode 100644
index 000000000..e69de29bb
-- 
Gitee

From 9126b276b71dc2318ebff3eb925f6e54674044ae Mon Sep 17 00:00:00 2001
From: 
xiaoqiang Date: Wed, 8 Jun 2022 06:13:08 +0000 Subject: [PATCH 05/11] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20exp?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../graph/Dense3DRegression_ID1066_for_TensorFlow/exp/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/.keep diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/.keep b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 89df05a6fdf21420e4b98ba69f97197397346cb6 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:13:26 +0000 Subject: [PATCH 06/11] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20model?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../graph/Dense3DRegression_ID1066_for_TensorFlow/model/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/.keep diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/.keep b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 4609cecd2b92a067e12fa69cdd2054b0c9050d40 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:13:35 +0000 Subject: [PATCH 07/11] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20network?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../graph/Dense3DRegression_ID1066_for_TensorFlow/network/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/.keep diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/.keep b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From f0a78c7040e82ebc9a3f788184d04c2025cd3d10 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:16:15 +0000 Subject: [PATCH 08/11] update --- .../data/__init__.py | 1 + .../data/dataset_base.py | 240 ++++++++++++++ .../data/evaluation.py | 109 ++++++ .../data/icvl.py | 264 +++++++++++++++ .../data/msra.py | 219 +++++++++++++ .../data/nyu.py | 310 ++++++++++++++++++ .../data/nyu_bbx.pkl | Bin 0 -> 428477 bytes .../data/preprocess.py | 268 +++++++++++++++ .../data/util.py | 188 +++++++++++ .../data/visualization.py | 137 ++++++++ 10 files changed, 1736 insertions(+) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/__init__.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/dataset_base.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/evaluation.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/icvl.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/msra.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/nyu.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/nyu_bbx.pkl create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/preprocess.py create mode 100644 
TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/util.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/visualization.py diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/__init__.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/__init__.py @@ -0,0 +1 @@ + diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/dataset_base.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/dataset_base.py new file mode 100644 index 000000000..c7ed20d80 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/dataset_base.py @@ -0,0 +1,240 @@ +# the base class of dataset + +from __future__ import print_function, division, absolute_import + +import gpu_config +import tensorflow as tf + +from collections import namedtuple +import time, os, cPickle, sys, threading, glob +from datetime import datetime +import time + +import numpy as np +import cv2 + +from data.util import * +Annotation = namedtuple('Annotation', 'name,pose') + +def _float_feature(value): + if isinstance(value, np.ndarray): + value = value + elif not isinstance(value, list): + value = [value] + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + +def _bytes_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + +class BaseDataset(object): + '''provide basic utilities to convert the initial dataset to TFRecord files + and the interface to define readdata on the graph + ''' + def __init__(self, subset): + '''subset: e.g., train, validation, test, or train_1 in the nyu (with view 1) + ''' + self.subset = subset + + def loadAnnotation(self): + '''the annotation is a sequential list of Annotation namedtuple + ''' + raise NotImplementedError + + @property + def annotations(self): + raise NotImplementedError + + def convert_to_example(self, label): + '''load the image corresponding to the label.name, + then serialize the structure to tf.train.Example + ''' + raise NotImplementedError + + def saveSampleToRecord(self, idx_list, tar_file_path): + curr_list = [self.annotations[idx] for idx in idx_list] + # if os.path.exists(tar_file_path): + # print('%s alread written'%tar_file_path) + # sys.stdout.flush() + # return + + writer = tf.python_io.TFRecordWriter(tar_file_path) + for label in curr_list: + example = self.convert_to_example(label) + writer.write(example.SerializeToString()) + writer.close() + + def write_TFRecord_single_thread(self, thread_idx, thread_range, num_shards_per_thread): + print('Launching thread %d, with files from %d to %d'%(thread_idx, thread_range[0], thread_range[1])) + sys.stdout.flush() + spacing = np.linspace(thread_range[0], thread_range[1], num_shards_per_thread+1).astype(np.int) + + shard_range = [] + for idx in range(num_shards_per_thread): + shard_range.append((spacing[idx], spacing[idx+1])) + + if not hasattr(self, 'num_shards'): + '''in case of single thread + ''' + self.num_shards = num_shards_per_thread + + for curr_shard_idx, shard in enumerate(shard_range): + file_idx = thread_idx*num_shards_per_thread + curr_shard_idx + file_name = '%s-%d-of-%d'%(self.subset, file_idx, self.num_shards) + file_path = os.path.join(self.tf_dir, file_name) + print('[Thread %d] begin processing %d - %d images, to %s'%( + 
thread_idx,shard[0],shard[1],file_path)) + t1 = time.time() + sys.stdout.flush() + self.saveSampleToRecord(range(shard[0], shard[1]), file_path) + t2 = time.time() + print('[Thread {}]end at ={}, with {}s'.format(thread_idx, datetime.now(), t2-t1)) + + def write_TFRecord_multi_thread(self, num_threads, num_shards): + '''convert all the dataset to several file shards + num_threads: number of threads to load and save the data + num_shards: number of file segment on the harddisk + ''' + if not os.path.exists(self.tf_dir): + os.mkdir(self.tf_dir) + + assert not num_shards % num_threads, ( + 'please make the num_threads commensurate with file_shards') + self.num_shards = num_shards + self.num_threads = num_threads + num_shards_per_thread = int(num_shards/num_threads) + + self.loadAnnotation() + + spacing = np.linspace(0, len(self.annotations), num_threads+1).astype(np.int) + thread_range = [] + for idx in range(num_threads): + thread_range.append((spacing[idx], spacing[idx+1])) + + coord = tf.train.Coordinator() + threads = [] + print('begin writing at ', datetime.now()) + sys.stdout.flush() + for thread_idx in range(len(thread_range)): + args = (thread_idx, + thread_range[thread_idx], + num_shards_per_thread) + + t = threading.Thread(target=self.write_TFRecord_single_thread, args=args) + t.start() + threads.append(t) + + # wait all thread end + coord.join(threads) + + # interface to the batch iteration + @property + def filenames(self): + if self.subset == 'testing': + pattern = os.path.join(self.tf_dir, '%s-*'%'testing') + else: + pattern = os.path.join(self.tf_dir, '%s-*'%'training') + files = glob.glob(pattern) + print('[data.dataset_base]total file found = %d'%(len(files))) + return files + + @property + def is_train(self): + raise NotImplementedError + + @property + def approximate_num(self): + '''return: + the approximate total number of training set + ''' + raise NotImplementedError + + def get_batch_op(self, + batch_size, num_readers=1, num_preprocess_threads=1, + preprocess_op=None, + is_train=None): + '''return the operation on tf graph of + iteration over the given dataset + ''' + if is_train == None: + is_train = self.is_train + + with tf.name_scope('batch_processing'): + min_queue_examples = batch_size*1 + + if is_train: + assert num_readers >1, 'during training, num_readers should be greater than 1, to shuffle the input' + filename_queue = tf.train.string_input_producer( + self.filenames, capacity=32, shuffle=True) + + example_queue = tf.RandomShuffleQueue( + capacity=self.approximate_num_per_file*8 + 3*batch_size, + min_after_dequeue=self.approximate_num_per_file*8, + dtypes=[tf.string]) + + else: + filename_queue = tf.train.string_input_producer( + self.filenames, capacity=1, shuffle=False) + example_queue = tf.FIFOQueue( + capacity=min_queue_examples+batch_size, + dtypes=[tf.string]) + + if num_readers > 1: + enqueue_ops = [] + for _ in range(num_readers): + reader = tf.TFRecordReader() + _, value = reader.read(filename_queue) + enqueue_ops.append(example_queue.enqueue([value])) + + tf.train.queue_runner.add_queue_runner( + tf.train.queue_runner.QueueRunner(example_queue, enqueue_ops)) + example_serialized = example_queue.dequeue() + else: + reader = tf.TFRecordReader() + _, example_serialized = reader.read(filename_queue) + + results = [] + for thread_idx in range(num_preprocess_threads): + dm, pose, name = self.parse_example(example_serialized) + if preprocess_op != None: + result = preprocess_op(dm, pose, self.cfg) + results.append(list(result)) + else: + 
results.append([dm, pose]) + + batch = tf.train.batch_join( + results, batch_size=batch_size, capacity=2*num_preprocess_threads*batch_size) + + return batch + + # TODO: merge this function to get_batch_op + def get_batch_op_test(self, batch_size, preprocess_op=None): + '''return the operation on tf graph of + iteration over the given dataset + ''' + with tf.name_scope('batch_processing'): + min_queue_examples = 1 + + filename_queue = tf.train.string_input_producer( + self.filenames, num_epochs=1, capacity=1, shuffle=False) + example_queue = tf.FIFOQueue( + capacity=10, + dtypes=[tf.string]) + + reader = tf.TFRecordReader() + _, example_serialized = reader.read(filename_queue) + + results = [] + + dm, pose, name = self.parse_example(example_serialized) + if preprocess_op != None: + result = preprocess_op(dm, pose, self.cfg) + results.append(list(result)+[name]) + else: + results.append([dm, pose, name]) + + batch = tf.train.batch_join( + results, batch_size=batch_size, capacity=2) + return batch + + def parse_example(self, example_serialized): + raise NotImplementedError diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/evaluation.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/evaluation.py new file mode 100644 index 000000000..ab7958393 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/evaluation.py @@ -0,0 +1,109 @@ +import matplotlib.pyplot as plt +import numpy.linalg as alg + +class Evaluation(object): + def __init__(self): + pass + + @classmethod + def maxJntError(cls_obj, skel1, skel2): + diff = skel1.reshape(-1,3) - skel2.reshape(-1,3) + diff = alg.norm(diff, axis=1) + return diff.max() + + @classmethod + def meanJntError(cls_obj, skel1, skel2): + diff = skel1.reshape(-1,3) - skel2.reshape(-1,3) + diff = alg.norm(diff, axis=1) + return diff.mean() + + + @classmethod + def averageMaxJntError(cls_obj, score_list): + score_list = sorted(score_list) + + th_idx = 0 + for i in range(0, len(score_list)): + if(score_list[i]<=10.5): + th_idx += 1 + print '10mm percentage: %f'%(float(th_idx)/len(score_list)) + + th_idx = 0 + for i in range(0, len(score_list)): + if(score_list[i]<=20.5): + th_idx += 1 + print '20mm percentage: %f'%(float(th_idx)/len(score_list)) + + th_idx = 0 + for i in range(0, len(score_list)): + if(score_list[i]<=30.5): + th_idx += 1 + print '30mm percentage: %f'%(float(th_idx)/len(score_list)) + + th_idx = 0 + for i in range(0, len(score_list)): + if(score_list[i]<=40.5): + th_idx += 1 + print '40mm percentage: %f'%(float(th_idx)/len(score_list)) + + thresh_list = [thresh*5.0+0.5 for thresh in range(0, 17)] + precent_list = [1]*len(thresh_list) + + cur_score_idx = 0 + for i in range(0, len(thresh_list)): + th_idx = 0 + for j in range(0, len(score_list)): + if(score_list[j] 1: + enqueue_ops = [] + for _ in range(num_readers): + reader = tf.TFRecordReader() + _, value = reader.read(filename_queue) + enqueue_ops.append(example_queue.enqueue([value])) + + tf.train.queue_runner.add_queue_runner( + tf.train.queue_runner.QueueRunner(example_queue, enqueue_ops)) + example_serialized = example_queue.dequeue() + else: + reader = tf.TFRecordReader() + _, example_serialized = reader.read(filename_queue) + + results = [] + for thread_idx in range(num_preprocess_threads): + dm, pose, bbx, name = self.parse_example_test(example_serialized) + if preprocess_op != None: + result = preprocess_op(dm, pose, bbx, self.cfg) + results.append(list(result)) + else: + results.append([dm, pose]) + 
batch = tf.train.batch_join( + results, batch_size=batch_size, capacity=2*num_preprocess_threads*batch_size) + return batch + else: + return super(NyuDataset, self).get_batch_op(batch_size, + num_readers, + num_preprocess_threads, + preprocess_op, + is_train) + +def saveTFRecord(): + # reader = NyuDataset('training') + # reader.write_TFRecord_multi_thread(num_threads=30, num_shards=300) + + reader = NyuDataset('testing') + reader.write_TFRecord_multi_thread(num_threads=16, num_shards=16) + +if __name__ == '__main__': + saveTFRecord() diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/nyu_bbx.pkl b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/nyu_bbx.pkl new file mode 100644 index 0000000000000000000000000000000000000000..944d67e56d04480da5065707b7a53e4d63583f23 GIT binary patch literal 428477 zcmZtPby!tR_%D1K6zLYFLApai(e(vIvAY$!1HtZAN)QBO)3x`e?L@J=8%0r2M1Ab8 zGrx1*_d3^_`#gX6&%@`s*P1mmduGRPC`Agj=&5NGmJ|}i|%#5h$ z=%}S_$45_@G;i+0nCRF^F>Mzp{hz;TJ9qN``M(R4BNa9dRh+na&;k|5NX1CiNX1S6 z-=9p5Svr48+Xbpam8SYeDnu%>KT&gxR354r(yw2?-v8Kt|NEavg_wz3hZ?TW*2DH4 zhRD*Z#IGUo=p7mx7Bg|W()9luK>hy@FkOv-NPR=>Pd32m`U*sp$6@1a0yIQGS8jj| zB_lMvsX%|bc(luv~b1!LSDT!C>5;xW;TfVLuV!Nm~gDhy${qY|CiJ*=5R zfTjr8yfi?5rU7yrD`0jk4pu*7_?vDg0`97Ym>Xt<^0W%HZ5NLPsU(7y2=v-(0NY|i zEWKEPYro>q{Dnl&7J&*|eawDkgnt7o;JPIq0X;|r9T8A(XNV_f4Nx<-0wZR`<3=O_ zx*~AT>2v|>mi3l8js1M^9V@zFC zfj)k5SQ$ousR+D?(}UE|1bt7Hp+j~&dK@A*Z6*Q-wG1$Lk0CCuE5|6$codu^5!#Eu z+inIpy1)p>wJH(UHXcsN1elA!rJefdwb}&r6D!bn3VRNL1auIAZYT87J;)dq56ZFZ z@iyGtPe4Zz@V;n(?`Ms1r)>qS?#AKw8*t+UKfviNNv)`Y?$w!p=1nXwx?yb%P187J-sTBfK1Cgdsi^@D$?l zJ&gbx5xBI{05cU#FyTf8s(K|LZZQG2A`mpf07taV(0aBUGq1*>%^L#jM8HYQ2%p~> z!Fx+NLe1l$7O|LbarPpRd&K~*^+wnmS%K??aj5=CA~=YE%-s-YEKSgKqypPZ;}AWB zMCc>}uOx;zecu=vFUoQHVLVFH2yheu)d)i*IGdu)i3)7V--atO1UQMnun#N~@z9+`fTsxbHP**KT@%DLv3}`IJT6Tmz)J)gSLngC z#1x9h%W<_V4hqerE_sW9{TDqfm~0G(4dpo8$hw&oq!aKFf%Gi~sBAL9tsdnFWFvu( zEz$gQ@D%~aKe`y~W{f>Q%P`+K0q@-j@DqXTKDyWuX@Zh&<*WmUhv}0Ad<1_HP~NJC zz~RPN8&!su>^LZ{B_Kcq`kv9l9%Ew|t}8>VLp+}6&gUa^7J+?{dKhV9gvX6#_|*`P zNj(V&6oG-cx;Qb{7^<=|98`+OGIauiL?FLb7fYnS#?AJ%b zF#~jEVBLp!?0!C*2i--W`(-_h8)%5?c4gSmGaduANd!RzZmQ~I!&pP4Jt@QTaq;*w zousoFq9(Kf}dk#bn)C&93JCSOW@L?EfZ30A)|hQ=E?I))}=X*~gb zMZkHODGtC4C41$#Uc4Rae-Y461g`8b!)ql|ob4!wn|Cs9btk3NUj&92n?bSC47>d0 zs8dQt>OKMnh(O$7GvwHsLE1rv#resoe@ehW5s;iQha${jzrWV^PbdL{uwL|~Af1x7TOBKEo*Dvy(J zWD}{aLq#As$^v2eMlc!9I)O9WaXX4c7$yR{9V{{9i5YxfO0oRmc3k{Q`laC_(Dj`a z1_igr2nRXRS0zLB4*?@Yp!%g1-Zq+`%V;@bUM9h!H|bkPiohl-8$|e;A$_A90~@#F z=yC!^iNK+Lw#c+;58bJ9L}esn#Wey(i$LITJDiSbk8Pi2&{s}IpfdquM4*3&J;IK+ zN9XQk*!e6Omz&5E~Plm+Y9k_XdfT<#|$-NUcZ!kw}XE{Ox zQgCk+DWz#5@V?Fgn`+uaRZWgtFH#WjmVoIZ5SnL)W0%Zu>YEJGnLD7bMutB#M4<7a z9SS;`!9rUOgY&7_8b!cN5ipoVA-}a*ha`ZZgL%T~qXVB8A`9G@N)wz+4eHVl%hqzAkGNV-Nx{oCWp3hI+hz3mTE|M!#xxV{juWt41YRxG zMqNA`O0yB!?$AsK!x!@7r4=I3!%7Fe1{+~cloV?MGjXSwfR!Tf?1m1^^^8#GFGIq< zOdOVx#m!YBkljWb7nZP*}5NAhwqll4lqr@2wQgKQmxvPezq%L|~?Z zHY!>SvHoi*R+eXC&TBHNTq^>mL$ndI#Rw0cv$_@9`}zbD1| zks0_|PQZE*n0~(uyQJ}|Bto1B+_lh#b(9hM2TJj6cP3WfAQ9q4VB8Br z2ZV*{;RqYuw|Cq1|LX43L}1E>4%m5LA9q(tG5$y<3^vc;d%m3_&~tJ}EGyN;_Y^6P z+{uL9Y;x1-B2aVQ0#Dr-C}K^tT_&Xd1Z0T7fd8yuF;f@A4#@C^^{s^p)A>uv6afWG zE8HHYi*tXaD4m*tkt3$@V3!CS^RNU;bm1^tipCVy3H&7>O9YlYu*RNwy4d+kiu^I` zSG1A3HdwJi2j1CI2#eDZ`ig)Y5qRij3p)i} ztSXcthYcMTt|TB=1itv&WAtksxMfQ5=XM6lj0wmSf!>Ghaea;s4lkAB-;8u58j;GE zF9I{)IpEMLUGyHuiuHXuq6`Tr5P>Oi4hT`!hEj|SU2dkMXFa*3LJ`m!>I7c{9jt3B zM~-AC9?=I^Bm%?p9r3tU3t1y%$Qrv7gM&$3Di(pAcbt)s#0Dt!GWaI%L~bMjB_c4> z$qlNPwQxWp!}{wxG2kx&r6MqTof`&)=-^|m6oY2$#DqxF;-n%l@3K4kYG~obJ}G*d 
zv+(Z_OsixmH@d3{CMvLHGOSFYsheC$xgU65Ktxpb8mWK zQ9c_2>97&m>U2EOB5k@{1k!@N;L)Uu?gdgDbx22t&!p$85P^p(9>`47!O^cWxQYeuXvObD?GaK+H6l=L?TOq$I&jUE;+{zcM!Y6qw+QSl^ML9&9hAJ2;_SS1gyj*i zM+6oxW%saA7x7+FTwq||1_JhqK>0RzxZc)6#V0AIA5O=!z2sMPp9oBT?gnK~_AY1< z``Ha*P1KNp{UVU|#toC->EIO`dxwUn7Bqyr+LanBQ>MLMWts}8Dj)1ehZMgj*# zAh*#SkJuZ6g7Q*yQA>xNgn&aLa7@z^(H^>Z!G>`9p&3|~OTb|f7`Mw4?{8{j1A7j) zp0NqXd(x(lh`{n%Pt^3##;-MOKD{9mTjr8BeN+UFjP}B1MJ>!ukmA^EHgu>Y;Ft)c z8G54s9Btr3DLz(YA;X3|hguP6&UHsu4IKn4$?)~pE}UpXA{-ZiY7p!X_IR2^vx{}pUXvtk#jjY%g^Cj!<=UKsOT4eQioYyy~t(Y;C4I4J^` z?|Gs1ggV^xq*xN1g_CaN_u-TX+-dN}yX$J`aajt_FIhOzLhj+T2n;Uu#Y@{ZxNx8p zi`Qgf%N!Enj0oJ=>xH20s#x4citOqvw!>&DKh`)a0#`cuVp6aQl#HYZ+>wP}J*M#B zoCwGk`{7FuHKdoz(07akRhK67;JgUT803$MSt@8+FU8Ub3C0?e2p2@)sM663kglF6oj8{GQ?m*G;Mb8?1XoW}#zG65+B4 z3<>o`%?nkm4UxirP8PO3A`z~Lz~XFwOw(4zjW8)5C`mBSjy#8}B5`gA|nh1QK6o9kKln}9|6umqouqh+px(KX#=ZkUURdBdBt9-1*-P%jQ4H0-g z+z%J(l+b#k6qT`A@G>L!a8m^SNd53^j53nXlw!Und(u})gj*uuqv(f_5@nS8OY!hs z7Umo$;I;@14Guutd1@G&Sc!OB1pGsj?mj#nr0`7}I%0fTne^kM)J*C)mGz;H? zNwGcgX{H1}^hkt!;)6^;I3vNv>O5SB49AkACevF za7``6igno#N=fy8Bm!lY{+Rtj8D(s0Sz0QAcMa*69*e-i%f7JLp#n$N;#PaI_RvH? zg9rpo_JiJLH9S06ika%!Xt$1lCn9idnm=0Csp8BTDJm`7@O*cu5VQ^o2{H|)RJK5XcFO>2q>-ahnc1t64`V3y<37yKL~g( z0@3xp2pyn`O~s{1=q!Q4OL9p~BJgp$Kh8W-fd=dO99fH-(_<3f88?f-zajqk`bQZv z*o?i8X*POvpU8t3BCzk6Kgu^K!uygGK|8b9Ry_hj35gctX4>Hyek2(L|~&N0ImVb*fx>9X_}OU!)pj=6@mHp0}&gh1Rr}D zHY8;scN_t)MPOA_AfCGa74GE95fq+-iknL zL?8|)D&Zqr?2o!8!BBltHU1NU)F=K>Qc%GDzon=tmax~&1iTZ0&#MDal&FYT3UX8~ zmcaQt0q;eiH9P=|Vid4;hYU?KB+yJH;DZQE@%6)X+do3&7a1C3BuM!}z(*1I#X5oP zxbMQn2{IgJgA>PQ((`>1f$817;HC3h2(Oai-%$zD6-g)XSp>5Fcp$;|n^5T`!~2LV zEFDTBd=Y`}b3HM--Dlw&TkP-uG7DR$5b#w5HVt&gT^6B`y?;2_Aqy|>5%5g}atvLu z==D3{%4r!w|LwxXECRlZKxe5dZruMUynD@FO^wXLXJZ0>h(O9%S1f7sP8h%jx@+wv z7}SP%mqIHn3w|regZm`{QK8N#-11IH(w8CQSr%GCNQB=aaLc0; z8iL*nMtM?5XG`#XHUWP`Kn?D=I{%x{u~voVMvuZZ{2>fyoGL9Hk-pr_Ug1?&*)_0O4^MwRw7ZIbz8%r%`NS#$N zI!lIH9kxDyju>^`G%s_3fvPH&6v^47JRMzi__;G@zO@1~Z)eR2x+4kSjCH=aqZ@c*O+ z+bB66%68!SNq&MG-HtaYXI(KPLLKK;$uTTs2a5jjGtg))-uSL`MdGfuSan5)C$bcb zJx+`^Z;nXaVJlTfBU^SHZ<>mTYsBdA#(0%G1{P{y8r$S`Xmu)1ne*#W(Ym~`W@ev6 z9iw98@LrRO5z9y%J>I0)xg#xA4U2Ee;TN2Sp5?^o^QP-~cf{1FU}1wC&o-uFeIYRh zyeZG|Kyr*S9tM^{?Oz(E^drWQH`OCOaq567zSNZ=E1m5cI7^zp_^Vgwm9s>r&YwX=gk9kZ+Hw;hVlL~Y=6$S zHtZnn$DB9QhIwGwFJ;7sm!tkTTlBp`Ob6ba(egr<1{JJyU~wj}D&Xeb{o#z6lk${3#PX%19g=-k7PlV24Zv z%Kyqy+{F6$gQUN<<4umP6SkSK8B}%!3a{oOcmzN2 zh_>g=-Wo?}98$-C_3SiTNda_~iE-e~fT#8tv%3ux{#77sQXw?<@x$@xPQ0o6X^Y^A z8d#}Qg>p$DPE6t_3DJ(c>D6L~IL|haIaMJ!xex=cP2grz(N4Ts-NPRGHto=AQiVhM zMM&&Oj5BXy*#@dF_1c&_s|vl;iZOJ~cs`B`Z&Y_%BRoM1Q70;4XM|65@3V+>dYz-|!oHa3Cy!rN` z6Li_m+uj{&@UB}SZp|FU$MNRP{t0e4SZD*y=`}bprvR#QVtjZrZi+X8T%6&!sv7<7 z<>5{fxi(+koXz*aZ+$jKFsMP+>eZ=_l#`szg zPT9G_X+#Zn)#f1a05Ji)dAl+Yn~u5SMME{Nc;+DY2Qi&_)3>-YmaKQjkWbYRPD-Gc zKK}<`9mX8{N?DW;NE1NJry9V!H9Bo7)=`I=drka1EY5NyCmdq#qS{b0?)Y3Ol$VX>$z@cS%G3Z(_ik z0If(i?{UY>P1QK?EfrSoq>m5d&CH&WD46Jqqo=B&o4fG5lF(LH#hW!DEAZnU@t*HyfR#oG;OA_=d$nR-y-W=;U0EYFh_~ufB z{2NL5Q%OuDZ#uQ_hiysj=aoXJ<6B?Z{U%wi? 
zniDW7kc@W*@Mc7EfBY+PMG>nzJuDN^Y)i~Q-dt$tkKtFHkPGsI5xnto3q!kK4)7XY1>4GU2_fCh%s=!cGYIZGm_)zx@7I2#I9WX?2^H&eas@W$L2#X2>Z_$?a~bIF``5^rSh?BJ1Pj7{Fv`1vOX zrWz#9WZo#p+hEBsV|Z!RU|>cL>H~mBry9!7^B~2Pw!<+UdHh3-9gL!oo3^nsm zaD?=4b9u9Ht2$!0KN5-)8c~fl= zgbR@d*qvR8(YggFRv_c+1-xld12WFDG$9rh@Ttzla#J#nkLJyRcRdl--53R_6}aP` z5B>gRKDUrJl|Kg|e6KNk?_7cRtMl1A5;A9s;f>U92#yArVR(E6syh~7l^dBe#quV9 z#!%Gj8KaX>4vpi5Fo`B+5pTZd55a_&=Ge^EVNbj%Ku$F=i+Ph9Jq*{qI^g0SIleD1 z#58|mmhfi5!y!=hZjZ6bazwTkqHY>7OL-G)H57Iutgw`A=u9>(Lcfi~EaOc=+hG{L z!4iFXvaL{ai!kvTG0S=LV(2gwXj?$dl5P7BC`3yTd8R9P6Wo0$P8wUmd^>x~?^*=M z9>lEVjh^X9%zA8tJ@HbkEGmNG17cS3#wULmM*XtF%C99@`y5|zJhBxlhM<7pThaZ=V;X1q!+7n2dTg#ieIisK-VuwJ#Qk2~-f>Rv+>;^hms`vBApBQmk51h_8-h-ngDON|#6DXP6BxE-68qNOnI1iP^xL74f5C@z4hU zluKZ?mOURDXCrT>xQ)k}UN*3rUW^AQWba@}oK3uOu$c&rjy5o;FT%5gB3yV%%x2!K zikSrY5i67g6r(p=-#O_;#zJTD_%wp8Cr&3Oo;UBG zPJ?P6Yq%K{;woDk)LchQ0&jM0o{o{J)>sr#z}7g5@OLn|pG4kduAPQcN|rF(Ux28E z#R#V3#3bG*^q!8B?0#056d>_u5eAMR*S4KEwNq!|NJk6Q49mxIy<+rQL9Q*CH|0vR z@Zp37MlZ{U$!)gIr$kH&Zzf%ujpv`(IgWxnl>R8fobANy;7zvU99Vy_K&nn2)J7KJ zP8>0*yqO+88;yUh*m70>rSK$(s*NVXC9OV8=Tk8!~M5KSTusf*~y#p|1H4u zSC+_qln?F9A}m`);-vHD)%95X`ep_FoB~u#EQXH{F&VtMoU{lZ&)Y)lay|^%^Lc%U zm`vUz-C2x3F*XRXE5x5KMfg%o%r4$&Jz9+V_V!Sg6(IaV5#Em`^Mx$loQqtH+plaf z`$i#h7Zu^wZDJ(6v6;IV5mVVIkE%jQT3MfPm6&YawBBC?!<~+ZTTqNgjz#dNYc)B% zndz|zf1cT6N@_6{^(%t0Iho7k^5$pPB^Y_Q6WXmRMf2-Iwk3j?Jl<>>upEw$9ntT- z98TkkutQ2rJ~2<1!lj!Ns`ARPrC$MpeTgaH&0fvrIQPdM2U&Kp^zMZiEFq?lH!It% zz`{s7`2MU$(53=5#v*gGBHqkjzZ|BQ?2x8jgF!|`xIKWxDdx?gl$CfKWs8OTcO&va z0gfn=I3>K1E3bgiY>nrC_h5r&A>LgiaY}i!HD)!wYT3areGhJZ%10nw7nAa4XS zKhG95JN9FwQXxWI$+gLNb0TpK_D!*adF?^$lNVtBW^!$E-aMbb7R$nIalrT}#=R{- z$Sh*Yc=I-B9j-mK#jnH@n7F7ATU>}K=gsSm>mXdWM!N|oV7{Ud*g@tp6}+*yyN>PF zu|bN(DWp6q#N|_@%~kS7GHV^%yJClKZO)=(NfA2FBc_TseRr-y-Ah|s%RY-I^NO*& zjPyIzyqO%e9+w{3qHWDNoLF4~X*3!0)$nFEyPwoddxVZY&vLI6LpqU+M|bna^V515 zC)h({-5LB+DM6Sv8DH<=O;O!Lb;|bPIjwBdr2ua+#bZoImDa$eKz5%ffd?c zIfC)*0LBOAkjkjqG zUZiVdFB|hNYcGS)iIl-<-Xy2a!b#nBSme5nt=|apI-dOJ$kN&4M;c8rre?3YO zaGIF2ylGJ%j$bR=Vq+w0qgPANE|t{LbG!*#7>PndO?VzHfrdIe4`DKxFSql&Irgsy z)~9G;xO(aTK1kIwmps!8yqR(<6j#Tz!?5KgxZG8Wj#r4e$eZ&EgK(*9J0uj8pflUC zto4)hA(wb_uGI&DN7`au`%*NSNpU!b^i`L6b1KLayA#^rGdqTvwN{GSW28^K!kdg( zSFAtP4r}+wFpbsg%9msv=_+qRuREf{A1&w)kYjE*OVV_k^w-yTv*v;wvZUH@sw~Iz z<5K*1Pu9h*^X9m&J??7j;BRpSe4XS-)Ff*GH+b`2)fVcX^;nvP3Y=I}hS#r&xyhSS zFAFqGGJyK6N(8nmN2gU}j&X}O5kERYU^x_0qpBbuz_OVpkl)kWyivDjsk5qC!o6pe zIDV8R1I;79+jn>q&2o$x95F=fu`0CtQ;w5#e0`TUN1Zz&H_;fO8>%o-QU*6GG7h=N zo9+L#$Ch^{aGO?%%AV!8{*PSSeclwto8y4DF)ANbV2!*CduoY!z??5i}Rixuz77clqVM>u#A|;y!knwjddQeIqleTwil-micMsU)WDlx%AGKMw-tQc zS+3wa`LO;<%oEb!6@Uj~!i zJ5lIH%DRa+n%x62w%7-?ifoPea0aS1NStQgygJ_ndj(&}4dwW%mjN|pvX=IOH}kFr z!&4~`llrlIqdj)uu_?K>m%Qmz*&S!LcE(0lgsD#b_R6jaucIIX<-y}cVIXoTR=O&NZ&>~&jINEy85&7;`v z*fTB&kqxETvLqQVyA$(<;~(b-I1|61R5+kftuBJ ztZqZ%{KuQ5XTaR(uE=PRqU>@K7TqA`9dFtU48zW+A<$%pm{L9`VZ9saL*DboX-F78 zv;9(P`f^MenuHy{NSqJ6v3$@2u_uB7cEIJqp+p$dI3IaqvoIXZ{kmddvkWCj#A~_+ z`H43VDthA7><~OVFK0=O6S21!xu4Ix(Lw}XDRx0vUv@J1aXfSaiTT2t1NOb3tke|` zpU9wTn+We2#C+vV^uAvBuGSUNGM1KORRZRWCgvM&I)Ckj4do%&cbsKSJ(hq0tBLu} zo1sa)pgyV#GzQACxF7**MiTRbH?#Klg6EN7Or9gdDdj};KSQ4BPu{He?Tze&AV{{! 
zFmYu9J`5vqe(^?aLIhOgf!MW#oqe%P!2T-I7Jl>QEjVxjN8)E+O z=5Rt7wkrE!U9c4Ebdyl~m6*T0`REu1-yS|V!sft>jgw%mPRu{v*gJ<|;ukl}3SjAz zSRDV&r0-Q=2kb=We;U*UG;eZ8!C`hdz9t#H4v?}|}rpX)3ath*9Cq`TzW2vBVsG z6G|alyd5F=#HjKnhvi+6&Nju@RZ{G&+kwFe^o)m|IjS$-w?sT_EnBSw=qhuFS|V`z&_6)ZuXT0TDSBz3eM zZ~8wmLeY6Pm+_Ee_|1HD3?cPRi#HJyjbYr>7EbJU+taxS*$%{L^XATW1Kd2WiOnq8 z@d39YTzpBcO@}v2SQb zw?p1jWoTTN;iY399v&d`Qxo2N*6jqxTkMdtCCdhCkqhHIG7mQ8O~nNVxZVCIoLMe~ z@y#4)JtM}9H)&rwVaE4=!bdi*_pZyviFd@b=gsaLM_y*Y2j zw>aWmNUPB7QNj|7=c1cH;&k9m)nF&oTRj)n{wQYa-MR3YNaA$l&1h>!2xp%N&TM^H zYhEtizZ$`>yIAlhEY}ecl83^SPQ}Pykqzr$Vk~*%)7}jWbsq|97NrRFli<-{vL0f^ zo6=XV@D6w&+^H$S&}9;&%^%8Nn>BB)up`haYaa`)>r2pKFDWjEq@dy{SyS!A8~rKW z5Zl-yczl;4`f?)9T99=c8x}_6uwyzAq(J9SBWOW&A z*f*OJ_L4PKXWks`8;sZ0uLKLr3S@h4!GY_F-JnGLU`Rtwhwe3 z>^Jq~jX!UO=lEmhhc|+;=Sj?6H3vWY5);51_lf>&jrX;%%Ka4bhRnpBJEZP(=FR>C zow2U@jnG~93^dB7q4Z@B{@Mb0bM5Q88B>2xg6Ar7KizqAZD}ONnl}plLa*YW{&aZh z5+m^DmP-HF8X;LZ69qcDGHt5BVB z6%kHrajmsGAEzg8F0Pw|A8{XqY3Husgy9CvH79W*cr$qJR7j$}2~R^WW7Cw)n5iAc z$LYlzRjpY_tNkvli@40*?{0-*2Xbw_c{8eH7M7m+Da>`gfb}O6;p9wABySXA=3sfs zZy{jcIrOSZ#<09@{I&JrjgJ!B_aylzs2@BH{rprcSVl}=-ZUx3;7#-&p|Q^?T(eHc zn}Nji$%0YTDJ=<2%*v5!gJ>Cc-b4vW!JseoB(b+}`ljYAtr`x(HStdgZ@ zx~zynD#!6)vjmo>$h8gR&4qp|(JB9*uwi2zChf_^iociOB2)o|=0`DiYY{@*lQ_e9^P_Piy4_dAS+%3k zbz|=~2a-4=cyoX3CKM(sVEdw@c%D&;?D52mxwLl((#=`bmSalG06GY(ynSZ=B8TBIyzAFe7TW;}1g z2gSo$HY1cwu zkYnvK^7}K9H{L7aabTMgl>KXQ^o0~R14%ob#G5(w?DMHll@Pu0I2^~a+&B)zOy#OUKz7uYjN9GhM7BvnZlbF{We3RN(oVV$5GC*BP~iH&vYtpN?q3@{)-yE9zTwB zmf3379%82PX3LnhnEqB3wgZpjHA~Q?F_hHn>AXo!Tmt2O>X_?Zi+c7BXv!hdkIvvt zq~-!h&ZuF`>|-!zYx0R|`!Y~E=99>ezKspIgLS`1{ln4WDX{J>)<2xRM>ABmaAn~kp{k@Z3YrG1Vefqe!o)|<>r=JRH6bZ>mp)x?9F zN1>9*)-Dyv9Ag1*ls0uoRHHhUC>=qMo^lipBIEdI-mH*y!Iv6!%zl0tN^9hJ5lH5z z3wg6v&mW%G)uG8M%|TXfQ`5-YIfgg$qTCR3ljS;Edl2Ks$)W5+=IF7!3EJ+2YBf!k z%isVMze*92Le>u!@n&~Vd*t0~2YaRc=;O#frnZ-?hb-pJ{+Z_Jc18;~hwp=FP#Nq- zk+qs7y!onZj9V$%7-_v1`Nic}e2?s_S<0INK?~dVYeQ$+ZtM!Ez`Jm=_Oy&QiDTQq zBT*NPhglAYo0a(LL)H_Q^X6)i20}jQA?JQIR?v8to1H=6Ie)70LyegAyy^Y^hhR6}0#5A9tH)JpFiM%2 z4ZMkU{UHqUwt+%lIYzTQHxW8lg5oVEld<75Ldsl0hn)F5n`Fdm)v7vkdC?P&9xm^9vu z%XlozUK)kBquJTh@!N6zA~8F8({%Q!&`=wN$G;2k^U`+oKR`@6Z${i}5aKtC!&IF@ ze9YK^bCJYk@TO1w1HpO57?|EFfMH!KTr!Et*;#WPRddScLX(cH-z{VzPKMX6-|v_1g$2Z7f3V;tbrmMBbN3c%!1#C`{HJ4te`x zoL`ZJq0uBxHg7&$c_O&}8iprLMF{$kh1B-Mq(# zH&2CUoBOjIYpgBAWn+ClF?qc4aeg7(IMxTpeT%XEelFHl5tGlGualaE#^=3oaA^@n z-^)jDH`3+`coSUqT!@+!f%x&o2q-E*=yPHUdGofUQOGL?Mg!{RnE#k^?@e=am12u6mq2wjp{x^X4akCyP}^_u5G{(OJ-c_voY zC`a`_@>^2Mo7}+X!iCv>c+{Z?;aw`QKaBj&NqMtHE&*oKNgbK zc;k16JT`_qi1cy9)bK`La$j(D@1)dFTO+JK0!X zxEt1*WXyMvH#a?6g{9%%_^~4!^MBMJ;~i<+hj_DC^Q}-k)(dwoXCu$F8tdj0bC@?X z?))b#U+#lM_HBbNF4aizA?65gdcS=qObYVGgtZcU>01pq85xTm<;~LZ?*wlJAEbum z;Ms`^d^aNI7;kEKzZdEoeQ>l)f;Y;Q2y-B&mN%1}-wAiw^V#-R0>5q*aNa@s(c`=^ z@c1U=hxx#?S%QB-6*#bum=nCYzT~rD_1zbmIxHvXw=!rhA*PNuA#c74tL1(;GF<{I zwF+oD5Ob0@Q_X(}G5h_ouzfbX*%#Y2LWnuVo6j$Q2uIHNBRI?ozc2reI$75SdKjpiMhy|^b`dQ(htB|-z@gQ+Hy1}5p#(*CzdF{WOirVypV+kmdDt{ zg_QMW-s~Bo1Wh-8q|TE-e!2`Dt%$k8n@{HyVCCwEdFxqT`kQ5#L7&f6-WbmMDWn|t z#cB4zqe=Q@2-6~QuJOjZ`$yqPq%V5tXQ7u*1*|?3bDcM1hrbh^jr7JyC-zL`b}xmNaCbO(WwCu571(!~^i?-`)AHb{P-g88x8YeR393Z$MAG-(;>~=WC&K1Z zS7fwdIkJ>1puU3i@wa(XsPa%)p6r4iA0$v*SHb2@3hvb|h?GbWu2hM= z^qkRM-rOs?Efn>4Me9omx{oeLcyD_Chd1tvt_j1hIl;?^eTkx|44%8lZ~cAVSd6P@heJ#m24el^9*7h^2TT76=6oW6TGV==v^hp z8hdg-^}Ol5`=+qO*#*;nX5-3KIV8KuZ~Y_Q*bKiR?0V>gq9h4!vMZ@O#T z5IifLacpun3Y26p(;&~Zfj7EWZwaB}oG~js8-G``Z$iu?ZfE1a2ssL-k}`PCo0_cqLdb0wxG3bH=CTadDx|ENc%y!zNtn%Yqh>X* zvDp8-$A}}QnK!AX&xI*{+;MDtE~}q%bpMZ-7reO}@=6%S_PPr!FP$3uPTR;gWE}sJ 
zHzhYd2uf+Lu-}z~MT=!vxq;Nt7T##=_$c`7amKd9Ttu@EarCwz<`r)ep1v1WoODK; znK=k5kwJemxwcl`JW+iwbY1Fz2~IgUe};Xy@e(nwd6U@gqmb_5fT{I4?7USetOgMC zhBqTe{U;0>X^$fgIk+2Kf|AL^yycCTW~-3gYRB>}<|3@97_Vj#^B-^CvNb!63)Wb+ zIR`~^OAw_+%sbvxENT&UF0;YGV0LXii*cCdS$NNz9StvpKibx8?^6!uPAtSuZxZJN zZ+^~uE;v-MoIR?nj+PhTU=68nA9-_aO|#&0(-Noca!}d65XSvUoKL*bT>4ySP3#CO zWW%m5pPfx1ZSFH~5;Yoy{UIHZ$ew93%QN@kD(QE=@TO%>gYfrAM=Z+8M%!h1m{Cg1 zSKe%x)gU|y?ZEC`f*XhO5UfhfH{Jvddm@yyw8tUa9Q=1KAMr-SeCN%?swaZ>k@gtj zz{(&g51pTr{_O{ER@XfhRDwF<+1hNZbI5~k95Fw6qw4cqs8i{H6Th?ZDJ&0bwiEM< zH**Ubg~AUVQI?;Bm-TrpPZlx1d1GbQBuE!ppw=}9Qq_D!^&n;Nhd1YzJQJKdcfjd2 z*=S+8O=7nY^Ora6+CCQ!DqFz&V-9||%g5CMV*c@l?Iag0r51>2&B3=pdFV#ZQ7R~N zd(1XwzZBlgw#HNTeVxHC^0Dk{13TE$jrxVy7r{Rn{PcEh59;s ztb3e`xAJ`UDKs+XQ|8SsA|4c5O9eY^lN-~q3Yi^@{wL>^FFRl2 zOvbRPyqTWzOc>{Ek3KGWxE)%68oJJ>#+yD`O+s%C8z?W#gKAR&iVl%D>byC;wng~7 z%@U3S@-e@l0CCUB{b=we&ftYmKE5L)Y^}yCvJg>INV&D)&F8i+g@lx#b(@G(~MCwjE-n<(8 zPIy1j3QhX?7!c3Sw$X8j7H>2i-w6_y2k;-uahkus6c6c`LYp_eT;BiZWU5q+e2q@0qj{muAOcqjvj9$ zsxO5S3m0tcU4ZXhi`aLq$nTFnZ^pR46v|ax5Ye2EKEn$zDvqx_=ZNMFyg&q7dgUm0*4;iPN4piyVFkV~%x3fD${OHmw*w!j>gSJn9)k&bl}a=Z-0ef*#Q`uTL{y?1?XW$;&kLqe$!uJ>|=i% zOeut4Lp~Oild`tpP4R8E9wK3%acwTZGznYx1~Hbrv6`d^4;NoZ))ivSpIiu|iLv61 zLxdtGzx6`}dp=V&@^G}27;E0N>{h~#E4)DXFPK9vo zn1gnANV(baCXMaEsL%GrTsB4=#KvQP>G?T3-qd$i#^hbTuyiPZLqHBrSdeS8=gsd< z3b=gU7hym1aX^xdUK@#V;EnQA1?;=%gInVYux?xq{umR}i8tHXUcR`C-q5WmfXC-- zH24zZ$eY4x%J^m&0MmN~_?MZ3ZqzvOrpE^*XpHs4tSbdLp_79^dTq|U$sVJK!#n+< z#(sbPl;q;eYtj~6c+=Hg38DT0uuUky?;AOgOego_${US~O6YKvm9?Y*ZLK4S|H#r&q1Vg<5c*GW9Oxt`cyhn^TZyqlA zE!<*daPV9Ky6t8AuNz7G@!`#lw_k;_OkZ56EWkI1e0&{Dj4yB2%=jXx>isXW?m8~2 z=j#KwtyrMash}W@bjN*!=QHC-&Gud|ByEm4Vf6Am3{J~~Pb^#G zccSKLoj9M?=nUNpd3dOthX`%9XU>_LjP2sv<(4y!+{;6}h>2-FitWvFp+@2KGl^BI zE6kqdiaph$?)W)MS89fxc_|5Wa>JB4d2rXv#lzF=?4=tu!)5OzSCpJFJT4Dm7jm)p z8#C_Ij81tYi4SqZiPRi?7wh%E_Aui?&C20#C5!*KBX&bBwj9jCoO{f8Qse#Nm1KFC z1go~@V3c((b~iEOMa{RYcakn99(cAT7q(HkaC2iuLe0pSuM+7HcQ~~b-+Y5}(D^g-_5vuMuy)_HP`%2LM7c+j; z{Fw7ea^;x^)<4WZM2^T~`Iq$v{?sV+eJ}Ct?TM;&Vm{=R6yyDv383cBvQHAD#hz$* zBr?B`75n{rvc4*ingfZSBw;n4xIQyOjwLGDxn2LQaa_oJ>Oei%m zhu%rHS4dEqBP7QSWFZWYieTm{W#;n>TNtVw{KjNj6CQFySBTLcXvP1Vwssh&FsQkk~lX<4Ab6) zR;?SMrOf(+iPQ`_e@k*9)gJu~cjBqIw%C8HoJrJZ9KR)*al;O21v_xHU>{m?SUJ(u zB8JXZyen)({Q&lzIE9*R1uc>d;#|IA&Q{#9Y(%s@ zGgGNq^y8}}E6)M*Ms7u2?g5x~VP+aNjmthu+&VbnAq+iNps%-D}O zKQ>o0i<(V$|4PE!JE2^8Gkj+qfOj1;v#G(_zmhY1MLBVsaKo$#=TexNL(SX;t&wQt zj0GJxL#ot-3V${}olDK$rnb2F#1XHaY(nqygK&Pq%9%$^Mep`l86(OWx(UTz2T|_I z%9&5i%Ud1b{lOWBsy8CI*FpR-VB@(3)O>U6jEY^(aDKNQ{Wc%M=}D}dh1B>rcfr;a zXRM9hh_c6rV9Uq#i>N7@+!a@Mx}fg@5o`L|VSG(xV(j0XEYvO51sYL&@hIX71V6XZHsX}u83%}5%E<=;iJsV zN@`x+Z-ww=H~5)sM&IJ2FtcN36*cd}ze{=syWqNPGYWSd!{Uz2tfoez=VwWNJ7;7s z-G;OCk0WF#Gi#{nG3mKvNvbn!FYH2d$T67RV9&x@YF1^um;CJGic6j25gc<2TKkz< zN6o*VA0($-TrlN(61>!o@>egik&FteSSO`7fSYK#YJ9+jZo z#uLyv#LNzAl4p{3~SegbA5G4y_RQByy) z3-m>NlZ!V>5ukn&mmaZlc2hHHQWpeRNRV}<6g%`zz=-GI+e6LD^WE`fo&*!!OVJp4 z9G6G4a^k2-Xs?7U36M2ZhW4dL5ySIQ##3`eR~bET19`7XpfLI{s?*pSSOPT}!&DLV zQ3Bt2C1};N3F}X?cAZE~$puyHbMO{9Rit=wupaw5Fq1^hv1Sd7_~nIou@A>St{T4% zu{E${YS!p$f|DS`XVcOQ0yu($2hHif=ENNu_38vMwI@ zcp+y<3D8Q0Yt780QS(&f*Qr@3LAKaiWwcEOy|K)sQ)4r?ABJy`V5#_j{0>RsvxwC} z1~nm*4bZ4EP@H`dv1|X60@=)DQgg(tA6kA(pe5o2x2TD0yU9!zHR^GF(NEld#07Dt zx{nktJP%YhHMiR7BWVH9vP9%w*ek__<*Xg#P?HdpBFcM`{i+PIO7udDsQqxIK zA8}?9j1u_)6HKMhe!|Mhqvm#bZ`^kR^tGjMOp{`w6YG2Psd>KE0KdER9Q$ z*^Bj~1=Nfad2=<#cwuNq8GMRGoayPz6jHNuhylVDd*GCY44oep*_*;@O;QJYQBaTAfb;Jo{IP!&r-x*w&AR=Dy3%JPklt~@xt5~ zDfZbE!n2xPTNyR)TKC230&zbrB{2J6UX3HHoN{Uc*Y-odaqcMnQj9;fd6>C~nF?x@ z`y0adhC9wENb#*>9%hec*H%f5gK~fLZx(ajN+O>3n`}(dXXVJLnYPdbdI=sd5;2VB 
zdD+P5!%P)5Ze30B{evsMi#_=Br)1%(8N0S>YASjTz@>6`OpYu@f^!yrE@P&Kn#$W2 zm>=f~Sw=AeH;X-EeVD1GraK1UQ%6@Q+$+L`;0!$I!b}}CM?I`i;o^=Zt&1_QJ{_-G zF;h>?v1bGD?~4mg_bSHSuyl;t%1i?_gWFrfJ;x0**A2(HDD_J@Fs8P)wfbE&C$bBx(V&BTZ#%Ijzr)GT@ac#mZ2+2ps&*^B->!6XE zD^qPS_oy2-dgNiqkWB2{!rIOOYHoD1#Tk7Uan2|QDef6K!_R>=QFHXP9ST;tp`X~7 zrzG}iei_2<=O8tT%J!HZCB{rmSujr(^WHqSbu#p>V)HNzYn5dPW?S_9KjJ~|CMW;4@FjZ!BkoXc>>t8$SS zqPsYcFprs|)UHZWRE4Wt_WQ$`fEk8#t_ZE`OZ=EXqhcy+k4=CWC>Q( zr($Cg8`GYr=Af4?_L;fkLW~S!dZl9WPBw>ffto{Y>|wdz9n&94k=>Yv&n|4deUX}r zhwRZP*1&oOl!|)$4>sR>g_`8K4k*6pD#qERsOg`9o!ZP?rAAxq5vmAxNAu}2EZdifso~6Aqvnx# z=6(gcVVXu6hThG<`!F`=eVrQbcssbf7uVLK9HzU(9+@u8+@L0Wr!9tc5HazGm19(1 zCc5zTg`3pmsoSFVoajT$%Tec&jr2%X&Mj*0t+B?vS?(C$x*X@X<>ScC@03*Nfe7z`GK$6>^-Gu(`B{)XZAh7hA+!=lwqwSQuY} zZmH}!eMHSE8-1h?b3xQuIohR*GwjW*KX^>d+ugm#uip0q4j<^U1H`bHJLrMk?>i(>yB2!Dy9+Me0=(hnxC&!VP))uvVWC``O}CD zW7c0kr)JOmo=9Be0L7w8B<3AJ@jUh}c|pzmY26XK$^i+(DzJJ{6FQ$@@6(smtat5< zjLuG&7g~<@=bP}gnVp||MUAATBfiGlsB0wX3h+1K2mc@?8cdM%>_Tq@-g=JVLaW)%qMDg zM1GN+QFKG&`&^6(I*j{9%zUQitnX(@^LjU|6T3&Ve2$=LHLHU!)Og=~C;1xfiF-3L zVC8)n7IXhs+yAewTd0}*>67HZQcv{0lnO)HA=H>nrRFO&S%X?6QF00H9!SBKqJ!v^ z&04}YYC7cqkZcnfW4gN}qy3vE>>tTm+;?i!yZw`l3h=^{ZwUz0YQo9wtY!b8rq9r} zxI4sKMEpuZS?PX!v|~@uPij)a+QRq^u)j7DzMu9X;Q@QPe=%dPfZmV2u_ZJCAy*p^ zeTvN_{ibHTNjDfwABg3}BJ0boz3}VBzQ+Dg^Qk}u?GJn7)|dpmscb;D8|&r&QnO2{ zh0=zBsGpgLr(bJfahknN{!uedVt}_b-f##`z@)3yIB|)+9b2^{CxD)|wZ!V5UT7X2 zk8bugXj#E_Ahf3DpOqb|4|&4=XdJeFtwzBt7Co>HHPPMO@bR)6C8+AYOZ$=Mc2t<)V(_nH+okgGMz`2YUKH)g{&kE92aj6}bnFiIrmZo0-nk44pj=Mg|u6 z*I|#Cy_92e9LuKAg_^b=Kf$5wLa=Cg>M-Kj}v8js?gX1I1Y8G*;-@c7P*A~k=mjz?D= zQ{?PU!^YPl^2$VJl&A^Mjlz9nQ)pJEqrO)aba+;P9@J>mjf25;V;oD)!jPfj|1o2A z-IE%<)F|B9Zh}gaJZya-$3b3BFKW8B9gojtrf}O-fUBb3Y9FxsQKlwGT-&E~Q@j~h zgag+rvCxlQn+i2YyF}saLJQ=@m0-&vIn*XHqe{)I8&OzgYKaw*WmxJbM^OkfYSetZ z8HMk0R?wGJ;PM~QAM|7GTAdo3DPu5J%s_o#EyvJh)i|Qe+NcIK3VX+3bBrzePOrf` z*J`}TWJZ&kJ*s0d?U;yEa-tspHC4l>9qU81sJYT%EXKF8!>JbyFq}}04bxfwrcI4a zdN_9KI^ovReK`898ny|{=uorHVGIrj*}*TO5zAt$vGy)|=5(pa*%^Vv3Ojr{aR6N! zs(>nH^r-RK903opTWWQmLt=+-75wfo)0>(#3SscHx5tTAhoD|1MkhDed!Y|CTaCjo zMPw&6tq?1eKGpc}l^K0%s$~&4YHx$@-w)!IM-|?6VDBsgYC0%|A?~m>TFy41L!KP{ zPqF*yOU>f;<4`Lypz8D&yPgKsK)H~ae$=Gzi-d`Vm|fo7gj?Em$S`NdkeXGU!*N4> z09O2JL|jNcls>X{U_?#J{ZI@M&(GrMMmQg)79Az5 z9Aj#>j|zfTgbj>!8?oVI4U!i#V?xc4xZ#*LdH}|X2=Z?WYtViVGp5wEc===dV{53M zI)GjqYA|COGiKCOKl8x^ZF?M9(uC<9YO!J@t7~&=G)f1lD>R@CH8@J6l}aUX9u zjPQ$<*r3PC89>dByAt?Jb4J;>gRu51htCyOjx{xnLxE+{VkSxT5PJMA$L*J_Pqd*% zs_ljF3tlJ~(}Xv285UZya%`zNyG;Cki5HTho3Qb(6uvT6jvY1Uc6mU@X&`hD9)RRT z5jwnP#-5trKip7SGzi~^9Kdp$BG~gj-hrA8#%?$hIusclnnWby0!;YC-an4iBqzCv zXw<{7c-1~!>zRi)hgcmrQS;s01AX87V*bXxFjUCFqj+{d&eSN_yJADSFHChBAWO)^ zs%@+txKQ)S*%@7%f?ztb7N$lbLeK!#u3f1yRCU8i=MmU6P!5aGbX*O*l4-eyYF7roC@9GhWm*tguGhhH$hxT!Yj8X<|i;-H(Kt9W$-b z@p>d)DAmF9d@?TNFav5@n^{7uU<_*C)WN<(GJfW=I`F1u$5sn8Uyg*|f32~r)&mM6A)sC#=vnnZe52)*FP`s zM6`aZ2MN$^%Vxd(sHu9VkDD%&aJRG$HS3dbqny>7KQ-O0`{K;{iD;QxkBA;g`1YNd z0BRyf_6D{`qfb&jKA0wBAV19$NKLDy1~}&&4YR{_*t9hn_iwZRXE-%WTlYbyo6&eT z^ndLj6~8XAx*kD|*0|pInG=oUPW2cYnWGNMrp%0_ zX7xH=GxdgKsetLd|1EO>~+agVKU} zObpG!1Xnh{6H1N6I8A)Ch{dUU^%(Azg(*qQjG|_6pcXv7#X#X`J)E{=;buJB@jsfH zrnegS)DVmM<~me7%R;0kGhx(}uhWM4fXP_BwH_foauA%$OgJ@evo*0eFb4moHsH*K z99&Ff^I{RyEOpmH7sNnWrw-*hxmaPuOe8fYV^uM@EC$b#>+ths4id`QwT+?X$t?|x zy%3GsWA&K!I|q?cX2w#}?x+TG+E2n_?*?2Gr#4;xFf)#t>%TOxQyzmSCJk66c1T3? 
zdW)jQy_FUwZjKh&PU_Krd=7eBvpN`0jf$xTO3ub$dR;x{HD$y62D`Qi)HqjbU`%$j zm`|=pPJoC!a)z0S)Z}#3fdA}?@Yqy`kicws4`OB#H5WY8F?VY;M!MIbUB7I!(Pk!^ znuf<}XnkN3DxTM3)#e<$=)_D6H7%MVqsEPi=rOtuRX_9LGLV^AY7|P<@mO&Jo}H=1 zu~d;&`~)+TsW~%F72O|B#KFiqWQ*Ma|BYd03N^bB*_OAXUhqM#>C?=KZF%V6cqq(&}PM(5>Y;3?)^);+Dnzdg*%q9!3%1&iK| zMekG9$huRBMf;hVP0jG(>KL_SEON%yV%Vcf7!79kGlv?7Mpewp7>m_eH5fIh3MEsS znM+Meh6>(zj7P=3YCO3k=8<^4&7)@acNP3Md<;hXtA@kjDx9`t<;y5*ja51KzAV>TKW)@M? z&{_j0g2%vRwYauYITAWEvzQw9398Ua8Uy{ZYDC_w!l4n&ETJanvI-V<9fKghYUrj{ zA=H|MW-8v8FjF?&S>v_&~@2Z)`g zv8-A@smcGXh_~gVuxU~?OtNzEw33-E z)WqEAfy$|)krh#e#DGF9F=J*cHTD)t2(23phj1}hb-o1uTA0~JjpZLj7>^IfOtmU; z>b)Few=uJwnhMt*=yD(&TSWx@lqXfV^oyAt)Fg*1;nL~|)U>IFeN7E4Ynj< z0yDx;b6AdN1L`p6A2YkCIX=3VILQ!4XzF{vC7O-;Ssk!n=5py4k zj5!K&j8fl^dHjA7s7dIeh=l$TFdr?)rNX_);?GYaH9;$spz9HdpJIJwq4i#98s*J(KhTIoI_3Kk{)m{ z7>$K{D=}kk1D5e;A(tAx);+PyJRIGRRYBRf0k$95wdGM$p{0Z?UBWQ=dnL;A8!%-O zyS994wx;z!*PJl84yb}|WCJ`0v)y?G)JRS%;p@b3gp^kzF0TPDS6DfP)cCbl#Mi8F zv8z#zTWR%Jxq_8bM9r+i9w;yjgInK9*m~8&avd|p)O4BH6ZtDgBj8>oq&@2~pq`l$ zYFvl)Le%w9czZ>TcP+K}6v~X0nw%ZV2wXP`3M(s-7hH!TUXF~KHk;INVDMO#>Bh<8 zTLjF=shN9B4Nbp8@a}#kEGnz(Z-X-G}Yctf9s;PEAB=3BgIRmbKQf3fD%na%!nL60QoR)}dlew-S3bsxjS& z?a8X6=HNnAoZJ(F)uEM`E~4rD`N7Jmr)JL(HQbsJifzWCf16T;t>2hwpyrjP3d-t7 zqElKW9)^i5me-ltOU>5#YWVXa7`MwRu*hGI0Ts;bqekza3fk5Np^r`_Tpm}U<0EGF zQ`1&k4Sj8bF?^c%4lAib3XdGoNKKig3Oc8Sz-x0keyfW8F5_5TAD~97j|#Sh2g77$ z1;!qcW6mIEnyC4;LSz^nAB5{a%25$ejbTHWIY>>r52~=!4#xVja!4zx(5nqIhp3TO zsvzKVATE6pk(;_yW86blZ-=S5WTA#yg&^#iUV+EIs!+FpnIqJM$$BAXND%skl;eS#HxzvI!^hGJ82ze7P;X{VQ*-r87r2iMgwf|p zM4zmNRtf8`&rp-D)&(yv_@lY70*<18OMJ}CS!(W9biu2@064gdYxAnXVL3DBsCm_{ z3$|GWzchqS&TwYVQ}ZQT0oS+qqsFcRO}5q0sAlE@HS(Pb;*>xDs-Kpl)ilw^ zyE1c;n$3m^C{6K$wpRtrt*X%~j+sl;SXy?%GH-t@KT(dgHdXk}Gsj-0=G1KkXtxf) z(7H18HxhN-gZ-yhsA(6XAokV`$DZ7B%*+&fCr`0!yGl*V3I!}V?1#rG<@hJY!moIA zplj5eSfYUYYJqq@svL{O+*6q@ySD4ptQ*r=?7axY?wE4OuFEknoL$=uYFySS;K;ub z*dww}PyJji_Jgu=Zc?MLp$i_p9DxHD%26QZA^$#Nr_^pyGka=hlua3qUoXqx_pS|?Beb1E;SubDxlM@ zAXuF%gU5eWn99Ga?oqStb0^&CHUiJ1$}nq6wKy%z>g_%?8%}nH>gN&oo>vA>Eitx_ zXXXJlw>xykgrz}fXHt%yMO83Z#LR!xj5F4@x73vE?S>wmMx*XgIo61XkWJ0ZyrX8$ z0Yx-l562*9ky-L_J&fzw-0gd6BwM;;(EM;rtg6JO9}QUN&CCaCp7-jGH%Xzm{jUPf zBgGnOF*6^TS>GK~euhK-xdQS1_oA;GGoPqw6yG7s#a`8Z1H_!ez6Lyh#n$*gQ}bY~ zB6gn)L-}Q~XMIFHh9xocg_@BwyFoE5934I6c)vmH(UdXMLXEq(qKGsehIpwQW2Lol z+R4mUYJAsp!@6H#s9!0^=s#lZ>=`rPsQKsL9cfvk;Rrc&4p!s-Kvr+xsgd96hDD{L zu{1!AplellAY<`Ato8Cq>jog`i}I7(2hM#1x)gm7usD6{ms$z64eZ=hYiit=_C)A{5$Gf4;%AD}u4@d~smM0ejM%Dz zx_^NfCguixK9<8_ATw>L@&2fWwW9)YdbS*1tILsT!_I-Vqo&Prb@Vvqhcm8namJ(^ z^yj_2YO_wooOh;<=E!RWq zQNDO~M~)@$$}q`?nNHL!Fw=*=VgSyj$?;mO9oF_?MuD2Jwnm8U;EzLpB%@t9VTA1lh&BT6=Fz_0NabgZhJFgi1E;6G? 
zP4+2Ay#MG6106ZE_7q`3DT@H7M9s_Vu4pxT817yYea`GcD4WI7vzI-n=`zU^na#sc zHn|F^rwTE7DKkB(nJLb9elYcgYWpfk#uvcKgq^+YMU6+52O491Fh`sh`mR-oukTqo z%GB%}?gdK)QBF&xi0)p9t`k@}D%3nz_C#biAB2d0s$Q)CK9$UNa_zx__wXW=JtR6LbOSSQLrw<;9Ih2SxDgOOt@44R8_;uFB*+)as|FqbL z6H$u!1PJmzkT&!$O5k@^ME20G z5!u98KWa$LhyGfq>f??FN#(eEuNL>Ov9@DG&DvybbXRf5ym4i?IlT@g+t``!{?vpk z>fls+7o6WzhSCpp(9WGk`!{21g1f8ZVkeQ!r$-s$?$)Dc)^uu2sF`|4Rm=z4VMc=t zzR&8BIg%MuYEIgzV_~ulPJ7E>F6v-crx~;yGirWrSBF!x4T241Xq8os8TV(byIs02SJ=i>fKW^AePG}p(y1SePp7Gv9D zkqK!Qt2aAp@{aYzkVVcI=vs{38M#QXVddCU)E?Jlr$jWh}#_GNs(uL{dS_BPoZTBBC<4#T6abB2F;fBZ!MR5O=iYrab zcu-?)?Fpsdt_YA8A-8ur6y7uANlk!~2L_7R1apTLqvl>J^ds4`;6;tq76~?uae@E5 zLfGv|fu^mU#$-zHMZq1&FT$#TBrMs>?q@JH4I&(IjE6gl4;JBwsDt3Otehd#d^ibg+Twpn++)&&Q3!sd?|`h#42{@h!6$gPKz?%Z`03kDx|doX20%%?|MkiV^-h z1?Rr7Z=@hr#yV?UPXbk9{kTq^3B)1WRrYz(l>-tgOKgT*#t-l=1n=zEwMP0jX`ddLwm6Y8TYQFb;RZFrRSFlqvtb&)o~ z841B+UH4EXGIp^4G@KgkBfYUu(FGo%HK^DjGHiBa?Ja_uFU$1tV2BHHJJ(^sgbb`- z#L9`J=JX$ZJaqLy-O*Y^d1l~vDXZ%-)D#cyi@;_#7RrDb<#N1Qqj8r^a#Kss=)F^u!;bvzE&Wf>S=MyQg z`oqRDRDTIkev=;>0~R&YDOK91Y>G zC_&EfQnbhuaBu~?wn@|&Za2manFmfZ$#By$0Rfj-T}M-+v&R@OMbuK;Pg0Ed6pzjy z*A2=gxO!ci|~rcl%NmnA0VIU{{?A*P4yh8E9!IhC5{v6g7ka>M|OBD5d42QOBz z`jurqYPPmy7>Y{M=LEoaZ^9BS&_JaIhE0Xia+cKgEJuqk70bS^c;A}ic<(RSpH zc{tj84~}$a{mwjU)Hiw{D8U8iUgu(C(;nEbW@bJ$uSK-mCl;=_6_X3&#@!gqBMvU0 z=8(wz`AtO9eA_(_ihK7UHHh`23#qZ2>W)F0?x;4(#XaLV?5<*F5jCF^JaJOn4W1%< zu5VmCOs_Jtm>L^(caeR;74hdp#@&KAEIi8I3rnbpTH%HZrtZ-3&%xfDcr3ZX%u;F| zdx>wCbT52)m4o^H;t_C_nPt?tCAgvJp$8s%<>2x81U&xDzN?l~W3bN^D{gwAK%5D+ zX&(=-U}jcOGi{+W=6&*jOK%bXVNL>y{$tm+lA75zu2?kI0|BS95&a?_cT`z9tEhQ+ z&lw#Kh^X`5voSX^LG1Nl<*cSAa-SIcvuL%6Xe++8$x}cB{D-y zD^Ear5;L2q8SvK#&2m4y71<$@yb`c(7Bicv*?FNqiaPnDQLRA2lYHw52$mmzIO77PZmZ`d8wToH5L1uwnD{&N{zLvv8|g|*S0)C8{Y z0|yNe5Auc#Dt`I!dBmQdUDU)p?v1VG5?l+E;g^S4m)XU}X1l5BWTg*>9iAeKr5J0j z&qq~1Hm2P}P1t!o)S7v~rnwLk+7}>h2b&j*qvnsUE>tFa;;wN%YGxE*+*LMj8BdL^ zhpyNw<%LVV^WfYyUu687N#`6As9FA13!_EGU89pZ_+*`r&2nZEsWIEGfo?0sICy&= zc9iFfas6ysP7*b9+G~nElODJ^G!Oo#3b3>19BPuOQS73LaWP(q{*{L|V%}l)24+&I z>DW~Zn|wSG-!C7xZWJO+dp<2Em6}oJqTU|3L$_5SP7WD1I-)JKky8`Po;Fn@Ic9!z7`mO)Ldt^xKcy5oL+A;N#BsW(H# zN(r`)DMYS_`|BWCOz$V3nvo9XIM)^M5ZOD*SLDGkpP2$`QlFVa4?w1v-%(J=M`_R! 
zT23J~UnZL)O=Q{G*SQet`S~ypWu}N4|6}HOv|=DmiFwP2?0h(wF;h&9Rge|T)xDu1 z=59Oo&xe66GbPjM5f}3Ifc+k$iw9bW~9_8?X<+8MG~mV#Pc>b51;-rBcsN% z%mVWkdZDvY5r#zPA?5@#rPQ1{XN8|4oT2W!B5W0H$8h{oTGwUNbW1ivmJ8rMwir{} z72v-zX3DAQ(#0HBjS?*DR*Y_k@{qfD87-%Rnvq^+*mWHEwO!1^?#YFgB{P-OtS;?~ z4_aa^eUO;5TbU=~Ml&O)CM-w~3oIlsm{)|5DMh#)#!MA8%M7*P{K*T>;(m5-ErNUl zGu70jiMT7X7E2K3UyQiBMX4rhZB^bpzZp64;MkD9o(S}2QiLzLKu zQ(Ihw^-Ea)wx1dWMQxmOcSSpqtt@wG0cNQ((@4$s0eYBN>xK~xCD3YJfNy)*d*J{z zQ+D@8pZQ`Z6UcL2u;!ft<9i!!UoCzWC)2)#kc|N zIXz9yA3Jk&xMd4NJuz>&DFq$*`}7PoTf}?ezh`y`MkzY!q@cc;m2;MwzMCzew$vJD zwaQ>!nhg6aX3kO5Ji`p$#WvVxQHB9^DQMM}^>62?iTh)Ywdx|n*OXFp_e;jCa8}L* zYINo181HC<%mt-5{xu21LzuZpP3S!f447>Nd4&w?Pb6d82zG6ksJS=M3RV`@a7ijf z?w};>-^j|jOpRruCH_mcf5o zjSO=bh-Z7l`6L)6Gjola8{*rge7pt5E-6LFr-}H_kD2S#1e#c4Z4}KZW#$ew7hjnnsM#2<=S5xLO+d;|)>qx7 zruK(9whl7FldMu~pO%0rQ<=F(&2h@N4LUvFc?o zpPGmCwEW=9CM%A2{c}5L< z)Sw)yhtlIRR9Yk<NuOD1G!WNh3j$nFo<2-Yie4@ zs)@Z$I+z_LL)7g!yyoT&HFqY9{cpW=QTSbCj&)4H)vK(Wx74_7(8T9#T|@?nwKl^z zd^*a^J8H6jiFg4W_0S;hr)6(EPUy0>^PZXjeKkzB)J3a#GJF~sj~UNdJNQ72T8avK z=;&dsqKJR@JPuLYSveo6xnZS&4;S>%m?uL*yNY-WzR$|}Le0&14czhSgU=mG;X5-9`#ZAl(H3g%{nWtE zM+TU`P=*HGc&r`3`kk-T?Afe^mKS|7Pqc%@@8WRw3^U)Txw=dX6Y53(woiuK74g{J zie1}xYTBl2;kIi(k()_|l+-wUHDu-oHMvi;#UB5DNZc%*AB%WsE@$Q^HQQUX5MF5n z%`f6VZ5t0s4Ku%}iS^V&sJ{^^Cd)8Ute14#!ut5%)D+5e;m~4)uVVaXpOb*r?#%q5 z=G9*vlzr)s&*ELu_H{hMe3|)6O>B!cG$M`hOJr8;c{Uy?)y({(X2VoH4EbY>pja8a zr^ci505h#RkUeJ4K5ApH+!)!rWhnj|kLJ6~w5H~X*njOh#{@ACWVk2Rz!nCw_h}ny zjCSA#f>^RaMTz-en~N2&5BbfhLY zv=5GmansUl8NP>$@11qbbfU)brXDg4%tf|t@f|WY5vTImwJA`uF-;rugH7>HjJIu* z5-_VfE2lFxqo<2^Pl5%0iT+y2HBrPcV&!z9CUJ@u+KcgX!X7D9S101mJ@(9XrAGEl zyz6_L!Eve#gI$u4HJ)U;cs4tpmNqdZQEc++HL z9%AJvQgd>b8ocfsi&!W!?C6#(^5rn2L`{c#>In8V!44%EHup*qar;>v^q}UJxrlMI z#{}nkOQBVrj20DEPETr{`>PW3QdwBt!CdL>eNIIQN@q;V*Nv83oW%zLD^nr zG^lCpu7a7<^l@aC6dJdZG2WdSO==dUigNlIVBZEQ4t+>M$6#i(s4-Je#lg+}@N}0H zy0?`)&ym1Cuas4>P*dj2Wbg z3)_tFFkOlb^@$ju%l;odYI1T!eAvaN=q9ob_smH`pF^yi-qhTwRD(&R1xCG>;=&fu zM%%G+`cPwWP8A#4*r0uD8BXm>MBWuvjy^SJx0T`KVvjyIrI>Ie5q7OuIR@0|d{oAo z%}%KCl_EATQDom`&q7~nnv&I^Khz5y#C|d5Ly5RQmzjRlILE5sl9L4Cky2c?PZW7C z*tHo_qw-D#6&D5~c#RZS9w(rG5i7@tn$Mx?&`a}1TBr=G#QuY;daOU_Pt7uCHP|W+ zLAkXQGjtP>RK$!iHRz~@k;OhZFiML44@Lfksmz#A(`U09lJb4vo+`yYVcsP&V@i#g zkvcB9`C-t%5*YXpKnSW=_jq=6n={qa~!hAJDer=%@=msnAg(OCn& zm;K?bAcf(+!C*p<*`+uCN(b=s5ySx!_*(k-Mh8cnGtUbYH`p7=&`Ka_-gcp}1QK`1(43xvLcTWTMUCQjb-Yv+XV}k3aWOs(U!SvfAfYDAOA}oU ze6dul0ZukcLnv==pl16&4a}J$a?LhMQPnXWsS{W^-qh3#P(z2Xff!{ho*(UWJoaV% z+dyiZCTfbDjh={z5a$ltWMJ?dW(HBSOT=}bINcqcPfOwREDZy`vG>nlYIf`wZB)w@ zK^sb;ADn?^87pT9HHKoYMlx6IRox-Qe6jb-^AD?oq15yeb7`K+Vtwwpc%QD$MYk~4 z4t%I_-lU235A3m7j5Q}a6u@FUYj4A-Des|)%vuLL4w1rme-SRUW^?Pl)Ex29#+X5l zfSrhakWz%tz1Vt*A2n<5X=AaLJ*LbnLC>8fFdf6n@uz0~HC+_lv&WXxB{(-;islA( zKLONe?&ysrmi9O}w-|<6;yh^)tLs2&-evZKqsUp8I=mRB`=rQ=X6;}&HQ#0$V#h=q zMExv4me`|scMh9V9YIaTtNu8mY7IMu0;sx_q5BcG$1I4N#u?`5KVtxz2Nj@FthX&c z$jnG;n(fVzsAP??-g(fE5b>z`v-82h)N~$UjV>3>(X5h(RFTiLbp+d+7eY)Lu>T;k z*!N<48b?#}`mPI-M_FRToeWgeRp2F$gAqo}k}x-@s+r+jd^)_ISK$75w%;;y4MfOH6O1uP$G?vv-=RI*uN+Cu%;rJJonVMjvNUK9 zmc#W8JHIf7nt+MJ@bJ13e$PxpjJygjAG0$YW2tE#7>KshOptUe6`GlH?A2rOgvL=5 z*eV29OZr1`cM8((iMX=S5p=B}iW=422<#X8Xzlx^BHN}KwXegd8BdM2%UBfVn_#|Q z3N|mPMob7Z6R0szibnfMCKxw51=GAmyupysw4905GcvoMqN(Y-YbG9Vv_!$V6!>=*`O(+0a$=|nvYLycRbmg^ zk2HMTDe_QQuySIldE-A1*{v)@jIm5iK2?LDqapO#CR0;&Z8nr=Si-t*HqKPn;u6oJ zHieqh*|YIdpN+Gt;Tj@t%*s*W%i=OYmx54W=((W(GArrt=Wp#sYs%ig(uE8rTOiGm{$Y z74xx1tUYJ!lHtbw8dSB6q;)Whniq8oG30|4-aame!90vL}G`yA#~ z;D*T6>^7eLr*o*e;JpwlL{4?XJ#y5k)FNOfySBO1==dx^)@WN~e5}HU12vde#mqcv 
zR(UT*t(FZ6!>du>u@>9jFf*STL#L%kvbDj7v>J4tQ!Cw-WjH7)*D+7odxcUBN0@m*dM$Ly8 zafEWpT2~kE)8*W#EXN{ZP!I;#UxM!XJXFf_nk%@`u^5jxHbF4EbQ!na<^l?F{xnvY zza!D@b!#*jc@-({2e7r97>BInrc{3vrhg2=$75Ge8n0r(Yv4ZF<_V2sS?5Xa5o$wSfMlpmt@T|=%W2by)_9LqXx9{-5OfR}!F z?sW~LD8J)YCoxuD&&|HV>*aCr9fM=OtO*ie{FYg^*% z%SLWa4;+HD8i7#6Uco(z<1felo4EP-a}esD_NTGvWqeg35##sjI|urFTy zcL^5zvhmtSm@V8a-O(SrpZa6m=1cJJN_iT$2(y(NpYH>qKZbtO*_V(>`Swm}#6Fa5 z+*tP>fOo3`vAMw|L{TjPV`uSgww;?%%?H5NlD-*CE@M-*Y;4RGCZ3y)&HLc3No#z0 zbrBaQW@B@Kn8V+}joYAps3g;4*+rbAykr+P3bT`&Rr~wEdQbq8wp>7iE?F>?^ZvWI zX;!ZPqVUTzjX=#OJx0Z`a2v!5H| z_yI6$5r|G{=V8As8`B;MlfaE(ul^`#6oiKU=W(J_HV)>w;W2(2* zPsCAiv%h2zUKO`N|C3h{za$5-9_YiCNtIRSrhctPs42Ekt!qCIbEnlg&;0 z&x4V;upOFBDMVpz2KHwNlf%un_(9ZvhT^HZ5RdJt4u!2S2e`SD8-|K0p=jB@7>ly9 zuz!Rwx!eR)AA%##+hF0TV$_ewLS!3Z^0=9?J`9ZxwZWNTMf6_G!pS`G&dTRz@CeFH zZrK*$^NZ1k@{Kpl73Lr}S2l#Sv2M$GNGqZV;+&2IKGUVz{-+f?19* z1>B4@8G@NrLHJZrjIpMfbXH546Wk0eAA)DQ+d^YjjMQyeIJileliVCn9gOuOgRz0y z=;OXwcv&jUDQ+G&8;sXmD2K0SF~Y`XA$y82r@0BB8fusS2BY?mBCLqbhEsddcFu4! zyWS27#3qD)p7CqA+GHLH@Uxt;QZ+ztmssXKTW7Vh!b%xa72K$Qgrn8HKpgE|g5=*BFq$vS zO>Wwp3dfS3zWB7N7=CnCV#-NjZgJxh8G)~FC>MI|5{x>QfrDMdy}r#&{K_!w4)#Zv z^TkL`%|P535vP)yb6dkPoxZor={?shn$Bn~6{d=t*WOg~@_`@f>XhO!t()E2EzBKm zzRn55jW2%aGpiJ_ltcfYt}u7GSv@fvV_kev`(ZKVo6A7qD`D<&lS<~FkuOqum7w4! zofkPP%zbX^{}&G3>wZ}Ot_1ZWGB8TEo&UIbw{!^ZP4tCoE&V>oKsRd<=K(k6^!#l2 z=z|8MN-(Z@I;w3G<{>wyRpAIM^uaTS64)sMnRC z$AomMIV;RFZjA3oz|YSQ6+tDqs-Sk=QJCl4Y^hFlnu~lfAg&Z|>AYfy9EZH%CTDR3 z$}jq$q_`A0bdKYuY}YTjS!hJ{hM)N%_izacucX6Nw(D2id|MiZ8TI`!b80Epolk?i z9FM-{W43bBTTJu_?_^@?&@XOIyxPRJ%oA7jdT1E=r;Gm zO&(cb9uVjTa48%MJtNV(&U=$WNx?w5{TmBM`GCS)v) z#fJOAt5X>q>CDW-f5Lp@=E?kU4BP09lLlpY-6S0amxTGw%?GM?KYY6n-ujecidQ-& zH4^vW2RG{mgkkX&dbaDA!6qvmd*_HaKe_3Y6OQ(NK4?{0il-maF`sIuEs}ER zcQ3`4-)XqgOPD&`tnsH>;^Vxr=~D>|dZps(Z(-_k(f(dw!R2@~J{8yHebeJ+=Id};tM|dZHRZ6jPs4y6 z;y2aj##KvW|E*LT;_D5x%A`HmuZ1z-#%WaqPOb4l=ByhywlEcM9}8p1&AdNhXl3Y) zZ+~vU=6NbYM+jrY&AqG$ygllLu>;HT{D~ItON24zW=+jV#E$mDtHU>-rsuRtqA)GE zIgl5Qb7dZw;Cut0=}g8S*&mp2lbaraIoV!l<9`GF+_dOAMZ_`XCi-XuN@G0nw`Uo= z8);#&U6_{KDEddhXrmXhJxZab`dOXLg)!sCd3+QyYtp@LUW)qRT7=yeYp&+pI2D9p z^geG4Z&8Xes>N~IMHmZiLT5)Jew`N{8Pa&vPlN7}Vh!1n8=nQ?*muwa)k90MY_0?2OgmV@+XNabx>05}yxx;c3TG zOdY5}?LNXda{6Q^`#rAOF7^|s)TXprbkEw{&;(%`@(Y6rFsf>VZwNDW9u^% z_HR6KYeoe+C2J_}yfB{J6fcXy+csVpnoy4Wdo&oHB#aj~K^r46eYH30I+kO$nFg_H zVZ6Dyem4^L`g&n&Kshp~Ke*9C7$0sB8iDJ!URV=Yj^jF7d|4-qFE@3&MB>B(PmIee z$6v~67r9axKW?&qL|{;eCkh9ZV~DN>tqq0o=Vtzt2;3?6K)e0rusW}%+PlI8aC3H8 z1h!Opp~cm5s?n-J&U9e{xf#(n64SPMBCB3G&P1zW@?4nK+>|6mp!zY&-L$S8tsbiZ zIgStFrkPhHLdUqG`Mq+S=%&HJw<1n3H#7Q1<9%0G^opy1X|@K3;>CAK8*bdw>Gx?( z`5pU~WA9)Un#k|%w%iFv2~ss%IRIdcDWk;dW-)j zl$$3@!*TngD|Roqfy#3#EVmY>9XI{TBd{pb4OR&^(7;8F>Ayu^1#WhBiN*w~5%SNF z#xC8}2n-XZJvS%m9+<)%_h?*|HBXJC!@_jnX5hd`c=YwaChB)Kv{YlBQkah1D7H zXx3PmF5H-oj=-{+?l^Mo2FA2e)B9MMuG|<$MnYNJ3o)+c`0!E%Z)0J)aZ^TfH6wd_ zU}z%s@sxkF#W!)^x^vSpDH8cpJ!s!?DQZqrBW$FI(}SCuox<^Pp9dbEE`f$>Fno0r zrYARDbt7<1<%xUiOEJ?%jTc|Vwe{jAuSXPYqdl=wQHpz2V%oq`u8B2GVU zYSfOz!He#QOQm(0qbl@nE=+%Jo~A`0$;%yWib_%JsKR>LMh9>c6cvTA01q^4Rz~Mh zR5*K3{67P^`I#Gu#3~O&((iL9RfUdm;@SprqkAd}+u(`qD226Bg{!lL8O+VeXOUQu zN&EfX%TRKR@=VtkW(YT%*GHm{g*y`T%4klNep3fw!nko<90_d`S4^vM1Dl@Ed8;gO z55l>Lc8S6ee>a@pSBmowRXBV{m;*kpF3SmZb^Wj${hO~3TuCo=WPVb*F5#rj$ za8s2Vjl#`tShlXEFw?kMTWc8p zP@RyBpb8w?mH_vU;vP)rW<*#t-mLb(nq3w6@ns*z^%ZeuaC3EOH2mD@8$IVHDz@%} z(SBiOa?`1D7>14Xg8t2Nxb)i(&y~W=;wE@OG-}v-qh?YC7CqUA#TsE|b8|)+ji>@o ztXfck$?NxFevB}4xVg|dn(9({!od3`+P&I`y6uFS%T0TSp$JO!LecedY`eK1qiYB= zj~mya(P*Re#B}?cXxm~Rq7sFf&&~7B!{AKwVtbVpa4Ot~L(7C&z|FgaVVLOQ0q@|O 
GIT binary patch (base85-encoded payload omitted; the binary file added by this patch is not human-readable)
zJMV!5&kHc?N|!dFtdc39bYXlda66>G7FKjHxE8@ z*}0%ejh)EV-LhyU?)NUnprbk1G?1C4)MOqtN3gCFRHKUEz9a{aGnmn$Myt0K{*H3M zi{r(}a?gfR7iP4n34daPvoA#5#nGiGDb7N7ey!4>X2vFagg2PsdtcFgtydOGC$sCD zE;Z|79WiyO5k~ee!-~Ec7_ov~d-bSU+sP4GI}AlmS~-H>q{G*QnPt>i7`xz&q8`#T zDxe;*1NQuzkv=t(GTpG)P#4ckMb}~RUHs#3_RM5JjbxfT%tfd8!gm$e5s->4rpy>p z)28Qvxo5PH5MF`hBg9@SJ7$cix$whX)T!4*QWNsHM}D87pdr-0_0NxA|~-E9y{fL5h`(Qo}2h@me@$s17o5iuErf$ks+#WO!8_M^fe^?x9 zWtp*~X3YT~{QNQ-@qslkoEV3~E9~8dJvE`n{BSX47B1!1V8yQOxKz%J12xYc2O{*M z65Nz(AhS3Y9UGZhPEF&TAXIIfiS44EU6o1$Ay}{yF)QfajM8ot4HX{D4eNb@3>s4(K{K2{pM3~)wLeAq7KG%Z5GFk znnMZUh<-R3e$VUSkR#&o`UmdRbX<;vTYou3mek{q=#DqVhK+3{HSYD1kXs~;MK|iO zza;`sEST}2rpYP_?itdM8e5O}Q{j;EXU3Bnhbhqre>z%pI&Z+t`H^V($KG46qGsBa zDCpFUgq}|W$_Gbcz*QD!H8t1nM&sXwk$9!vfd0KBpfHln^%`n!uZqI@I7$4&A|q0yj(@ybZ04OE^I{4JaHVYVR1H6*>Wxhw=5;m z`FsToq7*E9ly z!Fin+LXBIK_&>_Z z!u)R|{v3|Nkz>q+Qge1!FfM0Lfb5!jyxS9ns@2SdQPXLBFt+4Oz^5k-I3X8<@axQk zQ#1Q*AUw(^!*ftQJlbQhFOZoCY81`}V!@aHU|CoTS8?n)$1xL0%~9znew`|gjIP1idE0T8k1d*-Q6ewqSN1%lkJ$r@itTXV{UlF}{dg$QSR~ zyd_hk{dzU>I$6M6su=krc3=kYE0RLZslID4BF+jCcZ)E$B^^~$+3!iEW>C^9e1Bny zmn(~~>H1E1^W!>=nl&yS*f!oCR}UBBnMDR9HnQKdgBsOJcg$>cfLvPvJoPi7$FFbc z)GR&kj$zl9BD5JXvzwYH+OBw{w+@Np@-fV#6wX7L$)@JMn6t)x8*xs&C!g+K zhOn*7s=^YQpf9Wr?Bzzx8>v^zA=;+TTJ2HBXi>I7yD zXV0gF)c79LK<76}cw?E3n<4GEy@r`0YHG0r>W)b`Uz3aCKF1Kr_t+Fu<8oOOFP|ph z-;q4@@IHdD8WyL78uu@5Q12uQ$xXF*EpjxSvzaNSCNkawA6#UR(XSSLW(1@56`Qv* zYBs!aMX0000e-2!H+sJYzV7Gqb7d{~jEy8m}D3|-l|P)$wE zYAcMKJsQsEO7TfO7^dBr*+b0+OA~mV9D|<~#mE#r7N4pzQ$tO5s1X*(j>W`2MbPpO z!N1q+oU5hASyvysWk899g!4aIl99&M)Pz7a4yMH0_n=Rl)RD0cF-atk$Hhk^d_gK<0{ z7xrUA;c3Ko>s7mA4wS)7B^tnt^xZpCr{+kQ%>RJeT7O)W(HBBe832A#@``ku`^zqtwXO>EKiTXxw{T zfPS4qu`Y?t!7*xrYP1lxdkmKKE5z3+p?E)q#c88v?FVg0DNCd5Xb}wULoqFl#c8MJ z*)Toyd^ip_m5LzuAQbanGSfj#qNqP)zkVF{-!Fu8N(j10vaubfX680sbpJIT-q#9X zoe>7P7tEZX=Gk3sbeSTDb5cccJ`jdEYneGo&72@zXgrWdTTLN4YlmaX0A@~6lipts z<22=Q;C=zdOb$nO2{WguIU=bIR~va`j3|VJQ#cf+ux|xtsL{33Mq#)Dng^4V>z7%a^W5m7S&Yp@ zWszQqO5+Z&D{hRj@| zCL=~0+h$Kdg>nG~-4DlSNoFonV?0J1-ABox^kzQBZ3~C_VKxU>sOcD{i<_$_;;&bM z$XSSh(L)yJDm5P(w6LjG7Bf@}kiRfo>=kC_8Z`#qOW`9u9($)1z*jv2oy^$&tn1YD z8?A-Aj^pw6cmc`>gd^3NnH$v1Ii!W&`7$UNSb&4GBk(+n&B0A-Vke6}7mH=!s8N6t z^9bC0$ILBiiq7jo;ruw!8$|qn#5vbDp3T8+YW7v>V8s_{#OD;iZ&n2Sl9;(e&CR=F z-Mdi+=`MxXUL%g{yUg6BX6{!l?Ab96iIoNT^(h<`Z!=W2FUHEgz1N z_RQR;=AXA7x+}_Hv66TW&Wu3WO=ccYW3*cbVLzlXS=@ixM@C?h0{fpnq$X9=wv-(; z9&tYlApI;HVXZ9ABWgw;T87*-X z{2gL|z$$6%e_Vva;yYF0AZA`slPk>aWNBPeFF_y42sHAVO0TKeyh;zf&1ImhT8v@m z!tkIIi}Qw>FCUg+%B#_MIKBuO6U4nShMBk2G@V@rortjrx=@55aostziJ5oQ%&}dD z#pg$3Zfg;48-!uTM|KT)PmO_^9^PCWiK!!s@#j_;u5D-L12qo>|4HgWE>h~i_tJ35(~~S^OYLUBm-CIigo;;r$u<35`i>bW`0m}Ldy_iFOJ8n zLq&)%ia^O=W`0uR-P;JY#c~L`T8zXAk=Urh%r9z2co^Ywnmk737U9r3@lAur`Av=L zLj#PoRzT^+Vq6{qtJLe(-XDpNpoK@oBc zqM&w!8Q%~XNc2{Z??i<2c+#8=wE5cGu z@oo4wi_?u7gF<7R{yi3xCKe$vDH5sE*zf61&E}`Z*lj;b)ITdiUT_3<^LthgYR2k_ znnlY;!eV3z%AQ4{Y88vqlbRGsGuTv(fM;MaPGv{Ji032qqUOL7L)fhzfs%}3%o9CA z%zLoox;Hg$vL^VHF&qWEi(z3MiRgjM^r5DFh8d=<9FFLIB~Ukxg!BSt`cl(R&J>0F zrEsr%3I6_$!~;oY`cX42)C6e;!=R{Bf{*!PKQ7;6)1R7Ky-bm1CWY3MB~V!&iELjM zX8<)%UYNijM-tNCN>Dd53j1rA8Awe}B~z>ll*G@4B^WYP^utYIW)L+a94%0yGz{-0 zOK@Op6b5%^`Fs-8%&9lW|FCtJaaDa!-+=9Iv9Pb{=sHKIqbBCeFODR$U10gq& znL*U}{noAdI@jj%_eCkvnw}+;s?4%%#Y;4TNhC7H0@G@}as|utXM# z;y%do4Z@d)EY47Btc!K9pmC_Eg_(};9|N#u8jCZGniNqV)+}ilqSM75o0)-_;?Cm8 zQnT-<=pVdoI3|nzS&udaV#y_DhEo%GL-bHC9fh$+rHB%1)y;~`jG*RQHyw0R9*z+) zQWV`3eW1EBGm@J61v+TX9|2u)Y~S7o!gn1rqo_%#(nY42@7?`KiZgN|7hxVVqpA7V zO&1=1Bal5a170D4Nbq513^g*3wc(*M5)S4Wm~k@@P4evjGnN|daDB`-8-ZN0f3DCf z5Ob}W8Ar{k?grR7WH=r@NyqwQ0dPFQ%y?>ij~d`$A6aO3%*3dxf!O|-nF-XKTCR`M 
zak9AAoR0pDfq3|anTgcYh&c7*WpTzb9b+6sf50cqOrj<_&H!gJMxb9sCN7A3XT?iq zCR4Mwojwe@42OkS-`?F(j46~^j`I|1oX;8HdH>1kmA*Vqx5^hAnP+XJC~otYWb z^cJSG@ko3rO~>!fq9^bsc5E}LIiI43xgSU2^3HV3KIxCTP!?wvHHnU*hS}ZGcvT@q zzg2<2Rc2;WvoBN!(_W21Yp--%>J$JqXJ+P5b2nNW#}AFhi*FLRn}}S)NcPOlrN(KY z7XH2-kBpUKzk{8{?|AYm0}Onhw%Z(d&kCn3#o}TQxW|HXTixc89v1Uh~xFi7g3|3 zq=dGtIgpv2gf7>E;Qf%rSxk+7!&(geH3!F*CqmWY5QgxYrAw&60|i9io{!{~c+`lT zpGAB;x|EvJMk{c7#UfEF`WW`TIf8jS2YneeesU`j9Jma3y<_n(HW>Mb*|lCyO=P+p zIt^L@%lv4(tP=V0LCnZeqg}rOL0> zzs#(lCM;DB%0{a2bB;ovh6vp0!|v;q)Lfdg0%L78a6CT}hWgRCvzwV!)I>NhgOi;O zlJ7@ir(X}Tzx5ppdDH~9Lg3N>aImO$mke(ZCM zLGO%0?9O28!K&2k+PoCs6W!78QVhxyO7TX*%zA424quM(8eX_o6a$OZ6}UE@nGMu5 z?plsXqkTjT(O6h^t%0{DGiuZnI*I;>Ilc&Z6N`D0I;cHn>*yP)8LT9SNh_ zkt3WdvT;P5&zvkaMlzlbq9xOVU0#lU2+@2Y8YG#i!!t`uA zB-*5)MXwak*04R17Swz{ZiIiGcB54<8F|5_C|S$QW@=)sjj%|@9+{#SNm@!N9QHF~ zNll-hX81F64+bm~Yy5*MAQ{e#6*c*JqW(~-9aim;!f#YLmW492g_`<<*06bPCu)3V zU{z`vB%_$wO3kSh8`P%mL7jOf)|pm7Esb64ZPcXpu)`E52iP3R!aPx9?8SL@A6Qcp zxk2oO>+OgoTXGQSQHBjtX0}t)XX*hl@8XE(pK}m7z6?XRGqZyls~HC{*~$@fwDT~i zxD>tJnb}EAZ)Z2$s@?}nhdlWI6#4s4*fY0_nvRw3nB?Y+*3tPWkuSp{S!QggdA83J zPXqR$!8Z>Zr11&}&80Y!%if8*sYyKLjl}m3i2t37i>js2 z;rStU)Eu?&!O6YO*swAeZ&wu~a4j2C?4icA)DLmLobaqA8{_X5;9Uo1?5Qy<48XTJ z&N#m`2Tkqrp%Tu%jSkcp$p;{`+zA=pS@^jlAAWt9*-K5y_&}tNaK@VLSr~7U2kR&7 zyX{B~#D0q6Gfw!lK-5S2oQuu;Tkk~8)krbl6y=0SvDdeJwWx!^dqO)?6M9C}c5K>< znSq(;(3*paB6dFesM$Qx54+yk!??fLQ{0e^#r4eWrzR^!tTksi0ONC@)|L$|-jDJC zHQmIzWRG0&T@Zioc{LN$_ptw|3pMY?_+ekVJ*jNQtZ z|F3so@9O|+PR!koo`J^5Y0Jl!@v-POf|)>SLUvhT;RZ9z6#I}Le2zwDD>FgV*u|UT z#wZg=MNL1u$|!6oW9A?=Gd7vxc!>pkhvbWMiNp&9W)4x4nrVWl0y8WW}H0MA9JCa8wqtyW`e0H|E7m9Eps^f zi;}tFxUI^}acbmOYGc7=bNpSIhmfJ+uxw@~iJDpdTKIl*GgQas zK~_H;@oSh#rpC5Z2krke$C%PQ%t;8t(izO8P*Ws&eVE3XVSY_6;%h{2yaHxUQ1eGg z^j^4X4zEplV%|3tpK6#%rDkuTCX%O{V~R%})O!e6xnk6ZhNUQ24d6Z*V3x>&NRudyO$}80X^t&FBgihLg6-rnH*}=n+&i`T^~os zi~VV~p?Gb>u5~Uoe;jlXKS~$7=jLKwTL@-VusC_t1lSoOX^J+6wBFq2PB zR*fNMHHn`0;=Vo*7K%78W(ugea$X;CIhvwBNDgM64ngyEc0PsFtX`-O_fc9n7Muf3 z^-yfoVWxPIdM#Xl1k=Oi`y2_|^<(*UdY zWTTh3M_X4hQ%8-rzp>bZZVW5OY-|_LkKI>h>ZvK(V}z<}Mu;DhjoW@<=r)s?Q`A_; z8KX$lK1*}Rf@OUe)?Q_%ftr84jZrLT43*oNFpdtz@Tbh2rY1_+2wpu+QLmVVKcZ*q z;DM}WTq89x9}V#u#UEX#?1^Q91cS} z1!h{PX>!wtSszPejnBlA)nS-2gqc=q?3RlB12GO6)s_kEfnvNfnVECc+@7e1#QM#U zGtI#FXJN23X68IKXNvVP{f0T5j51+xISg0VFmr*L55INcm|=;|6EZQRDGcomnYlkQg2;Gs11ASxAtjo-GYPu>L;MQL!$ogbK{ZlOLhO+1M1~rO`dg#>Zj9qS-aA=6b z?HSD6q~_BdJ?viOisWih2mWLN9`Tylx2RDOJyXZqx`mHWB2YR9LP&O+1NzliQ znS0c17X1VVw|ZisSTou_Ow^ga#KxBQsqwy`2mdFYu)CXuhhs7@^e3C&c|c93jvlfM zJ#nci3z}x>IIxDrc}R`2zb+o8c|h@D7S=dt;`}2P=Mgo_*6L#9AWv*c$bwq?3`Fys zxyRJpeyD?2XFYIH6j3bG%|Ov}7Uu~y7e;Dfs-6e(ax-zlC>t?sqr5 z8Y6P%MDC!|Z)ToRV^*LAMJ*3l+{uJ>n#eot$FAFRYJP9ghMIy0ekW!kpg{`zsm#2f zrq)Xv_Cq{i*gFfom;G-K12ZqFIo4GR{bqTHdDToz8zaS#Z|r;BYkq%gs`;{y#QH{E?dF*&66m z(RP3>3<`XpoZfZl$z!PB&>F6x#&ouD;S^rTJ=%t18H$~mW4;gr#cmf?d zvpAornXIaX(<43c1dmgf?0Ab z&JSvy)@j13&J}k)h+JLe6ddC=t?6*RHwZ z!9giL3_XD%(aij&=Eg*A9GK^Zwr~lucAmhCN$h<7P@~mDSJXHac|toyeI)BtoH)kf z{H5lp=!q>auJz-+5{Ry7$T-Z*KWf^!Yr*J(C;G}r&|zIF#vNs*-Egx1XRzqa+BMV@ zft}LCcq|o$gW37Cr)Fe>CPD(e5c4k;uT@T95Uc1Wrf&=kDJ`G3LrK3KUncmbi z7p{kGb^zv#kzmlMbR4MIt5~6UK)lUlR|;-GwM&xwz&1M87ca&4V7SwOge7(u{Z;$Y4B5l zX+Z#HWu;+ey#(4)W(HETszMdUqXRHytOS*(`^gDSP^IDu|>b@REsR6JQ=J0n3+AAA>xJv_wAV(Ma@DFRaAZOLq~DH#fm+2n^Kq=P0jPwD#*F% zhY6x5K;=^jWcZkT3^j%?#rX5YEJ7Y!AZ;o1d6@? 
zx#BlqIL~*PM$PsEN;qlmhSOrt_d;FKgO6W>>D2V@tAZO59(XP*{y*o_;c$^1+YD+< zUyC{=;`fi@l~k-KPRBD|mwF~On_ZN!r;7(54R$G#q>^ z_Mq}!%?qeGIZFw*6-6K9vQ)UeO^5SX_RKA$Ms~F_ev0w+p_WwqoRx`jJ(*cVP55qQ zn2mEsgqrxRE%wk2aA0OJH4&N02!cEA`AYEiO*$NVuyNHAYP=>Z;ZT1M{P`h4QCTK} zomrfv)SMcwjLDWBIOCLtyqTGp>%+`4YJ#DN9Vb1Iel7BWC2O>Hb*>nPWUee^v=f^)mr(R!|ck zs*K?0!b}!(e3hbxFRvN5lA5!VRnSq!1EyjevPsOzKMrTdwu+kYsVW$H)*a)-yvDhl zbPVA&<5p7>Wv`5wi{kltn1I&3&DX1W9q6dQ7OR&o_1EYCOB~YV$K?$>> zJ#hbB8YX9p`L{3ZeAZC2et-(z?esvwMhRl<(s5`NyGPejBb%gxa(Dvb-bp@?4)XvO zXB{=AE{ZrN{y#F}|6}+&9iwGf97Sr99F)=D%@Zp>r@=ia9R|Edyb?8LgB76^>H!-a zQ4eKZ23omMrl!j!C6p$5VBl%--jQUWu9}^X3N@eelpt5`0bTL?=k5MXtlr3sDm7y^ zsz9ZOCp=!HL4Jmqui3=RdTO@IsNjmP2NoEMZ>gu)|H5nbY@o(ESQ#Bxc;RD>n0xD- z39|((jv6%$)yh~pzys~YJsS5V1MT?l;EmLH8Y!bwdr#=imB6St1BPZS&L(QQ&s4!O zRSz_YF@<}026`@LMxC0FMipFB_dv=d2`-2@aod^EpvK~=vKUjkh&oIX{C$~$&HT5s zCN=GqRA7I@654Dne)AfZh0KZUyRA!&LX|3}y12l(ub3kb$U~qui=#)) z&UiHhO>;q9fdu=e7vMebGo(+AW!nb)(s4oX$22(25Iwk;vHy<&HL_1P;#7MV=!Z%% z<6|K*c|Ax&YE;InuAcff6WhEXJ+>qXQ5% z-P1JjqTUs@GE!0Lrx->qEY4>uo?^%tGh<23#=#=aWDhusr~CTd zB78_=#)=w)(OMWJM!tG;q+(ZB32K9x*+R{-$Ldi3=YlkIDdH+hu|biUt<-#wYM|c` zXP6p@Sz@unZ`>dDt7#iG*Y>F6c{eA_I4ePfVk!DfWpS*j+3&A`ZRrju6u(Xfohid4 zUYUA3H6wax;8uTI$i9{U=Sq=kz~bznrl*$%ELwIzW2#umsVc>3Ftd}IULqUG`=K>V zhe~m5XDLqpA02?GDf_00nFp-IuSY5Fe<^|`KQMoDq+ zN1phF%gi2X+C=Be#Zr-t6DY;GW%-yfml=C%G!!(E+F%3^acmF#^H8tPi~}{HL)Ec9 z-xR-MqzD>Rfa|u*?4`!iMiVQ7Ot4E-!q{vsx_)Fc<4BE;y(a$Um|@@nDGKEC(ZZ|# zI#IJ)RuiASj4{ns0!wkN{p(pAXKJigXya(AG2V!o!Cx{3h|XtbA2sS-H85hSDeUK^ zcTpr)UJCg#|hK*dCKDmE#=!c1mds2T69 ziLk!r@Z2ZCglz?Qu#Fj4Y7~<-F!O^s43~@U?K2BtQNoNHHDwW^+Fpej-ls}1FQouU zd_~Khn$f-*xIDxX%f)QsyX<_tC}dx54{G$|G*Dt=j$knpJH|}x0$Re1Cp9f<>d4lz z5ZOf%*k_53yL|P-i<*1xYWOMHjHs407z`>xCjSNGO^x^04bV7k2|F=Dcr>{P!(-X8 z`A~Cg&qiD=u*CBTY51_T81lU8uP-&R6W1f9uO;$+i%zq1i!pEj8(H~LQ`2uf(uZ0h z*dh&~y^4|Fo*92?zL%>YHEgTc0hETo_9d{L#7qD+HDX5bdOIr&I3v!dKy=9a$4nqK zH^r>orhsiQElR_mvO)|9W24I;YQB$C!Pg<%@IuTADTtj9w~n)!*n`waM2DT$JJzD& zpahpp^KgNiL)27?YROh&w|I&a>ueq$H*X-crZC>Q&3nK?qu zx2_w}eSj^Vi`7uaemUrr#Y`|YYKJyrNf$e8YfQuWOF3vB%`%^kQgfn{I`+Ho!F(+- zT0WZt-*?P}P*d-vj{00XOclHIlK13b@;H{s6-v#HXPPj}vB%@iY0!L;jTa-B38SW2 ztX8*#IzVPY8m>Ib!nXFzgj3T~R0^K19a}Orp0P?;<|ua6x=Y|X zumVbpnMt9heZ4YXjusg_OC*rhsYC;>YEO(_g`~vwS8sZ&sTjMC>e%T9XOysa23oVaJxmOr4dej^%=7e=^}* zQi(zKM`)aEYMN?y;NJ>2sCLf6p%Im^ymXkF9BTgiWP@Aw2QWr22R?f$F<|{6YI3QO zwXnziU2eFSn1`wLm2mHJkeWPdTK*n@yU0jtOejF`=}LU*!m8ZlQ{&yu3!_6_#mnW?4bnnr~9Cb(k9^|QF+S%YD9%$%fVtk~&YvGM@+$DYM)+Zxe{ zoS8amvZPV?QR@P|X{~rK-lJ3NnW?Adl5qq+{oRk!A1(OMP>sw#%$%ae=zJu)Z4%dQ zWGh~nSL5?XW*VqT5N2$h3%31jfuC{>`ttkwG&M0YQ5bj11)t5%;lIVzI8wplG*WZ= zT$I>V?TVKl&%)tc4UR~dIYW*1nrLi2?+*D(t>{!#1FsJ3Ic=iG@IjPV33kW%o#$}j zXbmc~Se#~RQa(oGev~_EXP?97_!>C#d*>`Qc_uMvN_I!EN-H*Bt;LQH?7nWH=FNZ@ zm}!df+w@ih%AUlxVeI*7rDi{3vA>;{Sj9PymF~6p`HRIlN6qf)DAY{!Mz2ZdFjuD* z6&5Vcd1^{kV=(H0HZTDRi!ggSU;1W)sWW}|Z?7WMImU*%bp|7yUc9f35?WokCbM?uTqADQRRV#(nK z#BO5Wg)7w9iTA;aTY>QTc^2t0r%^hWUAL>$1dfe>qV$kh-ED$$O%wc(scniTBRZaMT2x!O{K~U@?##+YM@Fs0gz-3c(G{(B6C* z4|cO-yGhN+QxQ=45{sUfn_%gF9bXifxy8)%D0KQ3k4tXNm_O_`c6VXsHZ>QPM#7>f z9wBSbB46$NnsNM15?mzva@5t!SYh%3>}7&z-O;`Xz# z%ROqA6pH(;E(x1AH=+Bm$M8~QW6S&0G#-k?-CxNFGHJ&CA5Zb?7#mkTpyuM|aBQ2F zia?iUn0If(;)~2Yq-OE12<%K3^G*>>xEK2xr<>Rq_7OF&$3|j==y)>O?F|0?evPk2 ztU}UbYA$q*!06&MNY0){`}MDJYBL*?KcU8=IUEL)q?m2m2z{-$V(o+dE_q5#=#vOM zxh*ne7B(We<6F$(E6>lU$+U{Z>n1726g1+m=Q|YhuJ_NW*`gSQ(;~CC?pY)HM!thu zFLrD%sG0dkRC&!2zkL=oL6Y(gDwo-BrkB(_6ID^)k4?wKRcDZY=^d*1iv25U%Ev~d z|3!(IS8l@Yuy;80gvDv2X3Fqr4E!oZ?^jJ2pz;p7N$h-HQ{yZ;G)IZwnFB+bF!SwO znDMNiH`M&ojzRV(DXQd}ar@z0Sn{s6Z>jN-i$VMqDW2VKg3;D@=*#Do-%;b$6a)Q8 
ziKy1!46m;5VCcrq=RGx5>tc|oD8-VE%?NOKi!C+mx_zLgU{DPH-jN`$vKi~XzJ<+u zWnE8(yx92gKze$36 z_hux2eSH!?NWizXC$Rg| zIsDCS!xK3c=La>@4<=xLmgtIYbROCMFY$6eGe4>M`!4}!kp%a{7f`q5B_=#)@3&vn zJfC*Mez`Vz*c-A6(c`S9|LdTOg=N6sPXNWfYHWL*tGOINy*dyj`8yy^yIx}C$YA8fqSotou6?V&a=1T!2?VRCaV8h)`j zJ*esM+yP4QM^SR&l&Gjvi(Pk_=}C>%1}D)8`6zBKYJj6jE!NFs_jNC7`u*7p&#^~w zEAJG}+Sa0JGc&!ZnLWV~4-XzisNE@exYk1EIWv8z>Gfw1+P@4&vT;4aN^0@k^)b!uvfHx+hl2COW($r5v;RbsaWT^Qu z+8*oOjv`6B4qqKlVt6KtGl-hXEIasF9Yx;GdW=v#iT;+%45miTV>f2H1*0jg4v%h} z#3(yvhES9DdN*Vb1moJhlgLP@MH{cuK9rit`gRy)5{zXtPr^p47Jq)SYcPx&w{5#& zy819KPCtoHoocY~Iy17=oO80p4$fEFnVI3#oP21DH`PISqgM@RSHWu| zGb5;(`pX9OeFBkKSB+tIl}O5F&*?~NcG%dU!)rf0jHtqiUgg-Qz|1IWA{W|Vn6r-< z!T)5dliW);_MoXr^a!pH9F4T zgAwJq$QQX0e@mH}K+VBwD?GVr0~f~}3{xmTZdW#TnMloI^{q(mybbodvN5JZ9*#J% zYcPqLlQ%6PH`W>+Z!&TEZw^vxn3+sX+xg9i8DIrp>kRm<&Bn>~%uJys?tmHEzp_9l zvEJx3EE}U!*>gIT8ruct7(U7riLa&jY@CVQS!_OJ8a3M@%`s2i1g^it_+Mn9&t1gK zbZU;gw7{oSQ>=O=f!kWKKDC6I8Pv4tSYUdv$SqP8JIAU;{z?WjGpTty*8*q6u`SgU zxnCL*$OW+HbQU#DGG=(v%?MV{C5XHvx^SB?Gn*RKU1qp5&;V*855-EFiX8skoB*k$h16VX zGk{NtA)Zc4gIrN6hOc7x^&)C)zUkx3V?#7Iiye-JX(CsVnZ?wU4>!OvUjr1~OT~|& zsVLjQp6w;nTy)fi)-*#Hi*CTOcT*9b!OT)>#ukfQ4`)L-UO$23t5WgBizssuyJ@W9{t|c7rY~nU!!dD8bv?!I&k_;;f`* zkl$_utz3!Y%S#Y{AsCx?GP8=B#og?1dEjbv)-FK@rK8BOU}iNnrGs{3*Yj0a5L1H8 z!@;PW$BY6s?tOQo=grlK-cka$-@!O(#|)^6SZj;1?N&j)p#;bDg0Yw9ZLFbY&(=NY z&~Y^;ZY{;5r$^9t7K^i%nwO$WNK4I1)UPZ>&=WCcmStugHSrO<;iA6+_p3{==3y}Q zIWeP1jYr2lcpkA5vs+8?!tf}pcr|M!YI17qFiB$tUSBH1upPnZvWvx0rp9%*7?0|% z5S_70(XsbYv`uA3g&LEx-8ei^4nIbV+-rwmeD`5Sm6}ryw)oF&K9)QzK}P!{2#;ZA zJvHNQ?S#GPR_2{sipMI4U^kuRi*2B$&-0xKA21y=zmy>B>Om~!Ran)i8TG*$KXoUe zhsY&=-5~(G3R#?u)XbFI4u91V7+YS9luy3Uv}XBYo2YR*VvU&#hoPXT7@drL(4#9e z>eLJ#FY?dl$zYI237X@)P$tie1~t(oJJ7Ma3|9J;!RwJ1HkPyV(WGWM4|J&c zG~W>k5q(hjuM!%W?x@?x&PSJ;-f{cUpqF+%PFc7??F%!;)V$n&7-mr&p*N-hzr}U) zb!Ntdnk6rSF;=k?nmaV$M!kpVaK-+orqm2M7lPpjIwSVoDLCeNV(27h%&1A+7KYwk zy5h=}23YueqmJjy$tf*0#8ikV9 z-njJP47`*5@!OA$ceYTo<$E+@*Y?AHpC|Im+N-aj-RuaM<*{{I8)`0y{DXF}Yq0XtO~@ukV(kI;JIj_DkGxoX zS6BJkH|SQS&k;8b@|%qHfMjM7qSF zu7<6>@1f@U$!L7;C@QV}xrWaXvG@_lj6F3P9+CKJs)K-ES1{pSEEeRlTn`6ovWG^% zDpwDG9j?Ok)-kMfVrDNjGt|ORu|XGO{=10UJ29|rW%(42)C~6yfn>WLl4hPm$lF-- zDP_iq8b`~cFs#srRgWgJ%D=S{cVNqH}j= z?ma@y*x&l-cFhuFx+Nh;?6Mzd$4oFa?V=12d}a&04U*yVG#y5n>|J$~noBNvxU|3; zcDX6It0B6v%dl@_2sP`Uhz=RYZ18A%DmpFB!e$9Gq13c5pQGvU;PnwlV1)ddz;GBD<<=;j&3OawLWc5Z>(LJxRt z$-?u6g`yKGGm+G2>TH97=vZdfDCVx4im+%Mo3DzZ#{TPW9QhuAcUKGWwV(oBm$Nw0 z)bw8BfSVCPxcRaOyUS|ebb^@}YSs_mk1n4Np{}qPZR6^&bu}}w)RbIuMV(bJ{+5+s z|BOcL`pW*N$EbO?N>rMVhGFc2QcSlvE4rdF6Gx4zs9=0}OC&-?SH-dtZuTW}3wtt?I=HJ?s+p~G2`yCL=^ zy(zc}$$MswQxiSc3%(ta;O}37?tg9}Q;r>55;g5MdSlZPvA1z_CGw^|K)_!XCz+bG ze_r@pBZZqyB~-gT##o*^nL^FnRBwFPB&vV+ti%)JCvdvU-XSNbIhE-J>yMe}R#AcC zWzW%8!;USLnnn?4k~A9|CRU<)%?ljXW+siAU*$fSDLSiIZ>++c>AY~?#s^QK6nj-W$b>-rbczVHWZ=4aG@m6Y9!mX?gi83rs zE;ZdEy`drY{&YN6i7AI)qsucECyyH62|ftxoezbU3jFBYhF|>o$)_f?*c-m01D{uU z1zg{}g4ZZ^Yz5RbhIye^SRP(~uD~$8Hf-6!Od&N@`^9~5CKvv775JDa;_PCkh?-1e zF}F8J{7-*XpjEyNbsEeRQ*%I=E7x<;y<-J@RbHX>8#5);ST7gdW*3MKVpl4Voc0Q~ zj?9!&BR|Cpi;eTdw@17~OkbhvFlNfANu1z?qZ@OfY*B%B8LyDz%S<^np{KoZ?^zC_ zK2*SVQ5*iYFjGOz-9qtx`<9KcA(fcD@f89lvF~XmHG^k(W5=>=jJ#igrpqtInj?!- zMa?}~9|Y-Vpk-qP=4Zad@L9}MQ}gw@56=B>{aEbT{q6GtE&P6~p~iQMFaFd?pyng? 
z`5t_Zu@)>&Ej9o8`-^!H3BD&+z)k)cLRT?!k{UTDUlfY!iOv%$aO?aNEc?iwxjJgp zbNsO-ClwdP`K(;<1hcO(Q%}vj@c~#PsZho6OZ2s0O{iQF52(~tepHK7FZ zWleBsWabhz4?_YVzrYvc4wmA-{ih(wV&*b6y)XJ>W0faNqf5~1cO9amn7KlYT)98~ z=zHMOgc3Y4uf_Jq%v_~rL9Ra@B)OsQi4xe0{Hhyr%v_@;CHpWGWhS8C+eQ>9`eXSY z_U^q-O;lDeGB!`b$)F}Q{qw`GQf6*Y(`9xDPA!;;0Xv%Uq|P77x7hpbCN&bPP{_5+ zfajpIm|pCU-QmpKq9&$uI4+mXhGP2`gn##kj|(%msfjR-M9hgfFctfMEItR~z;X6I zxI@jUanV??XAZ{7oyFSkLHL!(%w1|Oi#>N*LGwgcyt8Ppdl0r}%-o~qYiKX|JS7u&NlQ%ROSw0GIh`NXX z*C;qSGxL&~OQlJOKdFGlE$0!vEefyfn0ZA_??>WWdTt$7JD!8bh8Vo%$JR#8`aub} zJV6<{ZOvHf7Y#K&hyR+IHp6(}uPSD!pG9L*3@&eA$M%LAw~=uO8nGUo-k-tPF0oM1 zXXY(6f5iI0*lIOw);*0D+gNxPG4qa^5V6Lxtx^rof*UYmLL81OF!P?8tDmBv6RnN` zr%s{Et7GUD%*+RB)`$+;=AuK<=B;(;vnw8>m6`cSjq2-gtR10)jYZWM|0N#nH!|~y znikPz*YBeaCd*gieNH_7sxk8)H65IS5j;>IDh(B=aY%&bZe~7H)BNu+97c&+7;8li zZB8OQrZMw{np+Bi=s(a5s<(@AH8=^QwVC-!&CD{<@ubQEy|)&i>XF!=(!l1Tzflu? z!v}jr9l#qma&c={5)>3!obS}U=;w`onOh-wm4)BANmxCJnIF{XKXb>9Dl6=+$UwJM zDF{Epj_oHk<3%OWQR>#%@k-?8T~EdWBNpcuHKPk1@W5*qlwwX`&6X1w3>N1%HD{Xk zz^cO@=w3<23bEJo{&8mhP~&l6Cv2+iVXmG8zl$dj`j(l$)HuG`hM)n?IFOnM6}MDO zxx~yrYHn=Zg4G%a5HvdxUB%wJEE#qU+KnXpe>9UfveSS;wPyPfHgzy@#1z)Esm&gWt9j=E;D_pX|uA%pP~6M*(*9J$lrkV4t8w)sM$MvJC+0% zz$sUX1ohk4d7GL3)SPqOg`|E(SiCqLn&#RziDfccyUIQ){u z8A#2$W_ygvDaDX=S@@Rv1o8a3$x!3aVK0VG7r&doWI<=&Gw3(6V;e+`(rPU5quA9<|rXPmgi%m`|{@A+V%{z=$BC`O-^U(k0fdsmI5 z=JXpM>^NMDPn%01_vi~g{AO`RQL|Ok7r(?cSejmny4Wx9u48dVQxj?Jhh>(cwvB2T z9$J3J+1t#Fq2^tiALe|mLbO^rb|`{P&8Pan!Ut z4#2s&6}Wc39Pgj}hpCI%w{bi**}DQ@8d3(``U>={{siYK%uJxBwmA^1qDsWxfJ)p+ z`iO;dnVCq9{Q5wQE){#3o>jtE@dHYVnVCdQ#-Kp_>{x{I_af)C`2()hF*BK(GW$SW zdzObi#TD?+euo)9n3+P2?a)AMTakyKs#W+b_OSmr#(o1%rDpc?0F1HB#@2*$%RvG7DCjND!!UWWY+o=(k$E&+HmG6TcJ{ZoO=cUkMwzr%-caab{9;^oAcEr==p^xC%=mpTMq^nOW5M+XuklR0>A@uEeIm$8a%Y zW;Qj|cl=?yHW{V|D{*VsBlI?BW)3x%NBP75ULtZ0E3wP;KK@lRGnbl#20t|Qi-+mc z3S2h6kD?%E=23I|oiAkX9mC}p6>z$K2Un_?nNQ8ST0hh}$DpOJn791zHvXMvW&t%l z#`!~GX*5#By3EPi8)6-VnT6DB-s=b7nWDalRXNmBui=ypGmEHk%=ZPV!f<|ucppeE zqvuv;7BlnN2ZiBBG3it}Vj?f$t1UB2sJX1`EB4)rT;jeJ(4Bc6`Kin-rKZO*AN2oo z81r_OW0Y+xYFn6DM$O0sA9&^*MDg7+%=L*6p8oElAgU+C=)#F7D(I2qZD$9c@i zQDeNz7fn0Fd~cQbpWbN1#P7_=QzN_F7l&+o#6F%fv|en0&CIN$X1&OB zXxy+3$^FW)GCdn@G@t-6URkqA1QIk2u3*k#G(LJ>ccl9%I z=_WJE)VRD5dyu!8;kkb~s{V)$XK~D^P!rwF6S^UW@DR0#cbSUreGAxqpi0e^2v5;{ z&;aY(iDzMD8WL++ob}WMt9YV|L`UR>mm?!M8KccuoDI}0kh&xMkv7gPEywJFB>e8c z{y%Ee6k51rPRSZsfLUUXN)O#I6m&xq9X;3quzdMGCJ!(mJ%SD%>SiJwp z&PS7)EN54&IxUCQiDghoih>MZE6}1wCCUZ5SC^r|qZ~%ck$Cx(<~ zP404-t_jC%em*+XbXo6$?f2)Q?_hC0hr&?n#IAuZHB%0|!n9=$js=!OE;1BLI?$ zUxBk_har8)&c~1%g$4WZ|H``WznOM5BW zX&a^Ss$NAC4I?7iG$bQ?_PM^_AJ6#%?zi)NU9ac7uIrpLYWWzb#Ad_0yC(|eSe#wd zyl^zfouJY1zL$-J8V~GSz|3xHj`uf5t%WLrEwiC|!4vEMv18MsW~{fFm^&Jc{+lIG z{OyjNBUqe0)Y!>dVBo|ts2dP5h_Giymzw%3W?0`t8E0-vfHKiVH-eeN z)bw9riJZ%WFgizqTi)V(gRci4q2{%<1x`*;z}F5r@JV;Z_fzaTJxYzDqBW$42O$5I z1iODa;^hHmj!{!~!4lV7`(b{+JowylgwiYad-SNOTy2ek$3-6H&OA69J7Vb+7UwuM z38I#&)>|3;73PALBXW2Sls+}a+w5@koh-T@lVZ#*2S|KaoDV)lECsp)PezBQ|RVDG6S%=dP{ z=bkK%2{jjXyTGfWH+C2lV~c?U&V6HXPEiwi)D5wEy)i}9Ql0SF0ooZX&S`4C_`9NM zd0$K@EZ$92ATPcTGe@pPNRLqfmXUAqk z&2J4~{4$b7{KXP9#=0Vj*DbQ8MrO9i9V?eXSFz9io7n4NRL;&FJ8D*r^}}h29Pafi zMs<-mC)%@Pv!`ZjxgQ#H`@wu>G2{-pp>r;a<3LS`nm;~g_QkrdC77e@2B*!;I8rk_ z*&o9)WRP{F7-JmVMa>aroT#a7^+)!OF-pQHov;BqN`h}z5@)R{R$&9wzV_~$kVK`o`Qed+;y{yyqO&A7io*f?1M zcIst#^U*`})MM|7-qdsy`Fy(!2g3Y9DgIOOKzT7UKGcLQ3x>YVU^qmUV#g;BQ9pqh zUurChg0b_#AXtdnwTkUMF^cc^_oL=r%|BG&L-pl6j&rx$)-3Ql?48%I80yJc~VZ9uSbDo-dhcoDKOb#)Lc~EwD z#f1sXgj1t=&I6x1$)nv5F?ZX}4KgL{*dnOeGRFg0E)Uzc5`?C>qCMYJ5=qUojjnJ@ zmB+b-nV2f?ii$}rP82nd#k~KFXNuVHGz}Xby5L9-TLX)x#xd9ts(l8+PV}C8ddC$$ 
zy;+EP2JKUBlh49Tg>9bQq!x_29JLX!u&CpaWB;sV{fwctT<|BIa}jH z>tMXuei7O_E@&}eaV}7E%FY6VUiHWFh!iMXb-_nxX5y(CzxNc{JygI!-6R}#bcM8x zt+^&p6JcY5jr|nxTCC@IpLE5tPApC$HHU^9qx`{ObZ$w&?kz4TY-A>h8v9H`%u^qX z8$aXGajclf=J~Nkl_=c`n#Ydq{Wo|726MZ-E(RcT|GM@Dxl?64&-ec5D)AZY!9Hn!QSpiXMbXp|0pQ zotYeJ#(J7Tu8%Snrlw*|sta=ZGLuV54U7n8~ANP`VX%6%WIq z4WhrOxc5pT+5JsQjk=ig?wO&Crj_X!cG?9U%$doj=4_HJj4r8Q<*jt6esC81-dV4h z0&4#Dv_)C83R0D?!seMX%mY}QYt)R~Y>QzUBk}#$RSXb)OzabwxlT>pHX9t&SHs4uqN8KHdKil~8?4KBsgIVqam zozYL3Jx|N1>G94QBm1f0t9=%7);nYSY-Vmyqb<(2(>YVboNXqu+?-+S$d2tMHF>YB zAoWy->YxmSgt?%(J$v7|MU987CAunVqQ}WhMxBpE&(qLnjQD0MW~P#wrf#Mfo4*jNGp?Y-&jX#7v+J~qnzmwN z9GJTpg$Y+s^vn|-Jej#o&7S9mkQOdOH~GsrC)TV>H?aHL9cnI&HG=)^6$l@73C?dt z1)l(B?o!jy`lNV2Sb@Z#i#P}$C~slr9yKz$qL$m5Rk&}FjGafkuwRv33)R#_R363O zO{=i?RU&Xk)Qw)u%zbLiI~~E&o2&7&I1yIx#(5)lY!9fJG35}tH>?zU>Jwoa?2Tc3 z?^g{qy1ECj!fOSNh`!7DQg2NA&f+|zCV#vRE>^6>d$AYgNFN`}f56NmY8p51!@a}H z@X12tOW1fr?Ij+&To%#im%$6n0T zQR8)UJNBMgin=A?Xce{19ab{)gc=xcN5skHsLTx$c~HJ+Kb@JU)X3i2gsQ$PuTLE>Uz<$ZxzxcL6{xyjctX@yr5>t z$hBhs=PJxm55jwiH{Sfm%u8ze+**y9PAk!QaS&XSd~iRXnOD@zKE7PkQ(cX}V*{|Q z)*H@=%)F*1Zo*PHO<#$Rcl~fn&le5{n0Z6Z>|sFn=c|x3(+>}We37fgOg%MCV;AC} zk{E+VAE=l4LbsXS6C0@Md1M|Q>a2iLdoO%@;)@UiW*VtEI(-hxs#fBeu_sKm{cy&D znI>vlBW6R|V=az~yucBj{)pMZ%v)+Ii_|g6X&r98aYu!LKOB;oX{JWrVg}Sj&+7U! zZqO9-kqWKs`SXsNkNu{ixOF`$WnJ*7Fc>l~nQ5WscJV|!SKo*=-JMWt6^x6u%)F;& zu*x_*ulo;Kn;oFu5{#p~2j2&3@?NRpo2aoSH_Z-iGH0Qw#o~OVW^!*8ytLhlaS~g6 z-F+5W3e0?>rgeZa7Jn1__hE_hcA=QuY)@-mwNi7*ekgVx+luomEs$n#7Cw2*e5R(d zpCZ=$-GXIDPh;7)v-rJ*nJ?6sy&H(hGq*ur&IB)qoWt)TX1-G6uw5SCC+@`k`$l+t zF$^y*G4qX@<0ty#&+Y9PeN7*UBA+QOgqiQu)IbIg-)zUcgU9hj?mPzXW9A1nYKvvC zpmhfV)bxVlbK)Cq#y2${AMleh&hH8PtIej zJ~O|md7Ix8Nei?QoO>93x`yNFMrQs{v%9XR*zd1}(;aoO=218vTQc*PnhsxjAb7)G zM0Guk&l(X}dWe~S)HwI;ftHMYSkYA%qh>{7Q-5aKsF7*z2{#oTEWL6V%SEnHdlhEd z4JG+MfAjkwGDipIQAc4uC=w@nG1HzJ`zx|A)i{Jf1$t=R6$KepW;#%_=}I5;{&WaN zM~}lQAsTPE=}1k?I9V9D9ELPTAN3nzushL?UZ&tO5@}B&({Da&c)%%Yi7Do^DnDE9$KEn1s+s9Z%`<0te2p-|k|T!D z8=Zg?znSSq&6^^5lqXxl!}=7qR43!!Y&O>2sadjiFl5_{HTyBA@k&9=2Y9nMJ*cs0 z7>I3}ws@d%3LVB?#N9W{^rU96n5!~*WQPM;CdgZK2{Z38(~Fu{?FZv|qyrB4iTcM| zu3%RyGrg&C>pTd(mpNf{wHYF(U4`N|X8KSgC)PbauXVw^-==t!nt=v;Hg0{X$=|LB z#ceKlvHldk9?3$z2{SU(^jW_=Gd>nXusDJTh5Dau;cPE z+KU-EYC_H{LaoV5yjNR@JsYCWbs{tUsL{_?g7ZWloS0~i)(J%zmBCDZYCcUEg1D8w zIC94vUtSa;Y7={&4xq;GoDwcK`(cxlIj;07MUopc^3;@jDq(O)ApEq<;bMIYxB0Vv zAT|C2hG3<45FCbE;cDk{I6JX83e=c?Q$mF_7_;>((M7%zA??_E)gWpT42I(PyR)LM zp%q5%sm6+q%nYW+ag#D$*@wbC*-G@JxsS$O%qUXxcA_$>O3tBH)O6c1uLeeYm{FqU z&Vu21y&w`3#9Y_wSC4TeikTtQBnJ;i@rEe;J8p}MkDrJfRAz=!lX`pvB38$sb&xG? z?R}2oCG4GCnVPY_Bcb^v26AI;vGKu6d;~MYs7d~-0$HOtENN#8&!E>>9m>pbY91?% z#IwC({=T;zrg+rj5H~8+=wy!q#Ju{!zjo->!{|W@zBV~R>&JU2@mx4nYJNwI$1FLK z2dChKZ$Ca_rXu@39ZgND`9x8(=Mv88i9X8rKcR}(Z5>0+qKT7GCwd<#K5|B1&Clq> z*I33`@#^9v{3EdPpv7i&CwwkoJT>N9Cu8^V zG`JhNhl#S|nJrK8&)XQZ}#M{@-;6RCOibuyG@ih0}aF7W>N1HGOy zGl`nC(Q4TFDjmztyW;ZhUzlsp%w%fZJ55DxO$K&;cgD6}zwqBnW~NYcuviUGO|C*? 
zj96Pp`GpooX4I(ZH+w2Z`Db8|=p&{k_Zy?rnVCw>-O)1;+AkCJ#jY?>`~?$ZW~Nay zA!Y`aOwEM0gByz#PE92v#Ig>ItQ(lnMi0oBjz7} zA>t5=qe)Fe@dD&8&lEjVyy^~( zon-<0SP6P8xQv#xU%2yv9os5ulr?Q|dt^4A4NJ!l{GJ!bMF?QymGy$7G`4 zpg*|A*D=;mGh?paweEAz`3t61C)Kr~wLKm%U1TM_MEyG`!8O+Q&YL4lM zT+-Goycc+S-Sm-W=+K->u)tQ+M)Qo9%Lrh*424`Kvs42g& zpqAZ7H&P?7?}4w)*%;hdh|ubvsP4exY@$YX_!(U8o`p(}BD{_Kft=;c{720jH!oan z$;OuWVq8o70ewE!o2gOz=7Xa8OpItL!+Evuc+b})woo%_i68XDz3OyP8M5wuLnmK$ zY+I>0H!1-4rCAu5DSCFtenamu%xt6Pc$XmPb{J>&_0hf8CPL={~nqjwnFV5`_9s)CQ&5;O|I# zsmU#jz~gaOv0=vpjCT6~jbALzK5EPzA~Ca58v1)Zz=I+0A^(~g9co{y)A&RwL zU~z9|4pXxwI|f@vB;w_fhmcr5!=CSK-sK22{nTPHc4a&=!XKgIk2+*4u{cMm86j$* zUwjvbfqx%j{_a}H3}NT!F=}?z$0FfYEX=1o!piy@OdG)B=uxvnHx5P{qM;G<2=Q0% z<3|kpJ;$kei#T+zh{E%Sb+{CG4;5O>=u=a>G7jn^Bhcx#11mT@s3kYPHbYR*t+-7RP{^&(&g_D?_kB?-72cm!X>@ zGltZB`xJ{v!$6$+`WSbH6hZbGGe*?-zKq3GSsxU%)M0KzA=3H!f-yDO`J!*h4lg{f zuEXk$1?b0rj|nwp5wS3;5c{roJ%;J7QRiVQ2whHS(O)YlwaEx)a<$$3-!Mz(Am5ew-+X1x-mO8OKKF% zV=;Z09y+JjqUCo2lJ%Lfq9*@XEE;Qd#lF^BRJX;6y7Sb z^YKU&?i!7PywZKR>9|ArA&cWp%{%eFGpmORTJ7&+#9dcJZ)I_OsFArAg|;W7U^ra# zRS~%iBYCeSUus$=M`1yN3PMGl@teVW!*gGLAmi?X(YGj=w zVXP{LKbGRyter4?2RjeWQWG2ziJO=EqP5}<<~cc`ZV)q})NBur!m)0>F>%#x{GIQJ zsyFP|!l+pq69vULy-`?OiBeTZC^xY<=coz#8;NwY9#9m=rYmxneFItE)Ol(KS4QEA zTxWbab{B@q4%nW#_XN=o>OgGlsdKwj&*i8v5)e1p+T&m^W|FDd6&Qhs+y5>Kswl@=*x~thW>Tn0Hj99s@t;Ly zi$ouV-u8%p%kocCsR>vZfv#JAFEVf_N7x>FD0#9t7pYmCUs5+rI-0n@Zp|&{6a| zWacV0`TK%#dT$pz$`-lUR-(?BF8e(h)OfrP!m7F5p)sKp6B-YHSPwL~WoR zcr9}O`%iU*Hm|plMNRg>0MricE7s{t(Q?BH=6zY-dp0$#<-XYetPf0`N>Etrh)#j* z*d)|c#fp8fbuu`7xd>DGJ7Iq(7AJ?AHQC-c?J0-tS;dIa7P&8$%;ZvYUH1%vhWCg2 z~g5S(sqh@TK6ULV)q4(hH zc(-2UQLblwq_0!6N^%oI`6 z_kum1WetOY@-<|MUg=ZzGE+>=E?Ij~cSz(-HRPj*wy2$*%S;J1a@n?UvmcIkE&1rG z=8lw`%#>0ybeJvFUaFvTmjWnX5dATpFjGcNk=tnu)|rLny`-r4A@)UIX66PpGX|c< zY}aLo(#nIcGzdlf|G7y`%mrg)&R>QpS-BW8>ntACvN*S>8SP_$ZGAT4#(-S0H#i&< zmormN&B!MPSY5jV9@ld*SA47P(P5^7nrTl@pjzZt4AaO%>fadD$TCw&&6Ecx5i;Z; z>O|clYt4AP;$u)nP5+GJ=#+F6w`6m$dtoBh$g?=Nskt7mj}>Oe;m{=3+jb|x6wKVA zrm<2VC(;e@uPqz?y2)r<$IM-77RH~%x4*_%`ceXg@Kn+JgPD8OEGX26skb@0ivDx< zPcNZGlbLF2rk*{4UC*pUE~EsHPh3XhAZG4UbJX($?B#6nXs+l3Ri2I~J(zhw&Fr%$ z(R^6+g?=X14|`<5Qk|I^YWAudAnUq4>OW>zaOQqkDK;=v^vf4BAjGDCX#+b9t51Yjr_p&P`IMc?SjnAn`h%&(5@d5BZ zF2Ri_Wk?^(;=G`ytF{ULn-qvkVqM%q<|e)jXXYg}eO-((?0FF8JeHucQ#p1`X66+& z@e@q2qJ!usFX|EJ9ISx-UG{rkQ`2l_Ec)eyqNn&zOG2vf$(@-u)Vz%_#*oBwn5dY8 z=dbQy!wP2VsoA&H2y&;w#hhg>Cdl7|vA4 z(lx}onPTr`eh#Ac*C5u7#c84@V(BT&*Neuh8VTlD*B~yF{hqhf%=12ldNF5v`J4n# z(`!)pfthA%o*XrXa+&Bu7$6aOxQ}qyf*spCY8I%SLi;swc>iAP-6*Stsf3vpYVM3Y zjc-%pFjmyw3{ zGidP|cps@5*k+30=LrbxoC6b&XDHTVaXwMAGRzE?&l7RBB?ku+pW}%!Gp*FT-)f4I zk%_3-CH^13mvHfC<})?3wwXbzW}`U$1r~N^<_k4)d8Q~BpM?JoiQb<{uVMP0 zegAx=X89otw0BO1xu}2LY20gk_GWRuQPXj;B_^Fsf`fs$zyEy=6(x2pe5Xbw$r|r( zBxAs9QAaag^oH2N;{2dy@Lg+!uSmwOP7-*V)Z>W`Ge41H8a8vhkyZf|iSe04L?L8}SF+cVRJnyz~G$Uc>b*aFeJW64{r zxWKN{uGAbnVvn&m;_)s&16Imn54ssM-KcSXY=?b&5>b8*LVq|j zy{KugW{ZAlaq!!gj&r^Z7(0=f-qh%JvO&)NIBe;fj!ex4SZrse4>hmiEn%P;i)WwG z5K#999{$YqrKYpJ6}~Qs#f#r*Se^D7>-RGwLrqALrN}jmM*A-5IB@4BPVkz)vef)+ zX9>O1D9D$mA?n$4bmR4i<*2FBGQ+IEXatVB3ZvwwQ029f`%%-a$qa)&iG4rsuZo&0 zb+D1JImrIhq!yUrpjc~KBKoYp9Q6nmx%HeC+iQ1yL-HJT)F? 
zPhmmtaQNKMghyfx%u<;dNXVQ{({Mc0{8pk_y=8CHKe2j3Z4m~VX- zAKoxCh??>;BXs!~3MH|B@_uZUs87MnU}{u;iuc~Y5Hy;ITJTFM(7^~h!jjQV^3{dpN<@q_tO)G)c24V335D*vOAs_7LkL1?$`0)ATuh|tQYU(O}#wfF)SB;y9%(M-$zGK zf0E6j|e#yHplIy;?lRX+~{8naP5 znvL~%YRVfdFs50|#cq&dUPlS8^Ly_EYF=2I!s5Fl7E1DvC&@%#-gkK-HFIa4!h?Ud zh#sDYRns$3&A&?~QDY@*C3*we;%RFx*1o%n!z0)?(`0ItGtAMFY>ma$xqx~)x_@Br zAycU7w7>!v)J;)UpNG#*SMacs88vDG#67+!)(8pFdFb2qvZ#5>j%_M6gVtK&j#MAr zg7VP3_96nySe$9pAlrP)IZdLb|)$9A1C7TM`mVF z^K72z*S2mq22aaJ^Otzs^JPYzn!_h-MQx59X!@5A{mMA3&}L>PHAdoHdF_<{Fyuu( z+V|9Mma5?4xtI`ITJ@v#D9( z>xfnQi_jyp08Ywb*zd&RXi`&U;)F1@dDtp@9d6UlVof(@=1_z8&PW?P7jt#5W2|{F zX7yvoHkX>68=R3fRudmq7Gj5LAbRX%apqC;*}@rL?`yzpZXqT;^~dpxEY5sty7UzF zs}rUn@?RkauJMC@2r~<)*&6K%Z((}M7UA+?f0zwpW+63t!S0y7K?6$?i_qpH>fGFA z_qRpVs8_o|)lvhJtHrov=`ZqXSsYNKQ|$_+^|SEoRxvi@_`%=~i?f&-z3FaHA3qOw z=NH3YGXSIOnOQ=OU$!eO1Lwo6ycm-%iC)^8|7Q-2(e0qyaaB9V&F%L&Z!}m+)#I-& zaQw3fuSBLq`ITTStz(gvQIm1o1$j@_;m@}sxCVqFoOc>pPL0JrXKd8jj;iV+(OEtm z&BIun71Y4N30K{;@nU2la@R%SuPQSusX6h)5gUq*pyR$mv|AU8RlF+mDr(Zo?J-d1 z1bojIqUY^+%!y=iR#S6eg&nl-nIc*7Iy(BNz-&J=Yp7Y$W`$%OD=eN_fa|j_VZvl) z)>6~G$r`ig+hORv0?3z$Y+Ma?#;v2q)}CTLvcGXJ$P$1?HAG-{u6f z&iPO_$;2cJW;Rgs-o*my`L6KF&PSGmn6df8%tmT97@A?bsAfKDxfIhx*R7f(>@Kp2 z8vl=GIBjqS3tXhwX&{9Szw`V@%^LAGwma4fvXi86UX~BTa297XHMYkr5WU_H$3(T{ zh~#T14rXQxHA+3r(No4B?meXF*trP$EzE4CCOpd=t0DrhOI3=5jA9h2uqXF6YE%!2 zj?ivF7&tr^qpp@BkyrTNPR&Vk3-sC(4D0u~aLB)big|2ya0fMW!Y%PGB1B}Vi7sl_ zZ{ltXJGPzF+}vvcoIHz{qJ!h1?YHpFn#I{gOzD4vv--;)ex`qeIQj^_H0Gdj@qs#0p*MH5^i8WF zGY6=#v$RCjN;g~)Ul=vrr5JUAnS<1{Jhy<{8W$AJ6y0M*cT&5F?Eg7LO~L~!41D8+ z&98VXgdVd+W@{cYTGDV^kD24t^jw_=7^=3 zUEgbiQky(bLwK54LHGp1&YzAZG4AIJAyQjw7@c6%IS#)O)s%SHBb`vb5x$P-n}E+GCE zGpDFo($fZ~qjd18G!GX=29b;mJFibu6WVHnyHz_;BhH4tJ2V^vo-t!V z&4gGx+}ykj{lqiaPGpG=OJ~NCnzy1-&G68rI3V6Zbnl14p6_C^qUL60&Gv??~>-p}6G9j4d_F z>W=8B1{AHxhemY>GNvng@; zKH^76F|cb8eEPFE&eX(;-7Y=<&BLNCQq2_S}9^}0?@>}Ah}cH_n#Xid*&d; zPl^nMKy;I`I3CnAE^vollqOuoeaGjzKbD3w<4KLOjt2?~G;wKPKH75qvFZUcXQ*i@ zbH_J7P56wIqUCh}4mmO7MNOoQ8z#Kd#B0rb=!y62mi5edQ*&d2J06KD;BV$jA#*PP z*Tyj8LrrHnca+T6fKs&-11<-^SD(H6`%+_g-wiFDHL%803e9N&Sn!&~@uOzWLh+w| z)qsMP6g?aKu;w^3{?vSMcEe+ZnRr(#b{iJ?!N-@G0BWAOxZ?WI8R#QsRV(3#iF~XB zsVVn%L-gC}$jZ*e)){`-W6RO^&R~Wr8@C8*W|xQ= zxDOMt=UOhV&GABn5u4kHq^6sT8STGoCg=!7{pPtL&Y7%uhcPSS3b^%c;VnzW-d_EpzaRY+q3Xq+z;AXyfGt` znRsf_Znz`l^=$O{D8=`BZ&Zz8CV?80TcTTRl_sW46vr0jgKxjt7$j0tl;?rAj|-qY zxd64xeE}bqX_Q3GYGqH%8NU!6M-@Q2%@=Y!Lo1n@W$B(Un7TxC=8@v}6F(G}vp6Z# zbbjrQB@dUQR9=d;HU98C%uFgZ+eVX_<0Z+$L|M+RbJ5i^&mY3h3ho{~*CCw8i??i37%UhMZ=p(bIN zC+?X22g}tG=m@j$A&Zkn&BCTLm@{iDOvO|ZW#1;7f#7!U|QW-sMRnd zp+-Y=8@uYd8>L3+a8^8vjX#;mp~fo98K1UliOx<};Wr`_3qCTFOO1-iWchkk3yZYV z@qAAxQVp5Oqh{fG2b3D@LGOe#s2&JKWfyiokWzEyr9HO4+=KCcS1@mF7#8t&*nDbU z?XZPH)E?X&eg!UK6)ex5J%bCVvDUT5r}q2sb>1bset!$=Gj{dWUkx?mva}9 znR*@@cvjbSYK~@}LT2zj9C&;Y)n6lUn$N=)Qq%dP2?nm;hw`wCP*@&;;A8B$T|~|K zY$Gh)eh{BbQeoLG0*N!(w@fiLJ8qnS>O5Vj2;=f10(W`lR0%a#T=bBTbr6imzjc}L!)4PmzgqZLVp}Y=C8vzVVVN<3z0}Y!sZ-qP*d8r zANq%n;Kb7uY!N%HCiDrQbGJ9C8RescpC^vvtw{><83ld zi``~6H<>A?rapWRg8cOHIXzkI){Vo;If3-pDySLsc{h%V?2p4^lA%%(hl)$gR8o_a zxdYNYM)2227W3ZmsLE%iiW*hF9dP(yj0XnEsH#f9!9r$kQ!_<$S6SV`9DT%Y)!XKY zkbBO|9cpIp-GTlEmS`61U(ZJ*qhM$d9fP~nq(0q-C(5?y(3XsW|56bDkHxu1O&8Z4 zSSB)^2X9J-zvz@`_=lNlY8obN$ErDwqHAFaYPVj&iv(uwQ?q%>PG~=ILYz?wriZ2D zqBb)RsJR%oTU0a`S(gpT@XN@+b_Hf?sB!7E8_oLS-8Ur#hkHvf^ej8y9#S*)fHr1W zxuH!f1q%=4qUS|s9#Qjg;a=EH_kc#d7=sikmd|9zR!a@^ba45bnD@65-SHHzBl0AR z^Ozb{iRgl4T+jlpYbvbH&(#Pb~FHSe!T1 zq=~sf`^0dZjm*H*f6w8i#Y{alk;Vq{h$M1EH6r;3mF<<;z|}tC*Q4YLa|!Y8Q>q^`)q6*NDA?nE6c2{VdTrODh^*2A0A1Ks{d6uw(l|jn(BKQ7t-J 
zbf7Cm&gcf%rm#3)sTt`aDt7*kf>!TRY!0o*%r(q>qh?r6Fp69v;p$zAw#?V)cZiwq z)MzV&z@9Y7UDG!lXsv(5NXx-j$d5pvcTmYU;a+dz;w#RyzMC zy5D|@ewl3Ceo^zfUobpAgka2po2dC!2ZK0fep7SlZy;Xv3`TrK8HQFp#_rGTUHK0+ z+9!h`6`AOZa&BO?sAeYb$;@ABrWOYxtJM$X9ybtZ_yFoFnfXV}W!WHfJ?)2)&C&o!k9$L|@dm2>?ug7?X4+Ge z|1}UXrruaQ^`_{^cpFFUndv~ymWrS&<#7`FUKdk(7GkT)<7l z2UnnNCyUdGnxgtZRLl|cmB(%(b3z3!TQk#{no}2oaN?H-EJxpjja~)ZOql6HP4?VC z=pS*%q4*mxQLn%P3ud}fqq{H&qbGafPuLC2?o@#dHq3ORMpiipdo(;z99oXDiRDn_ zdw#l8^B^?{L*9F0^0iw~wz~xxKGr>`k+udQxzz(_?%qP!oLe}#iyd1}YP$FbK|9z3 zMxtWPPH`@H+A`CNnofTLF{!gBZhb6Aueuvh=JVIRsY$&Zgg$TGkvFdr$5d~kLn4dQ zhZ?=YAXo%?V8+dIOcMK^PDe4*mzrlggE9P@JDL(I(D&dC>|Dc)3^gP7i(RmNJkZ>u z0)PIyfj^6xk)`IxxnR6@a)+IHIrdB~L&JP#8??TZ!*Fj2 z`fgxGo|@w(!H~Zy=GQw{U{Pxk3KuamkeU$PU~Fh}#mj9KP(CSk(l)ZW8U<=>Mup%& zxGPfRE1+3igfPAyJct@o@%%~K|4K*vOpzT$JMj2+5sJSRQFg_C56&ub~;kH#F`Z_W*gqrEML!hnhh8b%_ zRb6px=kGByl$ykdv(P>5hNlDXV7|t6XvHw2OpVgBP%OXWf>hbNxKmz;>F<~sMveEX zFw~~G;?dr_NYg4r)Iw&4Q}Z=B6e@$nhSXk1>Um~Vs7d<~hCM$-*Y?^w$h&?W zGY7KsZ3H!|e~010Qx`nfzK0uM3UH&G#TiMB^0o8OYj%P2r)u19&d0=;%#5OD-jDMb zc;6Xet@kiV>@RwDh8b0AirdaX8s&_Hfz^n6D>4%Dm>EsYhQsGEPTK_wMMcPu#`##x z=Q_twGj)R47qi?M(|T1yIUx^sY+0PK)Yw&>$NLV>SQ1^0r>0VDwPt1E)QmTYM@6Fp3=^LsGBpd`-!e0Y8pp8-nDfyA;}1VU zx7k?;pTfRn=2Fu`E&*~;4!D;kGM8<#MAk2hGmo03z2jje>xiiPPq6YyCM0WFocYvj z-=6@RFAj)kdWKOGvheCNi?e{5rf=~$X=D$lj?eHSA`_m^nOR89_)ZBJQR;x0z^4e? Omthresh] = 0 + ratio = 255/thresh + dm = dm*ratio + if False: + dm = dm/dm.max() + dm_color = cv2.applyColorMap(dm, cv2.COLORMAP_JET) + dm = dm_color + else: + dm = cv2.cvtColor(dm.astype('uint8'), cv2.COLOR_GRAY2BGR) + return dm + +def visAnnotatedDepthMap(dm, pose, cfg, thresh=750): + dm = visDepthMap(dm, thresh) + pose = xyz2uvd(pose,cfg) + for pt2 in pose: + cv2.circle(dm, (int(pt2[0]), int(pt2[1])), 3, (0,0,255), -1) + return dm + +def visAnnotatedDepthMap_uvd(dm, pose, thresh=750): + dm = visDepthMap(dm, thresh) + for pt2 in pose: + cv2.circle(dm, (int(pt2[0]), int(pt2[1])), 3, (0,0,255), -1) + return dm + +'''unit test +''' +def run_heatmap_from_xyz(): + from data.bigHand import BigHandDataset + + pts = np.array([-67.4598, 5.3851, 584.7425, -55.6470, 8.8958, 587.4889, -35.5874, -54.6665, 583.3420, -54.7895, -53.8799, 577.8048, -71.0328, -51.3926, 573.4493, -88.8696, -46.2022, 569.1099, -32.8905, -20.8474, 553.7415, -18.7491, -39.3305, 532.7702, -19.8893, -56.4645, 516.0034, -35.5810, -69.2128, 545.6373, -35.5768, -78.8591, 520.6336, -35.2772, -75.8186, 501.8809, -52.5099, -66.7139, 535.8283, -51.0812, -74.7579, 509.5187, -51.7939, -78.6711, 488.8988, -72.3119, -85.2855, 549.0604, -73.1781, -108.2356, 532.5458, -69.9800, -125.8427, 521.5565, -101.7839, -74.5066, 557.4333, -110.1215, -92.7800, 549.8948, -117.0142, -109.9064, 545.4029 +]) + pts = pts.reshape((-1,)).astype(np.float32) + + tf.reset_default_graph() + xyz_pts = tf.placeholder(tf.float32,(BigHandDataset.pose_dim,)) + cfg = BigHandDataset.cfg + heatmap_op = heatmap_from_xyz_op(xyz_pts, cfg) + + with tf.Session() as sess: + (heatmap,) = sess.run([heatmap_op], {xyz_pts:pts}) + print('gaussian blurred') + summap = np.zeros((BigHandDataset.cfg.h, BigHandDataset.cfg.w)) + print(heatmap.shape) + for hm in heatmap: + summap += hm + + summap /= summap.max() + import matplotlib.pyplot as plt + plt.imshow(summap, interpolation='none') + plt.show() + +if __name__ == '__main__': + run_heatmap_from_xyz() diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/visualization.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/visualization.py new file mode 100644 index 
000000000..adc8217cd --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/data/visualization.py @@ -0,0 +1,137 @@ +# for matplotlib wrapper to tf summary +import tensorflow as tf +import tfplot, matplotlib +import matplotlib.lines as lines + +FLAGS = tf.app.flags.FLAGS + +def figure_heatmap(hm): + fig = matplotlib.figure.Figure() + ax = fig.add_subplot(1,1,1) + im = ax.imshow(hm, cmap=matplotlib.cm.jet) + fig.colorbar(im) + return fig + +def figure_joint(dm, uvd_pt): + fig = matplotlib.figure.Figure() + ax = fig.add_subplot(1,1,1) + ax.imshow(dm, cmap=matplotlib.cm.Greys) + + if FLAGS.dataset == 'bighand': + ax.scatter(uvd_pt[0,0], uvd_pt[0,1], s=200, c='w') + ax.scatter(uvd_pt[1:6,0], uvd_pt[1:6,1], s=100, c='w') + ax.scatter(uvd_pt[6:9,0], uvd_pt[6:9,1], s=60, c='c') + ax.scatter(uvd_pt[9:12,0], uvd_pt[9:12,1], s=60, c='m') + ax.scatter(uvd_pt[12:15,0], uvd_pt[12:15,1], s=60, c='y') + ax.scatter(uvd_pt[15:18,0], uvd_pt[15:18,1], s=60, c='g') + ax.scatter(uvd_pt[18:,0], uvd_pt[18:,1], s=60, c='r') + elif FLAGS.dataset == 'nyu': + ax.scatter(uvd_pt[10:,0], uvd_pt[10:,1], s=200, c='w') + ax.scatter(uvd_pt[0,0], uvd_pt[0,1], s=60, c='c') + ax.scatter(uvd_pt[1,0], uvd_pt[1,1], s=90, c='c') + ax.scatter(uvd_pt[2,0], uvd_pt[2,1], s=60, c='m') + ax.scatter(uvd_pt[3,0], uvd_pt[3,1], s=90, c='m') + ax.scatter(uvd_pt[4,0], uvd_pt[4,1], s=60, c='y') + ax.scatter(uvd_pt[5,0], uvd_pt[5,1], s=90, c='y') + ax.scatter(uvd_pt[6,0], uvd_pt[6,1], s=60, c='g') + ax.scatter(uvd_pt[7,0], uvd_pt[7,1], s=90, c='g') + ax.scatter(uvd_pt[8,0], uvd_pt[8,1], s=60, c='r') + ax.scatter(uvd_pt[9,0], uvd_pt[9,1], s=90, c='r') + elif FLAGS.dataset == 'msra': + fig_color = ['c', 'm', 'y', 'g', 'r'] + ax.scatter(uvd_pt[0:,0], uvd_pt[0:,1], s=200, c='w') + for f in range(5): + ax.scatter(uvd_pt[f*4+1,0], uvd_pt[f*4+1,1], s=90, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+2,0], uvd_pt[f*4+2,1], s=80, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+3,0], uvd_pt[f*4+3,1], s=70, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+4,0], uvd_pt[f*4+4,1], s=60, c=fig_color[f]) + + elif FLAGS.dataset == 'icvl': + fig_color = ['c', 'm', 'y', 'g', 'r'] + ax.scatter(uvd_pt[0:,0], uvd_pt[0:,1], s=200, c='w') + for f in range(5): + ax.scatter(uvd_pt[f*3+1,0], uvd_pt[f*3+1,1], s=90, c=fig_color[f]) + ax.scatter(uvd_pt[f*3+2,0], uvd_pt[f*3+2,1], s=80, c=fig_color[f]) + ax.scatter(uvd_pt[f*3+3,0], uvd_pt[f*3+3,1], s=60, c=fig_color[f]) + return fig + +def figure_joint_skeleton(dm, uvd_pt): + fig = matplotlib.figure.Figure() + ax = fig.add_subplot(1,1,1) + ax.imshow(dm, cmap=matplotlib.cm.Greys) + + if FLAGS.dataset == 'bighand': + ax.scatter(uvd_pt[0,0], uvd_pt[0,1], s=200, c='w') + ax.scatter(uvd_pt[1:6,0], uvd_pt[1:6,1], s=100, c='w') + ax.scatter(uvd_pt[6:9,0], uvd_pt[6:9,1], s=60, c='c') + ax.scatter(uvd_pt[9:12,0], uvd_pt[9:12,1], s=60, c='m') + ax.scatter(uvd_pt[12:15,0], uvd_pt[12:15,1], s=60, c='y') + ax.scatter(uvd_pt[15:18,0], uvd_pt[15:18,1], s=60, c='g') + ax.scatter(uvd_pt[18:,0], uvd_pt[18:,1], s=60, c='r') + elif FLAGS.dataset == 'nyu': + fig_color = ['c', 'm', 'y', 'g', 'r'] + for f in range(5): + ax.plot([uvd_pt[f*2,0], uvd_pt[f*2+1,0]], + [uvd_pt[f*2,1], uvd_pt[f*2+1,1]], color=fig_color[f], linewidth=3) + ax.scatter(uvd_pt[f*2,0],uvd_pt[f*2,1],s=60,c=fig_color[f]) + ax.scatter(uvd_pt[f*2+1,0],uvd_pt[f*2+1,1],s=60,c=fig_color[f]) + if f<4: + ax.plot([uvd_pt[13,0], uvd_pt[f*2+1,0]], + [uvd_pt[13,1], uvd_pt[f*2+1,1]], color=fig_color[f], linewidth=3) + ax.plot([uvd_pt[9,0], uvd_pt[10,0]], + [uvd_pt[9,1], 
uvd_pt[10,1]], color='r', linewidth=3) + + ax.scatter(uvd_pt[13,0], uvd_pt[13,1], s=200, c='w') + ax.scatter(uvd_pt[11,0], uvd_pt[11,1], s=100, c='b') + ax.scatter(uvd_pt[12,0], uvd_pt[12,1], s=100, c='b') + + ax.plot([uvd_pt[13,0], uvd_pt[11,0]], + [uvd_pt[13,1], uvd_pt[11,1]], color='b', linewidth=3) + ax.plot([uvd_pt[13,0], uvd_pt[12,0]], + [uvd_pt[13,1], uvd_pt[12,1]], color='b', linewidth=3) + ax.plot([uvd_pt[13,0], uvd_pt[10,0]], + [uvd_pt[13,1], uvd_pt[10,1]], color='r', linewidth=3) + + elif FLAGS.dataset == 'msra': + fig_color = ['c', 'm', 'y', 'g', 'r'] + ax.scatter(uvd_pt[0:,0], uvd_pt[0:,1], s=200, c='w') + for f in range(5): + ax.scatter(uvd_pt[f*4+1,0], uvd_pt[f*4+1,1], s=90, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+2,0], uvd_pt[f*4+2,1], s=80, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+3,0], uvd_pt[f*4+3,1], s=70, c=fig_color[f]) + ax.scatter(uvd_pt[f*4+4,0], uvd_pt[f*4+4,1], s=60, c=fig_color[f]) + ax.plot([uvd_pt[f*4+1,0], uvd_pt[f*4+2,0]], + [uvd_pt[f*4+1,1], uvd_pt[f*4+2,1]], color=fig_color[f], linewidth=3) + ax.plot([uvd_pt[f*4+2,0], uvd_pt[f*4+3,0]], + [uvd_pt[f*4+2,1], uvd_pt[f*4+3,1]], color=fig_color[f], linewidth=3) + ax.plot([uvd_pt[f*4+3,0], uvd_pt[f*4+4,0]], + [uvd_pt[f*4+3,1], uvd_pt[f*4+4,1]], color=fig_color[f], linewidth=3) + elif FLAGS.dataset == 'icvl': + fig_color = ['c', 'm', 'y', 'g', 'r'] + ax.scatter(uvd_pt[0:,0], uvd_pt[0:,1], s=200, c='w') + for f in range(5): + ax.scatter(uvd_pt[f*3+1,0], uvd_pt[f*3+1,1], s=90, c=fig_color[f]) + ax.scatter(uvd_pt[f*3+2,0], uvd_pt[f*3+2,1], s=80, c=fig_color[f]) + ax.scatter(uvd_pt[f*3+3,0], uvd_pt[f*3+3,1], s=60, c=fig_color[f]) + ax.plot([uvd_pt[f*3+1,0], uvd_pt[f*3+2,0]], + [uvd_pt[f*3+1,1], uvd_pt[f*3+2,1]], color=fig_color[f], linewidth=3) + ax.plot([uvd_pt[f*3+2,0], uvd_pt[f*3+3,0]], + [uvd_pt[f*3+2,1], uvd_pt[f*3+3,1]], color=fig_color[f], linewidth=3) + + return fig + +def figure_smp_pts(dm, pts1, pts2): + fig = matplotlib.figure.Figure() + ax = fig.add_subplot(1,1,1) + ax.imshow(dm, cmap=matplotlib.cm.jet) + + for pt1, pt2 in zip(pts1, pts2): + ax.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]]) + ax.scatter(pt1[0], pt1[1], s=60, c='w') + ax.scatter(pt2[0], pt2[1], s=60, c='m') + return fig + +tf_heatmap_wrapper = tfplot.wrap(figure_heatmap, batch=True, name='hm_summary') +tf_jointplot_wrapper = tfplot.wrap(figure_joint_skeleton, batch=True, name='pt_summary') +tf_smppt_wrapper = tfplot.wrap(figure_smp_pts, batch=True, name='smppt_summary') + -- Gitee From 63c83732a6baf33c307ffb6af8c2fbec4be8e502 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:17:25 +0000 Subject: [PATCH 09/11] update --- .../exp/fetch_icvl_model.sh | 25 +++++++++++++++ .../exp/fetch_msra_model.sh | 31 +++++++++++++++++++ .../exp/fetch_nyu_model.sh | 25 +++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_icvl_model.sh create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_msra_model.sh create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_nyu_model.sh diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_icvl_model.sh b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_icvl_model.sh new file mode 100644 index 000000000..84406e467 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_icvl_model.sh @@ -0,0 +1,25 @@ +cur_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" 
&& pwd )" +cd $cur_dir +model_dir=${cur_dir}/train_cache/icvl_training_s2_f128_daug_um_v1/ +if ! [ -d $model_dir ]; then + mkdir -p $model_dir +fi + +cd $model_dir +url=https://polybox.ethz.ch/index.php/s/f9EWUGSpTeKmFDo/download +fname=icvl.tar.gz + +if [ -f $fname ]; then + echo "file already exists, no need to download again" +else + echo "downloading the pretrained model(62M)..." + wget $url + mv download $fname +fi + +echo "unzipping..." +tar xvzf $fname +mv icvl/*.* ./ +rmdir icvl/ + +echo "done." \ No newline at end of file diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_msra_model.sh b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_msra_model.sh new file mode 100644 index 000000000..e1db07d0e --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_msra_model.sh @@ -0,0 +1,31 @@ +cur_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" +cd $cur_dir + +cache_dir=${cur_dir}/msra_model +if ! [ -d $cache_dir ]; then + mkdir $cache_dir +fi +cd $cache_dir + +fname=msra.tar.gz +url=https://polybox.ethz.ch/index.php/s/B2W1ngyUAitsv2e/download +if [ -f $fname ]; then + echo "file already exists, no need to download again" +else + echo "downloading the pretrained model(566M)..." + wget $url + mv download $fname +fi +echo "unzipping..." +tar xvzf $fname + + +cd $cur_dir +for pid in {0..8}; do + tar_dir=${cur_dir}/train_cache/msra_P${pid}_training_s2_f128_daug_um_v1/ + src_dir=${cache_dir}/msra/P${pid}/ + mv $src_dir $tar_dir +done + +rmdir ${cache_dir}/msra +echo "done." \ No newline at end of file diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_nyu_model.sh b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_nyu_model.sh new file mode 100644 index 000000000..9654cd227 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/exp/fetch_nyu_model.sh @@ -0,0 +1,25 @@ +cur_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" +cd $cur_dir +model_dir=${cur_dir}/train_cache/nyu_training_s2_f128_daug_um_v1/ +if ! [ -d $model_dir ]; then + mkdir -p $model_dir +fi + +cd $model_dir +url=https://polybox.ethz.ch/index.php/s/Q4GS7bgRRM3zK5J/download +fname=nyu.tar.gz + +if [ -f $fname ]; then + echo "file already exists, no need to download again" +else + echo "downloading the pretrained model(61M)..." + wget $url + mv download $fname +fi + +echo "unzipping..." +tar xvzf $fname +mv nyu/*.* ./ +rmdir nyu/ + +echo "done." 
\ No newline at end of file -- Gitee From b63a24038baf2d38bab97ea8b5eddcbda1526dd6 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:17:59 +0000 Subject: [PATCH 10/11] update --- .../model/__init__.py | 0 .../model/hourglass_um_crop_tiny.py | 909 ++++++++++++++++++ .../model/test_model.py | 94 ++ .../model/train_multi_gpu.py | 159 +++ .../model/train_single_gpu.py | 179 ++++ 5 files changed, 1341 insertions(+) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/__init__.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/hourglass_um_crop_tiny.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/test_model.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_multi_gpu.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_single_gpu.py diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/__init__.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/hourglass_um_crop_tiny.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/hourglass_um_crop_tiny.py new file mode 100644 index 000000000..b9f6cc47d --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/hourglass_um_crop_tiny.py @@ -0,0 +1,909 @@ +'''to simultaneously regress the 3D joint offset and the 2D joint heatmap +''' +from __future__ import print_function,absolute_import, division + +import time, os +import numpy as np +from datetime import datetime +import importlib + +import gpu_config +import tensorflow as tf +import data.util +from data.util import heatmap_from_xyz_op, CameraConfig, xyz2uvd_op, uvd2xyz_op +import data.util +import data.preprocess +import numpy as np, numpy.linalg as alg + +# from model_new.train_single_gpu import train +from model.train_single_gpu import train +from model.test_model import test +import network.slim as slim + +from data.preprocess import generate_xyzs_from_multi_cfgs, crop_from_xyz_pose, crop_from_bbx, center_of_mass, norm_xyz_pose, unnorm_xyz_pose + +from data.visualization import tf_heatmap_wrapper, tf_jointplot_wrapper, tf_smppt_wrapper +from data.evaluation import Evaluation + +# implementation setting +tf.app.flags.DEFINE_integer('num_gpus', 1, #gpu_config.num_gpus, + """how many gpu to be used""") +# use cpu instead if no gpu is available +tf.app.flags.DEFINE_integer('batch_size', 40, + '''batch size''') +tf.app.flags.DEFINE_integer('debug_level', 1, + '''the higher, the more saved to summary''') +tf.app.flags.DEFINE_integer('sub_batch', 5, + '''batch size''') +tf.app.flags.DEFINE_integer('pid', 0, + '''for msra person id''') +tf.app.flags.DEFINE_boolean('is_train', True, + '''True for traning, False for testing''') + +# the network architecture to be used +tf.app.flags.DEFINE_string('net_module', 'um_v1', + '''the module containing the network architecture''') +tf.app.flags.DEFINE_boolean('is_aug', True, + '''whether to augment data''') +tf.app.flags.DEFINE_string('dataset', 'nyu', + '''the dataset to conduct experiments''') +# epoch +tf.app.flags.DEFINE_integer('epoch', 80, + '''number of epoches''') + +# network specification +tf.app.flags.DEFINE_integer('num_stack', 2, + 'number of stacked hourglass') 
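# NOTE: editorial sketch, not part of this patch. The flags above configure the
# stacked hourglass that regresses, per joint, a 2D heatmap plus a truncated 3D
# distance heatmap and a unit offset map; the methods _hm_3d, _um and _resume_om
# further down in this file implement that encoding with the truncation radius
# max_dist_3d = 0.8. The snippet below is a minimal, self-contained NumPy
# illustration of the same encoding; the 4x4 grid, the random points and all
# variable names are hypothetical, chosen only for this example.
import numpy as np

R = 0.8                                        # truncation radius (max_dist_3d)
pts = np.random.uniform(-1.0, 1.0, (4, 4, 3))  # normalized xyz of a toy 4x4 pixel grid
joint = np.array([0.1, -0.2, 0.3])             # one normalized joint position
off = joint[None, None, :] - pts               # per-pixel offset vectors to the joint
d = np.linalg.norm(off, axis=-1)               # per-pixel 3D distance to the joint
hm3 = np.maximum((R - d) / R, 0.0)             # truncated 3D heatmap, values in [0, 1]
safe_d = np.maximum(d, 1e-8)[..., None]        # guard against division by zero
um = np.where(d[..., None] < R - 1e-2, off / safe_d, 0.0)  # unit offsets near the joint
off_rec = um * (R - hm3 * R)[..., None]        # _resume_om-style reconstruction
# Inside the truncation radius off_rec equals off; outside it is zeroed out.
# End of editorial sketch.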
+tf.app.flags.DEFINE_integer('num_fea', 128, + 'number of feature maps in hourglass') +tf.app.flags.DEFINE_integer('kernel_size', 3, + 'kernel size for the residual module') + +FLAGS = tf.app.flags.FLAGS + +MAXIMUM_DEPTH = 600.0 + +class JointDetectionModel(object): + _moving_average_decay = 0.9999 + _batchnorm_moving_average_decay = 0.9997 + _init_lr = 0.001 + if FLAGS.dataset == 'nyu': + _num_epochs_per_decay = 10 + elif FLAGS.dataset == 'msra': + _num_epochs_per_decay = 20 + _lr_decay_factor = 0.1 + + _adam_beta1 = 0.5 + + # maximum allowed depth + _max_depth = 600.0 + + # input size: the input of the network + _input_height = 128 + _input_width = 128 + + # output size: the output size of network, as well as the largest size of hourglass model + _output_height = 32 + _output_width = 32 + + _gau_sigma = 3.0 + _gau_filter_size = 10 + + _base_dir = './exp/train_cache/' + + + def __init__(self, dataset, detect_net, epoch, net_desc='dummy', val_dataset=None): + ''' + args: + dataset: data.xxxdataset isinstance + detect_net: funtional input of the net + desc: string, the name of the corresponding cache folder + notice: + any tf operations on the graph cannot be defined here, + they can only be defined after the graph is initialized by the training module + ''' + self._dataset = dataset + self._jnt_num = int(dataset.jnt_num) + self._cfg = self._dataset.cfg + + self._num_batches_per_epoch = dataset.approximate_num / (FLAGS.batch_size*FLAGS.sub_batch) + self._net_desc = net_desc + self._net = detect_net + self._max_steps = int(epoch*self._num_batches_per_epoch) + + self._val_dataset = val_dataset + self._model_desc = '%s_%s_s%d_f%d'%(dataset.name, dataset.subset, FLAGS.num_stack, FLAGS.num_fea) + if FLAGS.is_aug: + self._model_desc += '_daug' + + if self._val_dataset: + assert self._jnt_num == self._val_dataset.jnt_num, ( + 'the validation dataset should be with the same number of joints to the traning dataset') + + if not os.path.exists(self._base_dir): + os.makedirs(self._base_dir) + + self._log_path = os.path.join(self._base_dir, self.name, 'validation_log.txt') + + '''data interface + 1. initialize the dataset + 2. the global setting of the batch_size + 3. 
total number of steps + ''' + def batch_input(self, dataset, batch_size=None): + if batch_size is None: + batch_size = FLAGS.batch_size + dm_batch, pose_batch, cfg_batch, com_batch = dataset.get_batch_op( + batch_size=batch_size, + num_readers = 2, + num_preprocess_threads = 2, + preprocess_op=dataset.preprocess_op(self._input_width, self._input_height)) + return [dm_batch, pose_batch, cfg_batch, com_batch] + + def batch_input_test(self, dataset): + dm_batch, pose_batch, cfg_batch, com_batch, name_batch = dataset.get_batch_op_test( + batch_size = FLAGS.batch_size, + preprocess_op = dataset.preprocess_op(self._input_width, self._input_height)) + return [dm_batch, pose_batch, cfg_batch, com_batch, name_batch] + + @property + def train_dataset(self): + return self._dataset + + @property + def val_dataset(self): + return self._val_dataset + + '''hyper parameters + ''' + @property + def init_lr(self): + '''the initial learning rate + ''' + return self._init_lr + @property + def lr_decay_factor(self): + '''the rate of exponential decay of learning rate + ''' + return self._lr_decay_factor + + @property + def decay_steps(self): + '''lr does not decay when global_step < decay_steps + ''' + return self._num_batches_per_epoch * self._num_epochs_per_decay + + @property + def moving_average_decay(self): + return self._moving_average_decay + + @property + def max_steps(self): + return self._max_steps + + '''training operation + ''' + def inference(self, normed_dms, cfgs, coms, reuse_variables, is_training=True): + with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables): + # resize the image to fit the network input + # during training, inference is called by loss function, where dms are resized + end_points = self._net(normed_dms, cfgs, coms, self._jnt_num, is_training) + return end_points + + max_dist_2d = 4.0 # 4 pixels + max_dist_3d = 0.8 # 80mm 3d distance + def _hm_3d(self, oms): + '''generate 3D distance heatmap according to the offset map + Args: + oms: the normalized xyz offset maps, (b,h,w,3*j) + Returns: + hms: the 3D heatmap, (b,h,w,j) + ''' + om_list = tf.unstack(oms, axis=-1) + hm_list = [] + for j in range(self._jnt_num): + xx,yy,zz = om_list[j*3], om_list[j*3+1], om_list[j*3+2] + hm = tf.sqrt(xx**2+yy**2+zz**2) + hm = tf.divide(self.max_dist_3d-hm, self.max_dist_3d) + hm = tf.maximum(hm, tf.zeros_like(hm)) + hm_list.append(hm) + hms = tf.stack(hm_list, axis=-1) + return hms + + def _hm_2d(self, poses, cfgs, out_h, out_w): + '''synthesize the 2d heatmap + Args: + poses: unnormed xyz pose, (b,j*3) + cfgs: camera configuration, (b, 6) + out_h, out_w: output of heatmap size + Returns: + hm2: 2D heatmap, (b, out_h, out_w, j) + ''' + def fn(elems): + xyz_pose, cfg = elems[0], elems[1] + + w_ratio = cfg[4] / out_w + h_ratio = cfg[5] / out_h + new_cfg = CameraConfig(cfg[0]/w_ratio, cfg[1]/h_ratio, + cfg[2]/w_ratio, cfg[3]/h_ratio, + out_w, out_h) + + xx, yy = tf.meshgrid(tf.range(out_h), tf.range(out_w)) + xx, yy = tf.cast(xx, tf.float32), tf.cast(yy, tf.float32) + xx = tf.tile(tf.expand_dims(xx, axis=-1), [1, 1, self._jnt_num]) + yy = tf.tile(tf.expand_dims(yy, axis=-1), [1, 1, self._jnt_num]) + + uvd_pose = tf.reshape(data.util.xyz2uvd_op(xyz_pose, new_cfg), (-1,3)) + [uu,vv,dd] = tf.unstack(uvd_pose, axis=-1) + uu = tf.reshape(uu, (1,1,-1)) + vv = tf.reshape(vv, (1,1,-1)) + + hm = tf.maximum(self.max_dist_2d-tf.sqrt(tf.square(xx-uu)+tf.square(yy-vv)), + tf.zeros_like(xx))/self.max_dist_2d + return [hm, cfg] + + with tf.name_scope('pose_sync'): + hms, _ = tf.map_fn(fn, [poses, 
cfgs]) + return hms + + def _um(self, om, hm_3d): + '''get the unit offset vector map from offset maps + Args: + om: the offset map, (b,h,w,j*3) + hm_3d: the offset norm, (b,h,w,j) + Returns: + um: the unit offset map, (b,h,w,j*3) + ''' + om_list = tf.unstack(om, axis=-1) + + dm_3d = self.max_dist_3d - tf.multiply(hm_3d, self.max_dist_3d) + dm_list = tf.unstack(dm_3d, axis=-1) + + um_list = [] + + for j in range(self._jnt_num): + x,y,z = om_list[j*3], om_list[j*3+1], om_list[j*3+2] + d = dm_list[j] + + mask = tf.less(d, self.max_dist_3d-1e-2) + + x = tf.where(mask, tf.divide(x, d), tf.zeros_like(x)) + y = tf.where(mask, tf.divide(y, d), tf.zeros_like(y)) + z = tf.where(mask, tf.divide(z, d), tf.zeros_like(z)) + um_list += [x,y,z] + return tf.stack(um_list, axis=-1) + + def _resume_om(self, hm_3d, um): + '''resume the offset map from the 3d heatmap and unit offset vector + Args: + hm_3d: the 3D heatmap, (b,h,w,j) + um: the 3D unit offset vector, (b,h,w,j*3) + Returns: + om: the 3D offset vector, (b,h,w,j) + ''' + # um = tf.clip_by_value(um, -1.0, 1.0) + um_list = tf.unstack(um, axis=-1) + + dm_3d = self.max_dist_3d - tf.multiply(hm_3d, self.max_dist_3d) + dm_list = tf.unstack(dm_3d, axis=-1) + + om_list = [] + + for j in range(self._jnt_num): + x,y,z = um_list[j*3], um_list[j*3+1], um_list[j*3+2] + d = dm_list[j] + x = tf.multiply(x,d) + y = tf.multiply(y,d) + z = tf.multiply(z,d) + om_list += [x,y,z] + return tf.stack(om_list, axis=-1) + + def _vis_um_xy(self, ums): + '''visualize the xy plane angle of ums + ''' + um_list = tf.unstack(ums, axis=-1) + angle_list = [] + for j in range(self._jnt_num): + x,y,z = um_list[j*3], um_list[j*3+1], um_list[j*3+2] + d = tf.sqrt(x**2+y**2) + sin = tf.where(tf.less(d**2+z**2, 0.1), tf.ones_like(d), tf.sin(tf.divide(x,d))) + angle_list.append(sin) + return tf.stack(angle_list, axis=-1) + + def _vis_um_z(self, ums): + '''visuzlie the z plane angle of ums + ''' + um_list = tf.unstack(ums, axis=-1) + angle_list = [] + for j in range(self._jnt_num): + angle_list.append(um_list[j*3+2]) + return tf.stack(angle_list, axis=-1) + + # training + def loss(self, dms, poses, cfgs, coms): + ''' the losses for the training + Args: + dms: + poses: + reuse_variables: + Returns: + the total loss + ''' + if FLAGS.is_aug: + dms, poses = data.preprocess.data_aug(dms, poses, cfgs, coms) + + # generate ground truth + gt_hms = self._hm_2d(poses, cfgs, self._output_height, self._output_width) + + gt_normed_poses = norm_xyz_pose(poses, coms) + normed_dms = data.preprocess.norm_dm(dms, coms) + tiny_normed_dms = tf.image.resize_images(normed_dms, (self._output_height, self._output_width), 2) + xyzs = generate_xyzs_from_multi_cfgs(tiny_normed_dms, cfgs, coms) + xyzs = tf.tile(xyzs, [1,1,1,self._jnt_num]) + gt_oms = tf.reshape(gt_normed_poses, (-1,1,1,3*self._jnt_num)) - xyzs + + gt_hm3s = self._hm_3d(gt_oms) + gt_ums = self._um(gt_oms, gt_hm3s) + + # generate estimation + end_points = self.inference(normed_dms, cfgs, coms, reuse_variables=None, is_training=True) + + # heatmap loss + est_hm_list = end_points['hm_outs'] + hm_losses = [tf.nn.l2_loss(est_hms-gt_hms) for est_hms in est_hm_list] + + # 3D heatmap loss + est_hm3_list = end_points['hm3_outs'] + hm3_losses = [tf.nn.l2_loss(est_hm3-gt_hm3s) for est_hm3 in est_hm3_list] + + # offsetmap loss + # we only consider the nearby point offset maps + # in order to make the oms loss on the same scale w.r.t. 
hms loss + est_um_list = end_points['um_outs'] + um_losses = [tf.nn.l2_loss(est_ums-gt_ums) for est_ums in est_um_list] + + # add the weight decay loss + reg_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), 'reg_loss') + hm_loss = tf.add_n(hm_losses, 'hm_loss') + um_loss = tf.add_n(um_losses, 'um_loss') + hm3_loss = tf.add_n(hm3_losses, 'hm3_loss') + + total_loss = reg_loss+hm_loss+um_loss+hm3_loss + + tf.summary.scalar('tra/um_loss', um_loss) + tf.summary.scalar('tra/hm_loss', hm_loss) + tf.summary.scalar('tra/hm3_loss', hm3_loss) + + # to visualize the training error, + # only pick the first three for tensorboard visualization + est_hms = est_hm_list[-1][0:3,:,:,:] + est_ums = est_um_list[-1][0:3,:,:,:] + est_hm3s = est_hm3_list[-1][0:3,:,:,:] + tiny_normed_dms = tiny_normed_dms[0:3,:,:,:] + cfgs = cfgs[0:3,:] + coms = coms[0:3,:] + dms = dms[0:3,:,:,:] + est_oms = self._resume_om(est_hm3s, est_ums) + + # get point estimation + est_normed_poses = self._xyz_estimation(est_hms, est_oms, est_hm3s, tiny_normed_dms, cfgs, coms) + est_normed_poses = tf.reshape(est_normed_poses, + (est_normed_poses.get_shape()[0].value, -1)) + xyz_pts = unnorm_xyz_pose(est_normed_poses, coms) + + # 2d joint detection + def to_uvd_fn(elem): + xyz_pt, cfg = elem[0], elem[1] + return [data.util.xyz2uvd_op(xyz_pt, cfg), cfg] + uvd_pts, _ = tf.map_fn(to_uvd_fn, [xyz_pts, cfgs]) + resized_hms = tf.image.resize_images(est_hms, (self._input_height, self._input_width), 2) + hm_uvd_pts, _ = self._uvd_estimation_op(resized_hms, tf.ones_like(resized_hms)) + + # for visualization + gt_xy_angle = self._vis_um_xy(gt_ums) + gt_z_angle = self._vis_um_z(gt_ums) + est_xy_angle = self._vis_um_xy(est_ums) + est_z_angle = self._vis_um_z(est_ums) + + if FLAGS.debug_level > 0: + tf.summary.image('tra_dm/', dms) + tf.summary.image('tra_pts/', tf_jointplot_wrapper(tf.squeeze(dms,axis=-1), + tf.reshape(uvd_pts, (3,-1,3)))) + tf.summary.image('tra_pt_hm/', tf_jointplot_wrapper(tf.squeeze(dms, axis=-1), + tf.reshape(hm_uvd_pts, (3,-1,3)))) + if FLAGS.debug_level > 1: + tf.summary.image('tra_hm_est_0/', tf_heatmap_wrapper(est_hms[:,:,:,0])) + tf.summary.image('tra_hm_gt_0/', tf_heatmap_wrapper(gt_hms[:,:,:,0])) + tf.summary.image('tra_3d_hm_est_0/', tf_heatmap_wrapper(est_hm3s[:,:,:,0])) + tf.summary.image('tra_3d_hm_gt_0/', tf_heatmap_wrapper(gt_hm3s[:,:,:,0])) + tf.summary.image('tra_um_xy_gt_0', tf_heatmap_wrapper(gt_xy_angle[:,:,:,0])) + tf.summary.image('tra_um_z_gt_0', tf_heatmap_wrapper(gt_z_angle[:,:,:,0])) + tf.summary.image('tra_um_xy_est_0', tf_heatmap_wrapper(est_xy_angle[:,:,:,0])) + tf.summary.image('tra_um_z_est_0', tf_heatmap_wrapper(est_z_angle[:,:,:,0])) + + if FLAGS.debug_level > 2: + tf.summary.image('tra_hm_gt_1/', tf_heatmap_wrapper(gt_hms[:,:,:,5])) + tf.summary.image('tra_hm_est_1/', tf_heatmap_wrapper(est_hms[:,:,:,5])) + tf.summary.image('tra_3d_hm_est_1/', tf_heatmap_wrapper(est_hm3s[:,:,:,5])) + tf.summary.image('tra_3d_hm_gt_1/', tf_heatmap_wrapper(gt_hm3s[:,:,:,5])) + tf.summary.image('tra_um_xy_est_1', tf_heatmap_wrapper(est_xy_angle[:,:,:,5])) + tf.summary.image('tra_um_z_est_1', tf_heatmap_wrapper(est_z_angle[:,:,:,5])) + tf.summary.image('tra_um_xy_gt_1', tf_heatmap_wrapper(gt_xy_angle[:,:,:,5])) + tf.summary.image('tra_um_z_gt_1', tf_heatmap_wrapper(gt_z_angle[:,:,:,5])) + + return total_loss + + def opt(self, lr): + '''return the optimizer of the model + ''' + return tf.train.AdamOptimizer(lr, beta1=self._adam_beta1) + + # validation and test + def test(self, dms, poses, cfgs, coms, 
reuse_variables=True): + '''the validation step to show the result from the validation set + + ''' + batch_size = dms.get_shape()[0].value + # 1st phase, gpu computation + normed_dms = data.preprocess.norm_dm(dms, coms) + end_points = self.inference(normed_dms, cfgs, coms, reuse_variables=reuse_variables, is_training=False) + + est_hms = end_points['hm_outs'][-1] + + tiny_normed_dms = tf.image.resize_images(normed_dms, (self._output_height, self._output_width), 2) + est_ums = end_points['um_outs'][-1] + est_hm3s = end_points['hm3_outs'][-1] + + est_oms = self._resume_om(est_hm3s, est_ums) + + est_normed_poses = self._xyz_estimation(est_hms, est_oms, est_hm3s, tiny_normed_dms, cfgs, coms) + est_normed_poses = tf.reshape(est_normed_poses, + (est_normed_poses.get_shape()[0].value, -1)) + xyz_pts = unnorm_xyz_pose(est_normed_poses, coms) + + def to_uvd_fn(elem): + xyz_pt, cfg = elem[0], elem[1] + return [data.util.xyz2uvd_op(xyz_pt, CameraConfig(*tf.unstack(cfg,axis=0))), cfg] + uvd_pts, _ = tf.map_fn(to_uvd_fn, [xyz_pts, cfgs]) + gt_uvd_pts, _ = tf.map_fn(to_uvd_fn, [poses, cfgs]) + + resized_hms = tf.image.resize_images(est_hms, (self._input_height, self._input_width)) + hm_uvd_pts, _ = self._uvd_estimation_op(resized_hms, tf.ones_like(resized_hms)) + + # for gt visulization + gt_normed_poses = norm_xyz_pose(poses, coms) + gt_hms = self._hm_2d(poses, cfgs, self._output_height, self._output_width) + xyzs = generate_xyzs_from_multi_cfgs(tiny_normed_dms, cfgs, coms) + xyzs = tf.tile(xyzs, [1,1,1,self._jnt_num]) + gt_oms = tf.reshape(gt_normed_poses, (-1,1,1,3*self._jnt_num)) - xyzs + gt_hm3s = self._hm_3d(gt_oms) + gt_ums = self._um(gt_oms, gt_hm3s) + gt_xy_angle = self._vis_um_xy(gt_ums) + gt_z_angle = self._vis_um_z(est_ums) + + # add summayries + est_xy_angle = self._vis_um_xy(est_ums) + est_z_angle = self._vis_um_z(est_ums) + if FLAGS.debug_level > 0: + tf.summary.image('val_pts/', tf_jointplot_wrapper(tf.squeeze(dms,axis=-1), + tf.reshape(uvd_pts, (batch_size,-1,3))), + collections=['val_summaries']) + tf.summary.image('gt_pts/', tf_jointplot_wrapper(tf.squeeze(dms,axis=-1), + tf.reshape(gt_uvd_pts, (batch_size,-1,3))), + collections=['val_summaries']) + if FLAGS.debug_level > 1: + tf.summary.image('gt_hm/', tf_heatmap_wrapper(gt_hms[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('gt_hm3', tf_heatmap_wrapper(gt_hm3s[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('val_hm/', tf_heatmap_wrapper(est_hms[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('val_hm3', tf_heatmap_wrapper(est_hm3s[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('val_dm/', dms, collections=['val_summaries']) + tf.summary.image('val_pts_hm/', tf_jointplot_wrapper(tf.squeeze(dms,axis=-1), + tf.reshape(hm_uvd_pts, (batch_size,-1,3))), + collections=['val_summaries']) + + if FLAGS.debug_level > 2: + tf.summary.image('gt_xy', tf_heatmap_wrapper(gt_xy_angle[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('gt_z', tf_heatmap_wrapper(gt_z_angle[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('val_xy', tf_heatmap_wrapper(est_xy_angle[:,:,:,0]), + collections=['val_summaries']) + tf.summary.image('val_z', tf_heatmap_wrapper(est_z_angle[:,:,:,0]), + collections=['val_summaries']) + + + self.val_summary_op = tf.summary.merge_all(key='val_summaries') + + # interface to fetch output + self.uvd_pts = uvd_pts + self.xyz_pts = xyz_pts + self.val_dms = dms + self.est_hms = est_hms + self.gt_pose = poses + print('testing graph is established') + + 
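As a reference for the projection applied by `data.util.xyz2uvd_op` above, the NumPy sketch below shows the standard pinhole mapping from metric xyz joints to image-space uvd. It is illustrative only: the `(fx, fy, cx, cy, w, h)` ordering of `CameraConfig` and the sign convention of the v axis are assumptions inferred from how `cfg[0..5]` is indexed in this file, and the project's own op remains the reference implementation.

```python
import numpy as np
from collections import namedtuple

# Hypothetical stand-in for the project's CameraConfig; the field order
# (fx, fy, cx, cy, w, h) is an assumption, not taken from data/util.py.
CameraConfig = namedtuple('CameraConfig', ['fx', 'fy', 'cx', 'cy', 'w', 'h'])

def xyz2uvd_np(xyz_pose, cfg):
    '''Project metric joints (j, 3), e.g. in mm, to image-space (u, v, depth).'''
    xyz = np.asarray(xyz_pose, dtype=np.float32).reshape(-1, 3)
    x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    u = x / z * cfg.fx + cfg.cx   # column index
    v = y / z * cfg.fy + cfg.cy   # row index; some datasets flip this axis
    return np.stack([u, v, z], axis=-1)

# Toy usage: one joint 300 mm in front of a VGA-like depth camera.
cfg = CameraConfig(fx=475.0, fy=475.0, cx=320.0, cy=240.0, w=640, h=480)
print(xyz2uvd_np([[30.0, -20.0, 300.0]], cfg))  # approx. [[367.5, 208.3, 300.0]]
```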
@property + def is_validate(self): + return True if self._val_dataset else False + + @property + def name(self): + return '%s_%s'%(self._model_desc, self._net_desc) + + @property + def train_dir(self): + return os.path.join(self._base_dir, self.name) + + @property + def summary_dir(self): + return os.path.join(self.train_dir, 'summary') + + def _mean_shift(self, can_pts, num_it=10, band_width=0.8): + '''mean shift over the candidate point + Args: + can_pts: candidate points, (b,j,n,3) + num_it: number of iterations + band_width: bandwidth of the kernel + Returns: + centers: the density maximal points + ''' + def joint_fn(can_pt): + '''iteration over joint + Args: + can_pt: (n,3) + Returns: + center: (3) + ''' + # initialization + num_quan = 2.0 + quan_pt = tf.clip_by_value((can_pt+1.0)*num_quan, 0, 2*num_quan-0.1) + quan_pt = tf.to_int64(quan_pt) + + quan_hm = tf.scatter_nd(quan_pt, tf.ones(num_pt,), + (int(2*num_quan),int(2*num_quan),int(2*num_quan))) + curr_pt = tf.where(tf.equal(quan_hm, tf.reduce_max(quan_hm)))[-1] + curr_pt = tf.divide(tf.to_float(curr_pt), num_quan) - 1.0 + curr_pt += 0.5/num_quan + + # iteration + for _ in range(num_it): + s = tf.reduce_sum((can_pt - curr_pt)**2, axis=-1) + s = tf.expand_dims(tf.exp(inverse_sigma*s), axis=-1) + curr_pt = tf.reduce_sum(tf.multiply(can_pt, s), axis=0) + curr_pt = tf.divide(curr_pt, tf.reduce_sum(s)) + curr_pt = tf.reshape(curr_pt, (1,3)) + + curr_pt = tf.reshape(curr_pt, (3,)) + return curr_pt + + def batch_fn(can_pt): + '''iteration over batch + Args: + can_pt: (j,n,3) + Returns: + centers: (j,3) + ''' + return tf.map_fn(joint_fn ,can_pt) + + num_jnt = can_pts.get_shape()[1].value + num_pt = can_pts.get_shape()[2].value + inverse_sigma = -1.0 / (2*band_width*band_width) + + return tf.map_fn(batch_fn, can_pts) + + def _generate_candidates(self, hms, xyzs, num_pt): + '''generate the candidates to do mean shift, from xyzs + Args: + hms: estimated heatmaps, (b,h,w,j) + xyzs: the xyz points, (b,h,w,j*3) + num_pt: the number of candidates + Returns: + can_pts: candidate points, (b,j,n,3) + ''' + def fn(elems): + hm, xyz = elems[0], elems[1] + hm = tf.reshape(hm, (-1, jnt_num)) + xyz = tf.reshape(xyz, (-1, 3*jnt_num)) + + hm_list = tf.unstack(hm, axis=-1) + xyz_list = tf.unstack(xyz, axis=-1) + can_list = [] + + for j in range(jnt_num): + weights, indices = tf.nn.top_k(hm_list[j], k=num_pt, sorted=True) + xx = tf.gather(xyz_list[j*3], indices) + yy = tf.gather(xyz_list[j*3+1], indices) + zz = tf.gather(xyz_list[j*3+2], indices) + can_list.append(tf.stack([xx,yy,zz], axis=1)) + can_pts = tf.stack(can_list, axis=0) + return [can_pts, hms] + + jnt_num = hms.get_shape()[-1].value + can_pts, _ = tf.map_fn(fn, [hms, xyzs]) + return can_pts + + def _get_candidate_weights(self, xyz_pts, coms, cfgs, hms, dms): + '''the weights measures how xyz_pts fits the 2d hms estimation and dms observation + Args: + xyz_pts: the candidate points, (b,j,n,3) + coms: centers of mass, (b,3) + cfgs: camera configurations, (b,6) + hms: estimated 2D heatmap, (b,h,w,j) + dms: depth map, (b,h,w,1) + Returns: + weights: the weights of the corresponding points, (b,j,n,1) + ''' + def fn(elems): + xyz_pt, com, cfg, hm, dm = elems[0], elems[1], elems[2], elems[3], elems[4] + + xx,yy,zz = tf.unstack(tf.reshape(xyz_pt,(-1,3)), axis=-1) + xyz_pt = tf.reshape(xyz_pt, (-1,)) + + xyz_pt = tf.multiply(xyz_pt, data.preprocess.POSE_NORM_RATIO) + tf.tile(com,[jnt_num*pnt_num]) + xyz_pt = tf.reshape(xyz_pt, (-1,3)) + + w_ratio = cfg[4] / out_w + h_ratio = cfg[5] / out_h + new_cfg = 
CameraConfig(cfg[0]/w_ratio, cfg[1]/h_ratio, + cfg[2]/w_ratio, cfg[3]/h_ratio, + out_w, out_h) + uvd_pt = xyz2uvd_op(xyz_pt, new_cfg) + uvd_pt = tf.reshape(uvd_pt, (-1, 3)) + uu, vv, dd = tf.unstack(uvd_pt, axis=-1) + uu = tf.to_int32(uu+0.5) + vv = tf.to_int32(vv+0.5) + jj = tf.tile(tf.expand_dims(tf.range(jnt_num),axis=-1), [1,pnt_num]) + jj = tf.reshape(jj, (-1,)) + + indices = tf.stack([vv,uu,jj], axis=-1) + weights = tf.gather_nd(hm, indices) + weights = tf.reshape(weights, (jnt_num, pnt_num, 1)) + + #we also clip the values of depth + dm = tf.squeeze(dm) + dm = tf.divide(dm*data.preprocess.D_RANGE - data.preprocess.D_RANGE*0.5, + data.preprocess.POSE_NORM_RATIO) + indices = tf.stack([vv,uu], axis=-1) + od = tf.gather_nd(dm, indices) + zz = tf.maximum(zz, od) + xyz_pt = tf.stack([xx,yy,zz], axis=-1) + xyz_pt = tf.reshape(xyz_pt, (jnt_num, pnt_num, 3)) + + return [weights, xyz_pt, cfg, hm, dm] + + out_h, out_w = self._output_height, self._output_width + jnt_num = xyz_pts.get_shape()[1].value + pnt_num = xyz_pts.get_shape()[2].value + weights, xyz_pts, _, _, _ = tf.map_fn(fn, [xyz_pts, coms, cfgs, hms, dms]) + return weights, xyz_pts + + def _weighted_mean_shift(self, can_pts, weights, num_it, band_width): + '''mean shift over the candidate point + Args: + can_pts: candidate points, (b,j,n,3) + weights: weights of candidate points, (b,j,n,1) + num_it: number of iterations + band_width: bandwidth of the kernel + Returns: + centers: the density maximal points + ''' + def joint_fn(elems): + '''iteration over joint + Args: + can_pt: (n,3), elems[0] + weight: (n,1), elems[1] + Returns: + center: (3) + ''' + can_pt, weight = elems[0], elems[1] + # initialization + num_quan = 2.0 + quan_pt = tf.clip_by_value((can_pt+1.0)*num_quan, 0, 2*num_quan-0.1) + quan_pt = tf.to_int64(quan_pt) + + quan_hm = tf.scatter_nd(quan_pt, tf.squeeze(weight), + (int(2*num_quan),int(2*num_quan),int(2*num_quan))) + curr_pt = tf.where(tf.equal(quan_hm, tf.reduce_max(quan_hm)))[-1] + curr_pt = tf.divide(tf.to_float(curr_pt), num_quan) - 1.0 + curr_pt += 0.5/num_quan + + # iteration + for _ in range(num_it): + s = tf.reduce_sum((can_pt - curr_pt)**2, axis=-1) + s = tf.expand_dims(tf.exp(inverse_sigma*s), axis=-1) + s = tf.multiply(s, weight) + curr_pt = tf.reduce_sum(tf.multiply(can_pt, s), axis=0) + curr_pt = tf.divide(curr_pt, tf.reduce_sum(s)) + curr_pt = tf.reshape(curr_pt, (1,3)) + + curr_pt = tf.reshape(curr_pt, (3,)) + return [curr_pt, can_pt] + + def batch_fn(elems): + '''iteration over batch + Args: + can_pt: (j,n,3), elems[0] + weights: (j,n,1), elems[1] + Returns: + centers: (j,3) + ''' + return tf.map_fn(joint_fn ,elems) + + num_jnt = can_pts.get_shape()[1].value + num_pt = can_pts.get_shape()[2].value + inverse_sigma = -1.0 / (2*band_width*band_width) + + centers, _ = tf.map_fn(batch_fn, [can_pts, weights]) + return centers + + def _xyz_estimation(self, hms, oms, hm3s, dms, cfgs, coms): + '''use meanshift to get the final estimation + Args: + hms: the heatmap returned from 2D joint detection, (b,h,w,j) + oms: the 3D offset maps, (b,h,w,3*j) + hm3s: the 3D heaetmap, (b,h,w,j) + dms: the normalized depth map, (b,h,w,1) + cfgs: camera configurations, (b,6) + Returns: + xyz_pts: the estimated 3d joint, (b,3*j) + ''' + # get dense joint estimation + jnt_num = hms.get_shape()[-1].value + xyzs = generate_xyzs_from_multi_cfgs(dms, cfgs, coms) + xyzs = tf.tile(xyzs, [1,1,1,self._jnt_num]) + orig_xyzs= xyzs + + xyzs = xyzs + oms + + # get the weight map for candidate selection + # refined_hms = tf.multiply(hms, hm3s) 
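        # Descriptive note on the confidence map built here: each pixel's weight
        # combines the 2D detection heatmap hms with the 3D proximity heatmap hm3s.
        # hms is shifted by +1.0 before the product, so a pixel with a weak 2D
        # response still keeps its 3D evidence instead of being zeroed out; the
        # commented variants nearby are the unshifted product and the single-cue
        # alternatives. Pixels whose normalized depth is below -0.99 (presumably
        # background) are then masked to zero, which biases the top-k candidate
        # selection toward observed hand surface points.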
+ refined_hms = tf.multiply(hms+1.0, hm3s) + # refined_hms = hm3s + # refined_hms = hms + dms_mask = tf.where(tf.less(dms, -0.99), tf.zeros_like(dms), tf.ones_like(dms)) + refined_hms = tf.multiply(refined_hms, dms_mask) + + num_pt = 5 + can_pts = self._generate_candidates(refined_hms, xyzs, num_pt=num_pt) + + # weighted scheme + can_weights, _ = self._get_candidate_weights(can_pts, coms, cfgs, hms, dms) + xyz_pts = self._weighted_mean_shift(can_pts, can_weights, num_it=10, band_width=0.4) + + # unweighted scheme + # xyz_pts = self._mean_shift(can_pts, num_it=10, band_width=0.4) + + # for visualization + ori_pts = self._generate_candidates(refined_hms, orig_xyzs, num_pt=num_pt) + + self.can_pts = can_pts + self.ori_pts = ori_pts + return xyz_pts + + + def _uvd_estimation_op(self, hms, ds): + ''' find the argmax from heatmaps and corresponding depth maps, and get the final estimation + Args: + hms: the heatmap with the same size as the initial captured image by camera + ds: the depth value of the coresponding points + Returns: + the uvd points of the joint + ''' + width = hms.shape[2] + + def fn(elems): + hough_hm, hough_dm = elems[0], elems[1] + uvd_pts = [] + + hough_hm_list = tf.unstack(hough_hm, axis=-1) + hough_dm_list = tf.unstack(hough_dm, axis=-1) + for j in range(self._jnt_num): + hh = hough_hm_list[j] + hd = hough_dm_list[j] + + idx = tf.where(tf.equal(hh, tf.reduce_max(hh))) + dd = tf.gather_nd(hd, idx) + + uu, vv, dd = tf.cast(idx[0][1],tf.float32), tf.cast(idx[0][0], tf.float32), dd[0] + uvd_pts.append(tf.stack([uu,vv,dd])) + return [tf.concat(uvd_pts, axis=-1), ds] + return tf.map_fn(fn, [hms, ds]) + + def do_test(self, sess, summary_writer, step, names=None): + '''execute computation of the inference + a fast version of inference + ''' + # during training + if names is None: + f = open(self._log_path, 'a') + summary_str, gt_vals, xyz_vals = sess.run( + [self.val_summary_op, self.gt_pose, self.xyz_pts]) + summary_writer.add_summary(summary_str, step) + + maxJntError=[] + f.write('[%s] step %d\n'%(datetime.now(), step)) + for xyz_val, gt_val in zip(xyz_vals, gt_vals): + maxJntError.append(Evaluation.maxJntError(xyz_val, gt_val)) + diff = (xyz_val-gt_val).reshape(-1,3) + dist = alg.norm(diff, axis=1).reshape(-1,1) + error_mat = np.concatenate((diff, dist), axis=1) + print(error_mat) + f.write(np.array_str(error_mat)+'\n') + print('validate error:', maxJntError) + f.write('validation error: {}\n'.format(maxJntError)) + f.flush() + f.close() + return + + if step%100 == 0: + summary_str, xyz_vals, gt_vals, names = sess.run( + [self.val_summary_op, self.xyz_pts, self.gt_pose, names]) + summary_writer.add_summary(summary_str, step) + + maxJntError=[] + for xyz_val, gt_val in zip(xyz_vals, gt_vals): + maxJntError.append(Evaluation.maxJntError(xyz_val, gt_val)) + diff = (xyz_val-gt_val).reshape(-1,3) + dist = alg.norm(diff, axis=1).reshape(-1,1) + print(np.concatenate((diff, dist), axis=1)) + print('[step: %d]test error:'%step, maxJntError) + print('---\n') + return gt_vals, xyz_vals, names + + gt_vals, xyz_vals, names = sess.run([self.gt_pose, self.xyz_pts, names]) + return gt_vals, xyz_vals, names + +'''unit test +''' +def run_train(dataset, val_dataset, restore_step=None): + net_module_name = 'network.'+FLAGS.net_module + + net_module = importlib.import_module(net_module_name, package=None) + net = net_module.detect_net + net_name = net_module.TOWER_NAME + + model = JointDetectionModel(dataset, net, epoch=FLAGS.epoch, net_desc=net_name, + val_dataset = val_dataset) + train(model, 
restore_step) + +def run_test(train_dataset, test_dataset, selected_step=None): + net_module_name = 'network.'+FLAGS.net_module + + net_module = importlib.import_module(net_module_name, package=None) + net = net_module.detect_net + net_name = net_module.TOWER_NAME + + model = JointDetectionModel(train_dataset, net, epoch=FLAGS.epoch, net_desc=net_name, + val_dataset = test_dataset) + + test(model, selected_step) + +if __name__ == '__main__': + if FLAGS.dataset == 'bighand': + import data.bigHand + dataset = data.bigHand.BigHandDataset('training') + val_dataset = data.bigHand.BigHandDataset('testing') + + elif FLAGS.dataset == 'nyu': + import data.nyu + dataset = data.nyu.NyuDataset('training') + val_dataset = data.nyu.NyuDataset('testing') + + elif FLAGS.dataset == 'icvl': + import data.icvl + dataset = data.icvl.IcvlDataset('training') + val_dataset = data.icvl.IcvlDataset('testing') + + elif FLAGS.dataset == 'msra': + import data.msra + dataset = data.msra.MsraDataset('training', FLAGS.pid) + val_dataset = data.msra.MsraDataset('testing', FLAGS.pid) + + if FLAGS.is_train: + run_train(dataset, val_dataset) + else: + run_test(dataset, val_dataset, -1) diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/test_model.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/test_model.py new file mode 100644 index 000000000..a5f06073e --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/test_model.py @@ -0,0 +1,94 @@ +from __future__ import print_function, absolute_import, division + +import gpu_config +import tensorflow as tf +import network.slim as slim +import numpy as np +import time, os +import cv2 +from datetime import datetime +from data.evaluation import Evaluation + +FLAGS = tf.app.flags.FLAGS + +def test(model, selected_step): + with tf.Graph().as_default(): + total_test_num = model.val_dataset.exact_num + + dms, poses, cfgs, coms, names = model.batch_input_test(model.val_dataset) + model.test(dms, poses, cfgs, coms, reuse_variables=None) + + # dms, poses, names = model.batch_input_test(model.val_dataset) + # model.test(dms, poses, reuse_variables=None) + + sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=False)) + + init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) + sess.run(init_op) + + if selected_step is not None: + checkpoint_path = os.path.join(model.train_dir, 'model.ckpt-%d'%selected_step) + saver = tf.train.Saver(tf.global_variables()) + saver.restore(sess, checkpoint_path) + print('[test_model]model has been resotored from %s'%checkpoint_path) + + tf.train.start_queue_runners(sess=sess) + summary_writer = tf.summary.FileWriter( + model.summary_dir+'_'+model.val_dataset.subset, + graph=sess.graph) + + res_path = os.path.join(model.train_dir, '%s-%s-result'%(model.val_dataset.subset, datetime.now())) + res_path = res_path.replace(' ', '_') + + res_txt_path = res_path+'.txt' + if os.path.exists(res_txt_path): + os.remove(res_txt_path) + err_path = res_path+'_error.txt' + f = open(res_txt_path, 'w') + + # res_vid_path = res_path+'.avi' + # codec = cv2.cv.CV_FOURCC('X','V','I','D') + # the output size is defined by the visualization tool of matplotlib + # vid = cv2.VideoWriter(res_vid_path, codec, 25, (640, 480)) + + print('[test_model]begin test') + test_num = 0 + step = 0 + maxJntError = [] + while True: + start_time = time.time() + try: + gt_vals, xyz_vals, name_vals = model.do_test(sess, summary_writer, 
step, names) + except tf.errors.OutOfRangeError: + print('run out of range') + break + + duration = time.time()-start_time + + for xyz_val, gt_val, name_val in zip(xyz_vals, gt_vals, name_vals): + maxJntError.append(Evaluation.maxJntError(xyz_val, gt_val)) + + xyz_val = xyz_val.tolist() + res_str = '%s\t%s\n'%(name_val, '\t'.join(format(pt, '.4f') for pt in xyz_val)) + res_str = res_str.replace('/', '\\') + f.write(res_str) + # vid.write(vis_val) + test_num += 1 + if test_num >= total_test_num: + print('finish test') + f.close() + Evaluation.plotError(maxJntError, err_path) + return + f.flush() + + if step%101 == 0: + print('[%s]: %d/%d computed, with %.2fs'%(datetime.now(), step, model.max_steps, duration)) + + step += 1 + + + print('finish test') + f.close() + Evaluation.plotError(maxJntError, 'result.txt') diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_multi_gpu.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_multi_gpu.py new file mode 100644 index 000000000..45866a8e3 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_multi_gpu.py @@ -0,0 +1,159 @@ +'''provide a multi-thread training scheme +WARNING: this file is still under development, is not guaranteed to work. +''' +from __future__ import print_function, absolute_import, division + +import gpu_config +import tensorflow as tf +import network.slim as slim +import numpy as np +import time, os +from datetime import datetime +import model.memory_util as memory_util + +FLAGS = tf.app.flags.FLAGS + +def _average_gradients(tower_grads): + '''calcualte the average gradient for each shared variable across all towers on multi gpus + Args: + tower_grads: list of lists of (gradient, variable) tuples. 
len(tower_grads)=#tower, len(tower_grads[0])=#vars + Returns: + List of paris (gradient, variable) where the gradients has been averaged across + all towers + ''' + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # over different variables + grads = [] + for g, _ in grad_and_vars: + # over different towers + expanded_g = tf.expand_dims(g,0) + grads.append(expanded_g) + + grad = tf.concat(axis=0, values=grads) + grad = tf.reduce_mean(grad, 0) + + v = grad_and_vars[0][1] + grad_and_var = (grad, v) + average_grads.append(grad_and_var) + return average_grads + +def train(model): + '''train the provided model + model: provide several required interface to train + ''' + with tf.Graph().as_default(), tf.device('/cpu:0'): + global_step = tf.get_variable( + 'global_step', [], + initializer=tf.constant_initializer(0), trainable=False) + lr = tf.train.exponential_decay(model.init_lr, + global_step, + model.decay_steps, + model.lr_decay_factor, + staircase=True) + opt = model.opt(lr) + + '''split the batch into num_gpus groups, + do the backpropagation on each gpu seperately, + then average the gradidents on each of which and update + ''' + assert FLAGS.batch_size % FLAGS.num_gpus == 0, ( + 'the batch_size should be divisible wrt num_gpus') + dms, poses = model.batch_input + dm_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=dms) + pose_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=poses) + + # calculate the gradients for each gpu + tower_grads = [] + reuse_variables = None + + for i in range(FLAGS.num_gpus): + # i = 1 + # with tf.device('/gpu:%d'%gpu_config.gpu_list[i]): + with tf.device('gpu:%d'%i): + with tf.name_scope('%s_%d'%(model.name, i)) as scope: + with slim.arg_scope([slim.variables.variable], device='/cpu:0'): + # with slim.arg_scope([slim.variables.variable], device='/gpu:%d'%gpu_config.gpu_list[i]): + loss = model.loss(dm_splits[i], pose_splits[i], reuse_variables) + + # tf.get_variable_scope().reuse_variables() + # reuse variables after the first tower + reuse_variables = True + # only retain the summaries for the last tower + summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope) + # retain the batch-norm optimization only from the last tower + batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION, + scope) + + grads = opt.compute_gradients(loss) + tower_grads.append(grads) + print('setup %dth gpu on %d'%(i, gpu_config.gpu_list[i])) + + grads = _average_gradients(tower_grads) + + # TODO: add input summaries + # summaries.extend(input_summaries) + + summaries.append(tf.summary.scalar('learning_rate', lr)) + + for grad, var in grads: + if grad is not None: + summaries.append(tf.summary.histogram(var.op.name+'/gradients', grad)) + + apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) + + for var in tf.trainable_variables(): + summaries.append(tf.summary.histogram(var.op.name, var)) + + variable_averages = tf.train.ExponentialMovingAverage( + model.moving_average_decay, global_step) + variables_to_average = (tf.trainable_variables()+ + tf.moving_average_variables()) + variable_averages_op = variable_averages.apply(variables_to_average) + + batchnorm_update_op = tf.group(*batchnorm_updates) + # group all training operations into one + train_op = tf.group(apply_gradient_op, variable_averages_op, batchnorm_update_op) + + saver = tf.train.Saver(tf.global_variables()) + summary_op = tf.summary.merge(summaries) + init_op = tf.global_variables_initializer() + + memory_util.vlog(1) + + sess = 
tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=False)) + + sess.run(init_op) + tf.train.start_queue_runners(sess=sess) + + summary_writer = tf.summary.FileWriter( + model.train_dir, + graph=sess.graph) + + # finally into the training loop + print('finally into the long long training loop') + + # for step in range(model.max_steps): + for step in range(1000): + start_time = time.time() + _, loss_value = sess.run([train_op, loss]) + duration = time.time() - start_time + + assert not np.isnan(loss_value), 'Model diverged with loss = NaN' + + if step%10 == 0: + format_str = '[model/train_multi_gpu] %s: step %d, loss = %.2f, %.3f sec/batch, %.3f sec/sample' + print(format_str %(datetime.now(), step, loss_value, duration, duration/FLAGS.batch_size)) + + if step%100 == 0: + summary_str = sess.run(summary_op) + summary_writer.add_summary(summary_str, step) + + if step%1000 == 0 or (step+1) == model.max_steps: + checkpoint_path = os.path.join(model.train_dir, 'model.ckpt') + saver.save(sess, checkpoint_path, global_step=step) + + print('finish train') + diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_single_gpu.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_single_gpu.py new file mode 100644 index 000000000..1e72e3f4c --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/model/train_single_gpu.py @@ -0,0 +1,179 @@ +from __future__ import print_function, absolute_import, division + +import gpu_config +import tensorflow as tf +import network.slim as slim +import numpy as np +import time, os +from datetime import datetime + +FLAGS = tf.app.flags.FLAGS + +def _average_gradients(tower_grads): + '''calcualte the average gradient for each shared variable across all towers on multi gpus + Args: + tower_grads: list of lists of (gradient, variable) tuples. 
len(tower_grads)=#tower, len(tower_grads[0])=#vars + Returns: + List of paris (gradient, variable) where the gradients has been averaged across + all towers + ''' + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # over different variables + grads = [] + for g, _ in grad_and_vars: + # over different towers + expanded_g = tf.expand_dims(g,0) + grads.append(expanded_g) + + grad = tf.concat(axis=0, values=grads) + grad = tf.reduce_mean(grad, 0) + + v = grad_and_vars[0][1] + grad_and_var = (grad, v) + average_grads.append(grad_and_var) + return average_grads + +def train(model, restore_step=None): + '''train the provided model + model: provide several required interface to train + ''' + with tf.Graph().as_default(): + global_step = tf.get_variable( + 'global_step', [], + initializer=tf.constant_initializer(0), trainable=False) + lr = tf.train.exponential_decay(model.init_lr, + global_step, + model.decay_steps, + model.lr_decay_factor, + staircase=True) + + print('[train] learning rate decays per %d steps with rate=%f'%( + model.decay_steps,model.lr_decay_factor)) + print('[train] initial learning_rate = %f'%model.init_lr) + tf.summary.scalar('learning_rate', lr) + opt = model.opt(lr) + + batches = model.batch_input(model.train_dataset) + + loss = model.loss(*batches) + tf.summary.scalar('loss', loss) + + if model.is_validate: + # set batch_size as 3 since tensorboard visualization + val_batches = model.batch_input(model.val_dataset, 3) + model.test(*val_batches) # don't need the name + + batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION) + + accu_steps = float(FLAGS.sub_batch) + + grads = opt.compute_gradients(loss) + accum_grads = [] + for grad, var in grads: + if grad is not None: + accum_grads.append(tf.Variable(tf.zeros_like(grad), trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + name=var.op.name+'_accu_grad')) + else: + accum_grads.append(tf.Variable(tf.zeros_like(var), trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + name=var.op.name+'_accu_grad')) + + reset_op = [grad.assign(tf.zeros_like(grad)) for grad in accum_grads] + accum_op = [accum_grads[i].assign_add(grad[0]) for i, grad in enumerate(grads)if grad[0] is not None] + + ave_grad = [(tf.clip_by_value(tf.divide(accum_grads[i], accu_steps), -0.2, 0.2), + grad[1]) for i, grad in enumerate(grads)] + apply_gradient_op = opt.apply_gradients(ave_grad, + global_step=global_step) + + for ave_grad, grad_and_var in zip(ave_grad, grads): + grad, var = grad_and_var[0], grad_and_var[1] + if grad is not None: + tf.summary.histogram(var.op.name, var) + tf.summary.histogram(var.op.name+'/gradients', ave_grad) + + # variable_averages = tf.train.ExponentialMovingAverage( + # model.moving_average_decay, global_step) + # variables_to_average = tf.trainable_variables() + # var_1, var_2 = tf.moving_average_variables()[0], tf.moving_average_variables()[1] + # variable_averages_op = variable_averages.apply(variables_to_average) + + batchnorm_update_op = tf.group(*batchnorm_updates) + # group all training operations into one + # train_op = tf.group(apply_gradient_op, variable_averages_op) + train_op = tf.group(apply_gradient_op) + + saver = tf.train.Saver(tf.global_variables()) + summary_op = tf.summary.merge_all() + + init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) + + sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=False)) + + sess.run(init_op) + start_step = 0 + # to resume the training + if restore_step 
is not None and restore_step>0: + checkpoint_path = os.path.join(model.train_dir, 'model.ckpt-%d'%restore_step) + saver.restore(sess, checkpoint_path) + start_step = restore_step + + tf.train.start_queue_runners(sess=sess) + + #TODO: change to tf.train.SummaryWriter() + summary_writer = tf.summary.FileWriter( + model.summary_dir, + graph=sess.graph) + + # finally into the training loop + print('finally into the long long training loop') + + log_path = os.path.join(model.train_dir, 'training_log.txt') + f = open(log_path, 'a') + + for step in range(start_step, model.max_steps): + if f.closed: + f = open(log_path, 'a') + + start_time = time.time() + ave_loss = 0 + sess.run(reset_op) + for sub_step in range(int(accu_steps)): + _, _, loss_value = sess.run([accum_op, batchnorm_update_op, loss]) + assert not np.isnan(loss_value), 'Model diverged with loss = NaN' + ave_loss += loss_value + + _ = sess.run([train_op]) + ave_loss /= accu_steps + duration = time.time() - start_time + + if step%5 == 0: + format_str = '[model/train_multi_gpu] %s: step %d/%d, loss = %.3f, %.3f sec/batch, %.3f sec/sample' + print(format_str%(datetime.now(), step, model.max_steps, ave_loss, duration, duration/(FLAGS.batch_size*accu_steps))) + f.write(format_str%(datetime.now(), step, model.max_steps, ave_loss, duration, duration/(FLAGS.batch_size*accu_steps))+'\n') + f.flush() + + if step%20 == 0: + summary_str = sess.run(summary_op) + summary_writer.add_summary(summary_str, step) + + + if step%40 == 0 and hasattr(model, 'do_test'): + model.do_test(sess, summary_writer, step) + + if step%100 == 0 or (step+1) == model.max_steps: + if not os.path.exists(model.train_dir): + os.makedirs(model.train_dir) + checkpoint_path = os.path.join(model.train_dir, 'model.ckpt') + saver.save(sess, checkpoint_path, global_step=step) + print('model has been saved to %s\n'%checkpoint_path) + f.write('model has been saved to %s\n'%checkpoint_path) + f.flush() + + print('finish train') + f.close() + -- Gitee From 6c157911cb6ea7ceb91bc131eb104b9361509055 Mon Sep 17 00:00:00 2001 From: xiaoqiang Date: Wed, 8 Jun 2022 06:18:24 +0000 Subject: [PATCH 11/11] update --- .../network/__init__.py | 25 + .../network/losses.py | 174 ++++ .../network/ops.py | 781 ++++++++++++++++++ .../network/scopes.py | 170 ++++ .../network/variables.py | 289 +++++++ 5 files changed, 1439 insertions(+) create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/__init__.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/losses.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/ops.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/scopes.py create mode 100644 TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/variables.py diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/__init__.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/__init__.py new file mode 100644 index 000000000..26ca233c9 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TF-Slim grouped API. Please see README.md for details and usage.""" +# pylint: disable=unused-import + +# Collapse tf-slim into a single namespace. +from __future__ import absolute_import + +from network.slim import losses +from network.slim import ops +from network.slim import scopes +from network.slim import variables +from network.slim.scopes import arg_scope diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/losses.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/losses.py new file mode 100644 index 000000000..78298d092 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/losses.py @@ -0,0 +1,174 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for various Neural Network TensorFlow losses. + + All the losses defined here add themselves to the LOSSES_COLLECTION + collection. + + l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso. + l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay. + cross_entropy_loss: Define a cross entropy loss using + softmax_cross_entropy_with_logits. Useful for classification. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +# In order to gather all losses in a network, the user should use this +# key for get_collection, i.e: +# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) +LOSSES_COLLECTION = '_losses' + + +def l1_regularizer(weight=1.0, scope=None): + """Define a L1 regularizer. + + Args: + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. + """ + def regularizer(tensor): + with tf.name_scope(scope, 'L1Regularizer', [tensor]): + l1_weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='weight') + return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value') + return regularizer + + +def l2_regularizer(weight=1.0, scope=None): + """Define a L2 regularizer. + + Args: + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. 
+ """ + def regularizer(tensor): + with tf.name_scope(scope, 'L2Regularizer', [tensor]): + l2_weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='weight') + return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value') + return regularizer + + +def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None): + """Define a L1L2 regularizer. + + Args: + weight_l1: scale the L1 loss by this factor. + weight_l2: scale the L2 loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. + """ + def regularizer(tensor): + with tf.name_scope(scope, 'L1L2Regularizer', [tensor]): + weight_l1_t = tf.convert_to_tensor(weight_l1, + dtype=tensor.dtype.base_dtype, + name='weight_l1') + weight_l2_t = tf.convert_to_tensor(weight_l2, + dtype=tensor.dtype.base_dtype, + name='weight_l2') + reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)), + name='value_l1') + reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor), + name='value_l2') + return tf.add(reg_l1, reg_l2, name='value') + return regularizer + + +def l1_loss(tensor, weight=1.0, scope=None): + """Define a L1Loss, useful for regularize, i.e. lasso. + + Args: + tensor: tensor to regularize. + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + the L1 loss op. + """ + with tf.name_scope(scope, 'L1Loss', [tensor]): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def l2_loss(tensor, weight=1.0, scope=None): + """Define a L2Loss, useful for regularize, i.e. weight decay. + + Args: + tensor: tensor to regularize. + weight: an optional weight to modulate the loss. + scope: Optional scope for name_scope. + + Returns: + the L2 loss op. + """ + with tf.name_scope(scope, 'L2Loss', [tensor]): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, + weight=1.0, scope=None): + """Define a Cross Entropy loss using softmax_cross_entropy_with_logits. + + It can scale the loss by weight factor, and smooth the labels. + + Args: + logits: [batch_size, num_classes] logits outputs of the network . + one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels. + label_smoothing: if greater than 0 then smooth the labels. + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + A tensor with the softmax_cross_entropy loss. 
+ """ + logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape()) + with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]): + num_classes = one_hot_labels.get_shape()[-1].value + one_hot_labels = tf.cast(one_hot_labels, logits.dtype) + if label_smoothing > 0: + smooth_positives = 1.0 - label_smoothing + smooth_negatives = label_smoothing / num_classes + one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives + cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits( + logits, one_hot_labels, name='xentropy') + + weight = tf.convert_to_tensor(weight, + dtype=logits.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/ops.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/ops.py new file mode 100644 index 000000000..3e047cc85 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/ops.py @@ -0,0 +1,781 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for typical Neural Network TensorFlow layers. + + Additionally it maintains a collection with update_ops that need to be + updated after the ops have been computed, for example to update moving means + and moving variances of batch_norm. + + Ops that have different behavior during training or eval have an is_training + parameter. Additionally Ops that contain variables.variable have a trainable + parameter, which control if the ops variables are trainable or not. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from tensorflow.python.training import moving_averages + +from network.slim import losses +from network.slim import scopes +from network.slim import variables + +# Used to keep the update ops done by batch_norm. +UPDATE_OPS_COLLECTION = '_update_ops_' + + +# the batch_norm here is batch_renorm implementation instead of batch norm +@scopes.add_arg_scope +def batch_norm(inputs, + decay=0.999, + center=True, + scale=False, + epsilon=0.001, + moving_vars='moving_vars', + activation=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a Batch ReNormalization layer. + + Args: + inputs: a tensor of size [batch_size, height, width, channels] + or [batch_size, channels]. + decay: decay for the moving average. + center: If True, subtract beta. If False, beta is not created and ignored. + scale: If True, multiply by gamma. If False, gamma is + not used. When the next layer is linear (also e.g. ReLU), this can be + disabled since the scaling can be done by the next layer. 
+ epsilon: small float added to variance to avoid dividing by zero. + moving_vars: collection to store the moving_mean and moving_variance. + activation: activation function. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + + Returns: + a tensor representing the output of the operation. + + """ + inputs_shape = inputs.get_shape() + with tf.variable_scope(scope, 'BatchReNorm', [inputs], reuse=reuse): + axis = list(range(len(inputs_shape) - 1)) + params_shape = inputs_shape[-1:] + # Allocate parameters for the beta and gamma of the normalization. + beta, gamma = None, None + if center: + beta = variables.variable('beta', + params_shape, + initializer=tf.zeros_initializer(), + trainable=trainable, + restore=restore) + if scale: + gamma = variables.variable('gamma', + params_shape, + initializer=tf.ones_initializer(), + trainable=trainable, + restore=restore) + # Create moving_mean and moving_variance add them to + # GraphKeys.MOVING_AVERAGE_VARIABLES collections. + moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES] + moving_mean = variables.variable('moving_mean', + params_shape, + initializer=tf.zeros_initializer(), + trainable=False, + restore=restore, + collections=moving_collections) + moving_variance = variables.variable('moving_variance', + params_shape, + initializer=tf.ones_initializer(), + trainable=False, + restore=restore, + collections=moving_collections) + + r_max = variables.variable('r_max', + (1,), + initializer=tf.ones_initializer(), + trainable=False, + restore=restore) + d_max = variables.variable('d_max', + (1,), + initializer=tf.zeros_initializer(), + trainable=False, + restore=restore) + curr_t = variables.variable('curr_t', + (1,), + initializer=tf.zeros_initializer(), + trainable=False, + restore=restore) + + if is_training: + # Calculate the moments based on the individual batch. 
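      # Descriptive note: this training branch implements Batch Renormalization
      # (cf. Ioffe, 2017, arXiv:1702.03275) rather than plain batch norm. After
      # normalizing with the minibatch moments, the activations are corrected by
      #   r = clip(sigma_batch / sigma_moving, 1/r_max, r_max)
      #   d = clip((mu_batch - mu_moving) / sigma_moving, -d_max, d_max)
      #   y = x_hat * r + d
      # with gradients stopped through r and d, so training-time outputs stay close
      # to the moving statistics used at inference. r_max, d_max and curr_t are
      # non-trainable variables updated through UPDATE_OPS_COLLECTION on a schedule
      # that is intended to start near plain batch norm and relax toward full
      # renormalization as training progresses.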
+ mean, variance = tf.nn.moments(inputs, axis) + + update_moving_mean = moving_averages.assign_moving_average( + moving_mean, mean, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean) + update_moving_variance = moving_averages.assign_moving_average( + moving_variance, variance, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance) + + r_max_val = 3.0 + new_r = tf.divide(r_max_val, 1.0+(r_max_val-1.0)*tf.exp(-curr_t)) + update_r = r_max.assign(new_r) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_r) + + d_max_val = 5.0 + new_d = tf.divide(d_max_val, (1.0+(d_max_val/1e-3)-1.0)*tf.exp(-2.0*curr_t)) + update_d = d_max.assign(new_d) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_d) + + new_t = curr_t+1e-5 + update_t = curr_t.assign(new_t) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_t) + + # batch renorm + std = tf.sqrt(variance+epsilon) + moving_std = tf.sqrt(moving_variance+epsilon) + r = tf.divide(std, moving_std) + r = tf.stop_gradient(tf.clip_by_value(r, 1.0/r_max, r_max)) + + d = tf.divide(mean - moving_mean, moving_std) + d = tf.stop_gradient(tf.clip_by_value(d, -d_max, d_max)) + + outputs = tf.nn.batch_normalization( + inputs, mean, variance, None, None, epsilon) + outputs = tf.multiply(outputs, r) + d + + if scale: + outputs = tf.multiply(outputs, gamma) + if center: + outputs += beta + + else: + # Just use the moving_mean and moving_variance. + mean = moving_mean + variance = moving_variance + + # Normalize the activations. + outputs = tf.nn.batch_normalization( + inputs, mean, variance, beta, gamma, epsilon) + + outputs.set_shape(inputs.get_shape()) + if activation: + outputs = activation(outputs) + return outputs + + +def _two_element_tuple(int_or_tuple): + """Converts `int_or_tuple` to height, width. + + Several of the functions that follow accept arguments as either + a tuple of 2 integers or a single integer. A single integer + indicates that the 2 values of the tuple are the same. + + This functions normalizes the input value by always returning a tuple. + + Args: + int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape. + + Returns: + A tuple with 2 values. + + Raises: + ValueError: If `int_or_tuple` it not well formed. + """ + if isinstance(int_or_tuple, (list, tuple)): + if len(int_or_tuple) != 2: + raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple) + return int(int_or_tuple[0]), int(int_or_tuple[1]) + if isinstance(int_or_tuple, int): + return int(int_or_tuple), int(int_or_tuple) + if isinstance(int_or_tuple, tf.TensorShape): + if len(int_or_tuple) == 2: + return int_or_tuple[0], int_or_tuple[1] + raise ValueError('Must be an int, a list with 2 elements or a TensorShape of ' + 'length 2') + + +@scopes.add_arg_scope +def conv2d(inputs, + num_filters_out, + kernel_size, + stride=1, + padding='SAME', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a 2D convolution followed by an optional batch_norm layer. + + conv2d creates a variable called 'weights', representing the convolutional + kernel, that is convolved with the input. If `batch_norm_params` is None, a + second variable called 'biases' is added to the result of the convolution + operation. + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + num_filters_out: the number of output filters. 
+ kernel_size: a list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: one of 'VALID' or 'SAME'. + activation: activation function. + stddev: standard deviation of the truncated guassian weight distribution. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + Returns: + a tensor representing the output of the operation. + + """ + with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + num_filters_in = inputs.get_shape()[-1] + weights_shape = [kernel_h, kernel_w, + num_filters_in, num_filters_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], + padding=padding) + if batch_norm_params is not None: + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(conv, **batch_norm_params) + else: + bias_shape = [num_filters_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(conv, biases) + if activation: + outputs = activation(outputs) + return outputs + +@scopes.add_arg_scope +def depthwise_conv2d(inputs, + num_filters_out, + kernel_size, + stride=1, + padding='VALID', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + is_norm=False, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a 2D depth wise convolution followed by an optional batch_norm layer. + this applies channels differnt filters to each channel independently + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + num_filters_out: the number of output filters. + kernel_size: a list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: one of 'VALID' or 'SAME'. + activation: activation function. + stddev: standard deviation of the truncated guassian weight distribution. + bias: the initial value of the biases. + weight_decay: the weight decay. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. 
+ restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + Returns: + a tensor representing the output of the operation. + + """ + with tf.variable_scope(scope, 'ConvDepthWise', [inputs], reuse=reuse): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + num_filters_in = inputs.get_shape()[-1].value + weights_shape = [kernel_h, kernel_w, + num_filters_in, num_filters_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + + batch_size = inputs.get_shape()[0].value + num_pt = inputs.get_shape()[1].value + + conv = tf.nn.depthwise_conv2d(inputs, weights, [1, stride_h, stride_w, 1], + padding=padding) + + if is_norm: + outputs = tf.reshape(conv, (batch_size*num_pt, num_filters_out, num_filters_in)) + outputs = batch_norm(outputs, decay=0.999) + outputs = tf.reshape(conv, (batch_size, num_pt, num_filters_out, num_filters_in)) + else: + bias_shape = [conv.get_shape()[-1],] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(conv, biases) + outputs = tf.reshape(outputs, (batch_size,num_pt,num_filters_out,num_filters_in)) + + if activation: + outputs = activation(outputs) + return outputs + +@scopes.add_arg_scope +def depthwise_conv2d_v1(inputs, + num_filters_out, + kernel_size, + stride=1, + padding='VALID', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a 2D depth wise convolution followed by an optional batch_norm layer. + this applies channels differnt filters to each channel independently + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + num_filters_out: the number of output filters. + kernel_size: a list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: one of 'VALID' or 'SAME'. + activation: activation function. + stddev: standard deviation of the truncated guassian weight distribution. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + Returns: + a tensor representing the output of the operation. 
+ + """ + with tf.variable_scope(scope, 'ConvDepthWise', [inputs], reuse=reuse): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + num_filters_in = inputs.get_shape()[-1] + weights_shape = [kernel_h, kernel_w, + num_filters_in, num_filters_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + conv = tf.nn.depthwise_conv2d(inputs, weights, [1, stride_h, stride_w, 1], + padding=padding) + if batch_norm_params is not None: + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(conv, **batch_norm_params) + else: + bias_shape = [conv.get_shape()[-1],] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(conv, biases) + if activation: + outputs = activation(outputs) + return outputs + +def _deconv_output_length(input_length, filter_size, padding, stride): + """determines output length of a transposed convolution given input length, stride and kernel + Args: + padding: 'SAME', 'VALID' or 'FULL' + Returns: + output length + """ + padding = padding.upper + if input_length is None: + return None + input_length *= stride + if padding == 'VALID': + input_length += max(filter_size-stride, 0) + elif padding == 'FULL': + input_length -= (stride + filter_size - 2) + return input_length + +@scopes.add_arg_scope +def deconv(inputs, + num_filters_out, + kernel_size, + stride, + padding='SAME', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a 2D deconvolution operator followed by optional batch_norm layer + args: + inputs: with size [batch_size, height, widht, channels] + num_filters_out: number of output feature channels + padding: 'VALID', 'SAME' 'FULL' + returns: + a tensor representing the output of the operation + """ + with tf.variable_scope(scope, 'Deconv', [inputs], reuse=reuse): + batch_size = inputs.get_shape()[0] + height, width = inputs.get_shape()[1], inputs.get_shape()[2] + num_filters_in = inputs.get_shape()[-1] + + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + + weights_shape = [kernel_h, kernel_w, + num_filters_out, num_filters_in] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay>0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + + out_height = _deconv_output_length(height, kernel_h, padding, stride_h) + out_width = _deconv_output_length(width, kernel_w, padding, stride_w) + output_shape = tf.stack([batch_size, out_height, out_width, num_filters_out]) + deconv = tf.nn.conv2d_transpose(inputs, weights, output_shape, [1, stride_h, stride_w, 1], padding=padding) + + if batch_norm_params is not None: + with scopes.arg_scope([batch_norm], 
is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(deconv, **batch_norm_params) + else: + bias_shape = [num_filters_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(deconv, biases) + if activation: + outputs = activation(outputs) + return outputs + + +@scopes.add_arg_scope +def fc(inputs, + num_units_out, + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a fully connected layer followed by an optional batch_norm layer. + + FC creates a variable called 'weights', representing the fully connected + weight matrix, that is multiplied by the input. If `batch_norm` is None, a + second variable called 'biases' is added to the result of the initial + vector-matrix multiplication. + + Args: + inputs: a [B x N] tensor where B is the batch size and N is the number of + input units in the layer. + num_units_out: the number of output units in the layer. + activation: activation function. + stddev: the standard deviation for the weights. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + + Returns: + the tensor variable representing the result of the series of operations. + """ + with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse): + num_units_in = inputs.get_shape()[1] + weights_shape = [num_units_in, num_units_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + if batch_norm_params is not None: + outputs = tf.matmul(inputs, weights) + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(outputs, **batch_norm_params) + else: + bias_shape = [num_units_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.xw_plus_b(inputs, weights, biases) + if activation: + outputs = activation(outputs) + return outputs + + +def one_hot_encoding(labels, num_classes, scope=None): + """Transform numeric labels into onehot_labels. + + Args: + labels: [batch_size] target labels. + num_classes: total number of classes. + scope: Optional scope for name_scope. + Returns: + one hot encoding of the labels. 
+ """ + with tf.name_scope(scope, 'OneHotEncoding', [labels]): + batch_size = labels.get_shape()[0] + indices = tf.expand_dims(tf.range(0, batch_size), 1) + labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype) + concated = tf.concat(axis=1, values=[indices, labels]) + onehot_labels = tf.sparse_to_dense( + concated, tf.stack([batch_size, num_classes]), 1.0, 0.0) + onehot_labels.set_shape([batch_size, num_classes]) + return onehot_labels + + +@scopes.add_arg_scope +def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Max Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. + + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: a list of length 2: [kernel_height, kernel_width] of the + pooling kernel over which the op is computed. Can be an int if both + values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the results of the pooling operation. + Raises: + ValueError: if 'kernel_size' is not a 2-D list + """ + with tf.name_scope(scope, 'MaxPool', [inputs]): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + return tf.nn.max_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding) + +@scopes.add_arg_scope +def upsampling_nearest(inputs, scale): + assert scale>1, 'scale of upsampling should be larger then 1' + new_h = int(inputs.shape[1]*scale) + new_w = int(inputs.shape[2]*scale) + return tf.image.resize_images(inputs, [new_h, new_w], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + + +@scopes.add_arg_scope +def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Avg Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. + + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: a list of length 2: [kernel_height, kernel_width] of the + pooling kernel over which the op is computed. Can be an int if both + values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the results of the pooling operation. + """ + with tf.name_scope(scope, 'AvgPool', [inputs]): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + return tf.nn.avg_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding) + + +@scopes.add_arg_scope +def dropout(inputs, keep_prob=0.5, is_training=True, scope=None): + """Returns a dropout layer applied to the input. + + Args: + inputs: the tensor to pass to the Dropout layer. + keep_prob: the probability of keeping each input unit. + is_training: whether or not the model is in training mode. If so, dropout is + applied and values scaled. Otherwise, inputs is returned. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the output of the operation. 
+ """ + if is_training and keep_prob > 0: + with tf.name_scope(scope, 'Dropout', [inputs]): + return tf.nn.dropout(inputs, keep_prob) + else: + return inputs + + +def flatten(inputs, scope=None): + """Flattens the input while maintaining the batch_size. + + Assumes that the first dimension represents the batch. + + Args: + inputs: a tensor of size [batch_size, ...]. + scope: Optional scope for name_scope. + + Returns: + a flattened tensor with shape [batch_size, k]. + Raises: + ValueError: if inputs.shape is wrong. + """ + if len(inputs.get_shape()) < 2: + raise ValueError('Inputs must be have a least 2 dimensions') + dims = inputs.get_shape()[1:] + k = dims.num_elements() + with tf.name_scope(scope, 'Flatten', [inputs]): + return tf.reshape(inputs, [-1, k]) + + +def repeat_op(repetitions, inputs, op, *args, **kwargs): + """Build a sequential Tower starting from inputs by using an op repeatedly. + + It creates new scopes for each operation by increasing the counter. + Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') + it will repeat the given op under the following variable_scopes: + conv1/Conv + conv1/Conv_1 + conv1/Conv_2 + + Args: + repetitions: number or repetitions. + inputs: a tensor of size [batch_size, height, width, channels]. + op: an operation. + *args: args for the op. + **kwargs: kwargs for the op. + + Returns: + a tensor result of applying the operation op, num times. + Raises: + ValueError: if the op is unknown or wrong. + """ + scope = kwargs.pop('scope', None) + with tf.variable_scope(scope, 'RepeatOp', [inputs]): + tower = inputs + for _ in range(repetitions): + tower = op(tower, *args, **kwargs) + return tower + diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/scopes.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/scopes.py new file mode 100644 index 000000000..2c2fb0a2e --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/scopes.py @@ -0,0 +1,170 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains the new arg_scope used for TF-Slim ops. + + Allows one to define models much more compactly by eliminating boilerplate + code. This is accomplished through the use of argument scoping (arg_scope). 
+ + Example of how to use scopes.arg_scope: + + with scopes.arg_scope(ops.conv2d, padding='SAME', + stddev=0.01, weight_decay=0.0005): + net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1') + net = ops.conv2d(net, 256, [5, 5], scope='conv2') + + The first call to conv2d will overwrite padding: + ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', + stddev=0.01, weight_decay=0.0005, scope='conv1') + + The second call to Conv will use predefined args: + ops.conv2d(inputs, 256, [5, 5], padding='SAME', + stddev=0.01, weight_decay=0.0005, scope='conv2') + + Example of how to reuse an arg_scope: + with scopes.arg_scope(ops.conv2d, padding='SAME', + stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope: + net = ops.conv2d(net, 256, [5, 5], scope='conv1') + .... + + with scopes.arg_scope(conv2d_arg_scope): + net = ops.conv2d(net, 256, [5, 5], scope='conv2') + + Example of how to use scopes.add_arg_scope: + + @scopes.add_arg_scope + def conv2d(*args, **kwargs) +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import functools + +from tensorflow.python.framework import ops + +_ARGSTACK_KEY = ("__arg_stack",) + +_DECORATED_OPS = set() + + +def _get_arg_stack(): + stack = ops.get_collection(_ARGSTACK_KEY) + if stack: + return stack[0] + else: + stack = [{}] + ops.add_to_collection(_ARGSTACK_KEY, stack) + return stack + + +def _current_arg_scope(): + stack = _get_arg_stack() + return stack[-1] + + +def _add_op(op): + key_op = (op.__module__, op.__name__) + if key_op not in _DECORATED_OPS: + _DECORATED_OPS.add(key_op) + + +@contextlib.contextmanager +def arg_scope(list_ops_or_scope, **kwargs): + """Stores the default arguments for the given set of list_ops. + + For usage, please see examples at top of the file. + + Args: + list_ops_or_scope: List or tuple of operations to set argument scope for or + a dictionary containg the current scope. When list_ops_or_scope is a dict, + kwargs must be empty. When list_ops_or_scope is a list or tuple, then + every op in it need to be decorated with @add_arg_scope to work. + **kwargs: keyword=value that will define the defaults for each op in + list_ops. All the ops need to accept the given set of arguments. + + Yields: + the current_scope, which is a dictionary of {op: {arg: value}} + Raises: + TypeError: if list_ops is not a list or a tuple. + ValueError: if any op in list_ops has not be decorated with @add_arg_scope. + """ + if isinstance(list_ops_or_scope, dict): + # Assumes that list_ops_or_scope is a scope that is being reused. + if kwargs: + raise ValueError("When attempting to re-use a scope by suppling a" + "dictionary, kwargs must be empty.") + current_scope = list_ops_or_scope.copy() + try: + _get_arg_stack().append(current_scope) + yield current_scope + finally: + _get_arg_stack().pop() + else: + # Assumes that list_ops_or_scope is a list/tuple of ops with kwargs. + if not isinstance(list_ops_or_scope, (list, tuple)): + raise TypeError("list_ops_or_scope must either be a list/tuple or reused" + "scope (i.e. 
dict)") + try: + current_scope = _current_arg_scope().copy() + for op in list_ops_or_scope: + key_op = (op.__module__, op.__name__) + if not has_arg_scope(op): + raise ValueError("%s is not decorated with @add_arg_scope", key_op) + if key_op in current_scope: + current_kwargs = current_scope[key_op].copy() + current_kwargs.update(kwargs) + current_scope[key_op] = current_kwargs + else: + current_scope[key_op] = kwargs.copy() + _get_arg_stack().append(current_scope) + yield current_scope + finally: + _get_arg_stack().pop() + + +def add_arg_scope(func): + """Decorates a function with args so it can be used within an arg_scope. + + Args: + func: function to decorate. + + Returns: + A tuple with the decorated function func_with_args(). + """ + @functools.wraps(func) + def func_with_args(*args, **kwargs): + current_scope = _current_arg_scope() + current_args = kwargs + key_func = (func.__module__, func.__name__) + if key_func in current_scope: + current_args = current_scope[key_func].copy() + current_args.update(kwargs) + return func(*args, **current_args) + _add_op(func) + return func_with_args + + +def has_arg_scope(func): + """Checks whether a func has been decorated with @add_arg_scope or not. + + Args: + func: function to check. + + Returns: + a boolean. + """ + key_op = (func.__module__, func.__name__) + return key_op in _DECORATED_OPS diff --git a/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/variables.py b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/variables.py new file mode 100644 index 000000000..fcd6e55d1 --- /dev/null +++ b/TensorFlow/contrib/graph/Dense3DRegression_ID1066_for_TensorFlow/network/variables.py @@ -0,0 +1,289 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for creating variables in TF-Slim. + +The variables module is typically used for defining model variables from the +ops routines (see slim.ops). Such variables are used for training, evaluation +and inference of models. + +All the variables created through this module would be added to the +MODEL_VARIABLES collection, if you create a model variable outside slim, it can +be added with slim.variables.add_variable(external_variable, reuse). + +Usage: + weights_initializer = tf.truncated_normal_initializer(stddev=0.01) + l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005) + weights = variables.variable('weights', + shape=[100, 100], + initializer=weights_initializer, + regularizer=l2_regularizer, + device='/cpu:0') + + biases = variables.variable('biases', + shape=[100], + initializer=tf.zeros_initializer(), + device='/cpu:0') + + # More complex example. 
+ + net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1') + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2') + with slim.arg_scope([variables.variable], restore=False): + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3') + + # Get all model variables from all the layers. + model_variables = slim.variables.get_variables() + + # Get all model variables from a specific the layer, i.e 'conv1'. + conv1_variables = slim.variables.get_variables('conv1') + + # Get all weights from all the layers. + weights = slim.variables.get_variables_by_name('weights') + + # Get all bias from all the layers. + biases = slim.variables.get_variables_by_name('biases') + + # Get all variables to restore. + # (i.e. only those created by 'conv1' and 'conv2') + variables_to_restore = slim.variables.get_variables_to_restore() + +************************************************ +* Initializing model variables from a checkpoint +************************************************ + +# Create some variables. +v1 = slim.variables.variable(name="v1", ..., restore=False) +v2 = slim.variables.variable(name="v2", ...) # By default restore=True +... +# The list of variables to restore should only contain 'v2'. +variables_to_restore = slim.variables.get_variables_to_restore() +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from network.slim import scopes + +# Collection containing all the variables created using slim.variables +MODEL_VARIABLES = '_model_variables_' + +# Collection containing the slim.variables that are created with restore=True. +VARIABLES_TO_RESTORE = '_variables_to_restore_' + + +def add_variable(var, restore=True): + """Adds a variable to the MODEL_VARIABLES collection. + + Optionally it will add the variable to the VARIABLES_TO_RESTORE collection. + Args: + var: a variable. + restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + + """ + collections = [MODEL_VARIABLES] + if restore: + collections.append(VARIABLES_TO_RESTORE) + for collection in collections: + if var not in tf.get_collection(collection): + tf.add_to_collection(collection, var) + + +def get_variables(scope=None, suffix=None): + """Gets the list of variables, filtered by scope and/or suffix. + + Args: + scope: an optional scope for filtering the variables to return. + suffix: an optional suffix for filtering the variables to return. + + Returns: + a copied list of variables with scope and suffix. + """ + candidates = tf.get_collection(MODEL_VARIABLES, scope)[:] + if suffix is not None: + candidates = [var for var in candidates if var.op.name.endswith(suffix)] + return candidates + + +def get_variables_to_restore(): + """Gets the list of variables to restore. + + Returns: + a copied list of variables. + """ + return tf.get_collection(VARIABLES_TO_RESTORE)[:] + + +def get_variables_by_name(given_name, scope=None): + """Gets the list of variables that were given that name. + + Args: + given_name: name given to the variable without scope. + scope: an optional scope for filtering the variables to return. + + Returns: + a copied list of variables with the given name and prefix. 
+ """ + return get_variables(scope=scope, suffix=given_name) + + +def get_unique_variable(name): + """Gets the variable uniquely identified by that name. + + Args: + name: a name that uniquely identifies the variable. + + Returns: + a tensorflow variable. + + Raises: + ValueError: if no variable uniquely identified by the name exists. + """ + candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name) + if not candidates: + raise ValueError('Couldnt find variable %s' % name) + + for candidate in candidates: + if candidate.op.name == name: + return candidate + raise ValueError('Variable %s does not uniquely identify a variable', name) + + +class VariableDeviceChooser(object): + """Slim device chooser for variables. + + When using a parameter server it will assign them in a round-robin fashion. + When not using a parameter server it allows GPU:0 placement otherwise CPU:0. + """ + + def __init__(self, + num_parameter_servers=0, + ps_device='/job:ps', + placement='CPU:0'): + """Initialize VariableDeviceChooser. + + Args: + num_parameter_servers: number of parameter servers. + ps_device: string representing the parameter server device. + placement: string representing the placement of the variable either CPU:0 + or GPU:0. When using parameter servers forced to CPU:0. + """ + self._num_ps = num_parameter_servers + self._ps_device = ps_device + self._placement = placement if num_parameter_servers == 0 else 'CPU:0' + self._next_task_id = 0 + + def __call__(self, op): + device_string = '' + if self._num_ps > 0: + task_id = self._next_task_id + self._next_task_id = (self._next_task_id + 1) % self._num_ps + device_string = '%s/task:%d' % (self._ps_device, task_id) + device_string += '/%s' % self._placement + return device_string + + +# TODO(sguada) Remove once get_variable is able to colocate op.devices. +def variable_device(device, name): + """Fix the variable device to colocate its ops.""" + if callable(device): + var_name = tf.get_variable_scope().name + '/' + name + var_def = tf.NodeDef(name=var_name, op='Variable') + device = device(var_def) + if device is None: + device = '' + return device + + +@scopes.add_arg_scope +def global_step(device=''): + """Returns the global step variable. + + Args: + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + + Returns: + the tensor representing the global step variable. + """ + global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP) + if global_step_ref: + return global_step_ref[0] + else: + collections = [ + VARIABLES_TO_RESTORE, + tf.GraphKeys.GLOBAL_VARIABLES, + tf.GraphKeys.GLOBAL_STEP, + ] + # Get the device for the variable. + with tf.device(variable_device(device, 'global_step')): + return tf.get_variable('global_step', shape=[], dtype=tf.int64, + initializer=tf.zeros_initializer(), + trainable=False, collections=collections) + + +@scopes.add_arg_scope +def variable(name, shape=None, dtype=tf.float32, initializer=None, + regularizer=None, trainable=True, collections=None, device='', + restore=True): + """Gets an existing variable with these parameters or creates a new one. + + It also add itself to a group with its name. + + Args: + name: the name of the new or existing variable. + shape: shape of the new or existing variable. + dtype: type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: initializer for the variable if one is created. 
+ regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + collections: A list of collection names to which the Variable will be added. + Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES + and MODEL_VARIABLES collections. + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + + Returns: + The created or existing variable. + """ + collections = list(collections or []) + + # Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES + collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES] + # Add to VARIABLES_TO_RESTORE if necessary + if restore: + collections.append(VARIABLES_TO_RESTORE) + # Remove duplicates + collections = set(collections) + # Get the device for the variable. + with tf.device(variable_device(device, name)): + return tf.get_variable(name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, + trainable=trainable, collections=collections) -- Gitee
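The ops.py wrappers above all follow the same TF-Slim pattern: each layer creates its parameters through `variables.variable`, optionally applies `batch_norm`, and picks up shared defaults from `scopes.arg_scope`. The sketch below shows how the pieces compose into a small network and how the restore collection is used afterwards. It is only a minimal sketch: the `network` import path, the placeholder shape and all layer sizes are assumptions for illustration and should be adjusted to the actual package layout of this repository.
```python
# Minimal usage sketch (TF 1.x graph mode). Import paths are assumptions;
# this patch places ops.py, scopes.py and variables.py under .../network/.
import tensorflow as tf
from network import ops, scopes, variables

images = tf.placeholder(tf.float32, [32, 64, 64, 1])  # illustrative input

# arg_scope pushes shared defaults into every decorated op listed here;
# explicit keyword arguments at the call site still take precedence.
with scopes.arg_scope([ops.conv2d, ops.fc],
                      stddev=0.01, weight_decay=0.0005,
                      batch_norm_params={'decay': 0.999}, is_training=True):
  net = ops.conv2d(images, 32, [3, 3], padding='SAME', scope='conv1')
  net = ops.max_pool(net, [2, 2], padding='SAME', scope='pool1')
  net = ops.conv2d(net, 64, [3, 3], padding='SAME', scope='conv2')
  net = ops.max_pool(net, [2, 2], padding='SAME', scope='pool2')
  net = ops.flatten(net, scope='flatten')
  net = ops.dropout(net, keep_prob=0.5)
  logits = ops.fc(net, 10, activation=None, batch_norm_params=None,
                  scope='logits')

# Every variable created with restore=True (the default) ends up in the
# VARIABLES_TO_RESTORE collection, so a Saver for fine-tuning is one line:
saver = tf.train.Saver(variables.get_variables_to_restore())
```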
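`_deconv_output_length` encodes the usual transposed-convolution size relation: the input length is multiplied by the stride, then the kernel overhang is added for 'VALID' padding and subtracted for 'FULL', while 'SAME' keeps the plain product. The snippet below restates that rule as a standalone function purely to illustrate the arithmetic (it is not imported from the repository) and checks a few values.
```python
# Standalone restatement of the transposed-convolution output-length rule
# used by deconv()/_deconv_output_length above; for illustration only.
def deconv_output_length(input_length, filter_size, padding, stride):
    padding = padding.upper()
    if input_length is None:
        return None
    output_length = input_length * stride
    if padding == 'VALID':
        output_length += max(filter_size - stride, 0)   # add kernel overhang
    elif padding == 'FULL':
        output_length -= (stride + filter_size - 2)     # remove full overlap
    return output_length

# With stride 2 and a 4x4 kernel, a 32-pixel side becomes:
assert deconv_output_length(32, 4, 'SAME', 2) == 64   # exact doubling
assert deconv_output_length(32, 4, 'VALID', 2) == 66  # 64 + (4 - 2)
assert deconv_output_length(32, 4, 'FULL', 2) == 60   # 64 - (2 + 4 - 2)
```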
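scopes.py and variables.py interact in two ways that the docstrings describe but never show end to end: an op decorated with `@scopes.add_arg_scope` picks up the defaults of the innermost matching `arg_scope`, and a variable created with `restore=False` is tracked as a model variable but kept out of `VARIABLES_TO_RESTORE`. A toy sketch of both behaviours follows; `toy_op`, the variable names and the `network` import path are all hypothetical and only serve the demonstration (run it in a fresh graph so the collection counts match).
```python
# Toy demonstration of argument scoping and the restore collection.
import tensorflow as tf
from network import scopes, variables

@scopes.add_arg_scope
def toy_op(x, gain=1.0, name='toy'):
  # hypothetical op, used only to show how defaults are injected
  with tf.name_scope(name):
    return x * gain

x = tf.constant([1.0, 2.0])
with scopes.arg_scope([toy_op], gain=0.5):
  y1 = toy_op(x)            # receives gain=0.5 from the enclosing arg_scope
  y2 = toy_op(x, gain=2.0)  # an explicit keyword argument still wins

w = variables.variable('w', shape=[3], initializer=tf.zeros_initializer())
aux = variables.variable('aux', shape=[1],
                         initializer=tf.zeros_initializer(), restore=False)

print(len(variables.get_variables()))             # 2: both are model variables
print(len(variables.get_variables_to_restore()))  # 1: 'aux' is excluded
```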