From a79006cbfdd3e781962959505fba51ad243a0e6b Mon Sep 17 00:00:00 2001 From: pengyeqing Date: Sat, 17 Jul 2021 16:44:32 +0800 Subject: [PATCH 1/4] update --- ...50\347\220\206\346\214\207\345\257\274.md" | 605 ++++++++ .../cv/classification/ResNext50/LICENSE" | 201 +++ .../cv/classification/ResNext50/README.md" | 345 +++++ .../cv/classification/ResNext50/env.sh" | 8 + .../ResNext50/gen_dataset_info.py" | 60 + .../ResNext50/imagenet_acc_eval.py" | 183 +++ .../ResNext50/imagenet_torch_preprocess.py" | 116 ++ .../ResNext50/requirements.txt" | 6 + .../ResNext50/resnext50_pth2onnx.py" | 35 + .../classification/ResNext50/test/README.md" | 33 + .../ResNext50/test/eval_acc_perf.sh" | 69 + .../classification/ResNext50/test/parse.py" | 32 + .../classification/ResNext50/test/perf_t4.sh" | 22 + .../classification/ResNext50/test/pth2om.sh" | 13 + .../cv/segmentation/ssd_detection.diff" | 140 ++ ...50\347\220\206\346\214\207\345\257\274.md" | 1227 +++++++++++++++++ ...50\347\220\206\346\214\207\345\257\274.md" | 1041 ++++++++++++++ .../benchmark/nlp/.keep" | 0 .../cv/ReID/ReID-strong-baseline/LICENSE" | 201 +++ .../cv/ReID/ReID-strong-baseline/README.md" | 53 + .../ReID-strong-baseline/ReID_postprocess.py" | 73 + .../ReID-strong-baseline/ReID_preprocess.py" | 56 + .../ReID-strong-baseline/ReID_pth2onnx.py" | 63 + .../cv/ReID/ReID-strong-baseline/env.sh" | 8 + .../ReID-strong-baseline/gen_dataset_info.py" | 60 + .../ReID-strong-baseline/modelzoo_level.txt" | 4 + .../ReID-strong-baseline/requirements.txt" | 8 + .../test/eval_acc_perf.sh" | 72 + .../ReID/ReID-strong-baseline/test/parse.py" | 33 + .../ReID/ReID-strong-baseline/test/perf_g.sh" | 19 + .../ReID/ReID-strong-baseline/test/pth2om.sh" | 19 + .../research/.keep" | 0 ...47\272\277\346\216\250\347\220\206-FAQ.md" | 286 ++++ ...\350\257\225\346\212\245\345\221\212.docx" | Bin ...\346\214\207\345\257\274\344\271\246.docx" | 0 ...\350\257\225\346\212\245\345\221\212.docx" | Bin 0 -> 304048 bytes ...\220\206-issue\346\250\241\346\235\277.md" | 21 + ...46\216\250\347\220\206-models_result.xlsx" | Bin 0 -> 11905 bytes ...77\347\224\250\350\257\264\346\230\216.md" | 115 ++ ...347\220\206-\350\277\233\345\261\225.xlsx" | Bin 0 -> 10203 bytes ...14\346\224\266\346\214\207\345\215\227.md" | 285 ++++ .../.keep" | 0 .../.keep" | 0 .../.keep" | 0 docs/models_result.xlsx | Bin 11605 -> 0 bytes 45 files changed, 5512 insertions(+) create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/LICENSE" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/README.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/env.sh" create mode 100644 
"Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/gen_dataset_info.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_acc_eval.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_torch_preprocess.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/requirements.txt" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/resnext50_pth2onnx.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/README.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/eval_acc_perf.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/parse.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/perf_t4.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/pth2om.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" rename docs/.keep => 
"Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep" (100%) create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/LICENSE" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/README.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_postprocess.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_preprocess.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_pth2onnx.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/env.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/gen_dataset_info.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/modelzoo_level.txt" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/requirements.txt" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/eval_acc_perf.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/parse.py" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/perf_g.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/pth2om.sh" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep" create mode 100644 
"Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md" rename "docs/XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" => "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" (100%) create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274\344\271\246.docx" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\265\213\350\257\225\346\212\245\345\221\212.docx" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-issue\346\250\241\346\235\277.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-models_result.xlsx" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-\347\216\257\345\242\203\351\203\250\347\275\262\344\270\216\344\275\277\347\224\250\350\257\264\346\230\216.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-\350\277\233\345\261\225.xlsx" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-\351\252\214\346\224\266\346\214\207\345\215\227.md" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/\346\241\210\344\276\213/\345\212\237\350\203\275\346\211\223\351\200\232/.keep" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/\346\241\210\344\276\213/\346\200\247\350\203\275\344\274\230\345\214\226/.keep" create mode 100644 "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/\346\241\210\344\276\213/\347\262\276\345\272\246\350\260\203\350\257\225/.keep" delete mode 100644 docs/models_result.xlsx diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" new file mode 100644 index 0000000..11de7d2 --- /dev/null +++ 
"b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" @@ -0,0 +1,605 @@ +# ResNeXt50 Onnx模型端到端推理指导 +- [1 模型概述](#1-模型概述) + - [1.1 论文地址](#11-论文地址) + - [1.2 代码地址](#12-代码地址) +- [2 环境说明](#2-环境说明) + - [2.1 深度学习框架](#21-深度学习框架) + - [2.2 python第三方库](#22-python第三方库) +- [3 模型转换](#3-模型转换) + - [3.1 pth转onnx模型](#31-pth转onnx模型) + - [3.2 onnx转om模型](#32-onnx转om模型) +- [4 数据集预处理](#4-数据集预处理) + - [4.1 数据集获取](#41-数据集获取) + - [4.2 数据集预处理](#42-数据集预处理) + - [4.3 生成数据集信息文件](#43-生成数据集信息文件) +- [5 离线推理](#5-离线推理) + - [5.1 benchmark工具概述](#51-benchmark工具概述) + - [5.2 离线推理](#52-离线推理) +- [6 精度对比](#6-精度对比) + - [6.1 离线推理TopN精度统计](#61-离线推理TopN精度统计) + - [6.2 开源TopN精度](#62-开源TopN精度) + - [6.3 精度对比](#63-精度对比) +- [7 性能对比](#7-性能对比) + - [7.1 npu性能数据](#71-npu性能数据) + - [7.2 T4性能数据](#72-T4性能数据) + - [7.3 性能对比](#73-性能对比) + + + +## 1 模型概述 + +- **[论文地址](#11-论文地址)** + +- **[代码地址](#12-代码地址)** + +### 1.1 论文地址 +[ResNeXt50论文](https://arxiv.org/abs/1611.05431) +本文提出了一个简单的,高度模型化的针对图像分类问题的网络结构。本文的网络是通过重复堆叠building block组成的,这些building block整合了一系列具有相同拓扑结构的变体(transformations)。本文提出的简单的设计思路可以生成一种同质的,多分支的结构。这种方法产生了一个新的维度,作者将其称为基(变体的数量,the size of the set of transformations)。在ImageNet-1K数据集上,作者可以在保证模型复杂度的限制条件下,通过提升基的大小来提高模型的准确率。更重要的是,相比于更深和更宽的网络,提升基的大小更加有效。作者将本文的模型命名为ResNeXt,本模型在ILSVRC2016上取得了第二名。本文还在ImageNet-5K和COCO数据集上进行了实验,结果均表明ResNeXt的性能比ResNet好。 + +### 1.2 代码地址 +[ResNeXt50代码](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py) + +## 2 环境说明 + +- **[深度学习框架](#21-深度学习框架)** + +- **[python第三方库](#22-python第三方库)** + +### 2.1 深度学习框架 +``` +pytorch == 1.6.0 +torchvision == 0.7.0 +onnx == 1.7.0 +``` + +### 2.2 python第三方库 + +``` +numpy == 1.18.5 +Pillow == 7.2.0 +``` + +**说明:** +> X86架构:pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装 +> +> Arm架构:pytorch,torchvision和onnx可以通过源码编译安装,其它可以通过pip3.7 install 包名 安装 + +## 3 模型转换 + +- **[pth转onnx模型](#31-pth转onnx模型)** + +- **[onnx转om模型](#32-onnx转om模型)** + +### 3.1 pth转onnx模型 + +1.下载pth权重文件 +[ResNeXt50预训练pth权重文件](https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth) +文件md5sum: 1d6611049e6ef03f1d6afa11f6f9023e +2.编写pth2onnx脚本resnext50_pth2onnx.py +```python +import sys +import torch +import torch.onnx +import torchvision.models as models + +def pth2onnx(input_file, output_file): + model = models.resnext50_32x4d(pretrained=False) + checkpoint = torch.load(input_file, map_location=None) + model.load_state_dict(checkpoint) + + model.eval() + input_names = ["image"] + output_names = ["class"] + dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}} + dummy_input = torch.randn(1, 3, 224, 224) + torch.onnx.export(model, dummy_input, output_file, input_names = input_names, dynamic_axes = dynamic_axes, output_names = output_names, verbose=True, opset_version=11) + +if __name__ == "__main__": + input_file = sys.argv[1] + output_file = sys.argv[2] + pth2onnx(input_file, output_file) +``` + + **说明:** +>注意目前ATC支持的onnx算子版本为11 + +3.执行pth2onnx脚本,生成onnx模型文件 +``` +python3 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx +``` + +### 3.2 onnx转om模型 + +1.设置环境变量 +``` +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export 
+2. Write the pth2onnx script resnext50_pth2onnx.py
+```python
+import sys
+import torch
+import torch.onnx
+import torchvision.models as models
+
+def pth2onnx(input_file, output_file):
+    model = models.resnext50_32x4d(pretrained=False)
+    checkpoint = torch.load(input_file, map_location=None)
+    model.load_state_dict(checkpoint)
+
+    model.eval()
+    input_names = ["image"]
+    output_names = ["class"]
+    dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}}  # make the batch dimension dynamic
+    dummy_input = torch.randn(1, 3, 224, 224)
+    torch.onnx.export(model, dummy_input, output_file, input_names = input_names, dynamic_axes = dynamic_axes, output_names = output_names, verbose=True, opset_version=11)
+
+if __name__ == "__main__":
+    input_file = sys.argv[1]
+    output_file = sys.argv[2]
+    pth2onnx(input_file, output_file)
+```
+
+ **Note:**
+>ATC currently supports onnx operator set version 11
+
+3. Run the pth2onnx script to generate the onnx model file
+```
+python3 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx
+```
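+
+Before converting to om, the exported file can be sanity-checked with the onnx package; a sketch, assuming the resnext50.onnx produced above is in the current directory:
+```python
+import onnx
+
+model = onnx.load("resnext50.onnx")
+onnx.checker.check_model(model)             # raises if the graph is malformed
+print([i.name for i in model.graph.input])  # expected: ['image']
+print([o.name for o in model.graph.output]) # expected: ['class']
+```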
+
+### 3.2 onnx to om
+
+1. Set the environment variables
+```
+export install_path=/usr/local/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
+```
+2. Use atc to convert the onnx model into an om model file; for tool usage see [CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373)
+```
+atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310
+```
+
+## 4 Dataset Preprocessing
+
+- **[Obtaining the Dataset](#41-obtaining-the-dataset)**
+
+- **[Dataset Preprocessing](#42-dataset-preprocessing)**
+
+- **[Generating the Dataset Info File](#43-generating-the-dataset-info-file)**
+
+### 4.1 Obtaining the Dataset
+The model is tested on the 50,000-image validation set from the [ImageNet website](http://www.image-net.org). The images and labels are stored in datasets/ImageNet/val_union and datasets/ImageNet/val_label.txt respectively.
+
+### 4.2 Dataset Preprocessing
+1. Preprocessing script imagenet_torch_preprocess.py
+```python
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+from PIL import Image
+import numpy as np
+import multiprocessing
+
+
+model_config = {
+    'resnet': {
+        'resize': 256,
+        'centercrop': 224,
+        'mean': [0.485, 0.456, 0.406],
+        'std': [0.229, 0.224, 0.225],
+    },
+    'inceptionv3': {
+        'resize': 342,
+        'centercrop': 299,
+        'mean': [0.485, 0.456, 0.406],
+        'std': [0.229, 0.224, 0.225],
+    },
+    'inceptionv4': {
+        'resize': 342,
+        'centercrop': 299,
+        'mean': [0.5, 0.5, 0.5],
+        'std': [0.5, 0.5, 0.5],
+    },
+}
+
+
+def center_crop(img, output_size):
+    if isinstance(output_size, int):
+        output_size = (int(output_size), int(output_size))
+    image_width, image_height = img.size
+    crop_height, crop_width = output_size
+    crop_top = int(round((image_height - crop_height) / 2.))
+    crop_left = int(round((image_width - crop_width) / 2.))
+    return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))
+
+
+def resize(img, size, interpolation=Image.BILINEAR):
+    if isinstance(size, int):
+        w, h = img.size
+        if (w <= h and w == size) or (h <= w and h == size):
+            return img
+        if w < h:
+            ow = size
+            oh = int(size * h / w)
+            return img.resize((ow, oh), interpolation)
+        else:
+            oh = size
+            ow = int(size * w / h)
+            return img.resize((ow, oh), interpolation)
+    else:
+        return img.resize(size[::-1], interpolation)
+
+
+def gen_input_bin(mode_type, file_batches, batch):
+    i = 0
+    for file in file_batches[batch]:
+        i = i + 1
+        print("batch", batch, file, "===", i)
+
+        # RGBA to RGB
+        image = Image.open(os.path.join(src_path, file)).convert('RGB')
+        image = resize(image, model_config[mode_type]['resize'])  # Resize
+        image = center_crop(image, model_config[mode_type]['centercrop'])  # CenterCrop
+        img = np.array(image, dtype=np.float32)
+        img = img.transpose(2, 0, 1)  # ToTensor: HWC -> CHW
+        img = img / 255.  # ToTensor: div 255
+        img -= np.array(model_config[mode_type]['mean'], dtype=np.float32)[:, None, None]  # Normalize: mean
+        img /= np.array(model_config[mode_type]['std'], dtype=np.float32)[:, None, None]  # Normalize: std
+        img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
+
+
+def preprocess(mode_type, src_path, save_path):
+    files = os.listdir(src_path)
+    file_batches = [files[i:i + 500] for i in range(0, 50000, 500) if files[i:i + 500] != []]
+    thread_pool = multiprocessing.Pool(len(file_batches))
+    for batch in range(len(file_batches)):
+        thread_pool.apply_async(gen_input_bin, args=(mode_type, file_batches, batch))
+    thread_pool.close()
+    thread_pool.join()
+    print("in thread, except will not report! please ensure bin files generated.")
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 4:
+        raise Exception("usage: python3 xxx.py [model_type] [src_path] [save_path]")
+    mode_type = sys.argv[1]
+    src_path = sys.argv[2]
+    save_path = sys.argv[3]
+    src_path = os.path.realpath(src_path)
+    save_path = os.path.realpath(save_path)
+    if mode_type not in model_config:
+        model_type_help = "model type: "
+        for key in model_config.keys():
+            model_type_help += key
+            model_type_help += ' '
+        raise Exception(model_type_help)
+    if not os.path.isdir(save_path):
+        os.makedirs(os.path.realpath(save_path))
+    preprocess(mode_type, src_path, save_path)
+```
+2. Run the preprocessing script to generate the preprocessed bin files; the first argument selects the preprocessing config (resnet for this model), followed by the image directory and the output directory
+```
+python3 imagenet_torch_preprocess.py resnet datasets/ImageNet/val_union ./prep_dataset
+```
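+
+One generated file can be spot-checked with numpy before running inference; a sketch, where the file name depends on the source image:
+```python
+import sys
+import numpy as np
+
+arr = np.fromfile(sys.argv[1], dtype=np.float32)  # one file from ./prep_dataset
+assert arr.size == 3 * 224 * 224, arr.size
+img = arr.reshape(3, 224, 224)                    # CHW layout written by the script
+print(img.min(), img.max(), img.mean())           # normalized values, roughly within [-3, 3]
+```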
+
+### 4.3 Generating the Dataset Info File
+1. Info file generation script get_info.py
+```python
+import os
+import sys
+import cv2
+from glob import glob
+
+
+def get_bin_info(file_path, info_name, width, height):
+    bin_images = glob(os.path.join(file_path, '*.bin'))
+    with open(info_name, 'w') as file:
+        for index, img in enumerate(bin_images):
+            content = ' '.join([str(index), img, width, height])
+            file.write(content)
+            file.write('\n')
+
+
+def get_jpg_info(file_path, info_name):
+    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
+    image_names = []
+    for extension in extensions:
+        image_names.append(glob(os.path.join(file_path, '*.' + extension)))
+    with open(info_name, 'w') as file:
+        for image_name in image_names:
+            if len(image_name) == 0:
+                continue
+            else:
+                for index, img in enumerate(image_name):
+                    img_cv = cv2.imread(img)
+                    shape = img_cv.shape
+                    width, height = shape[1], shape[0]
+                    content = ' '.join([str(index), img, str(width), str(height)])
+                    file.write(content)
+                    file.write('\n')
+
+
+if __name__ == '__main__':
+    file_type = sys.argv[1]
+    file_path = sys.argv[2]
+    info_name = sys.argv[3]
+    if file_type == 'bin':
+        width = sys.argv[4]
+        height = sys.argv[5]
+        assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5'
+        get_bin_info(file_path, info_name, width, height)
+    elif file_type == 'jpg':
+        assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3'
+        get_jpg_info(file_path, info_name)
+```
+2. Run the script to generate the dataset info file
+```
+python3 get_info.py bin ./prep_dataset ./resnext50_prep_bin.info 224 224
+```
+The first argument is the model input type, the second the path to the generated bin files, the third the output info file, and the last two the width and height.
+## 5 Offline Inference
+
+- **[benchmark Tool Overview](#51-benchmark-tool-overview)**
+
+- **[Offline Inference](#52-offline-inference)**
+
+### 5.1 benchmark Tool Overview
+
+The benchmark tool is Huawei's in-house model inference tool. It supports offline inference for many kinds of models, quickly measures model performance on the Ascend 310, and offers both real-data and pure-inference modes; combined with post-processing scripts it covers the end-to-end flow for many models. For how to obtain and use the tool, see [CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373)
+### 5.2 Offline Inference
+1. Set the environment variables
+```
+export install_path=/usr/local/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
+```
+2. Run offline inference
+```
+./benchmark -model_type=vision -device_id=0 -batch_size=16 -om_path=resnext50_bs16.om -input_text_path=./resnext50_prep_bin.info -input_width=224 -input_height=224 -output_binary=False -useDvpp=False
+```
+By default the results are saved under result/dumpOutput_devicex in the current directory. The model has a single output named class with shape bs * 1000 and dtype FP32, holding the prediction scores for the 1000 classes; each input produces one _x.bin output file.
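+
+A single result file can be inspected directly; a sketch, assuming the text output format described above (one line of 1000 space-separated scores per file under result/dumpOutput_device0/):
+```python
+import sys
+import numpy as np
+
+with open(sys.argv[1]) as f:  # one output file of the benchmark run
+    scores = np.array(f.readline().split(), dtype=np.float32)
+print(np.argsort(-scores)[:5])  # indices of the five highest-scoring classes
+```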
+
+## 6 Accuracy Comparison
+
+- **[Offline Inference TopN Accuracy](#61-offline-inference-topn-accuracy)**
+- **[Open-Source TopN Accuracy](#62-open-source-topn-accuracy)**
+- **[Accuracy Comparison](#63-accuracy-comparison)**
+
+### 6.1 Offline Inference TopN Accuracy
+
+Post-processing computes the TopN accuracy
+```python
+import os
+import sys
+import json
+import numpy as np
+import time
+
+np.set_printoptions(threshold=sys.maxsize)
+
+LABEL_FILE = "HiAI_label.json"
+
+
+def gen_file_name(img_name):
+    full_name = img_name.split('/')[-1]
+    index = full_name.rfind('.')
+    return full_name[:index]
+
+
+def cre_groundtruth_dict(gtfile_path):
+    """
+    :param filename: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    for gtfile in os.listdir(gtfile_path):
+        if (gtfile != LABEL_FILE):
+            with open(os.path.join(gtfile_path, gtfile), 'r') as f:
+                gt = json.load(f)
+                ret = gt["image"]["annotations"][0]["category_id"]
+                img_gt_dict[gen_file_name(gtfile)] = ret
+    return img_gt_dict
+
+
+def cre_groundtruth_dict_fromtxt(gtfile_path):
+    """
+    :param filename: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    with open(gtfile_path, 'r')as f:
+        for line in f.readlines():
+            temp = line.strip().split(" ")
+            imgName = temp[0].split(".")[0]
+            imgLab = temp[1]
+            img_gt_dict[imgName] = imgLab
+    return img_gt_dict
+
+
+def load_statistical_predict_result(filepath):
+    """
+    function:
+    extract the data from the prediction result file
+    input:
+    result file: filepath
+    output:
+    n_label: number of labels
+    data_vec: the probabilities of the 1000 predictions
+    :return: probabilities, number of labels, in_type, color
+    """
+    with open(filepath, 'r')as f:
+        data = f.readline()
+        temp = data.strip().split(" ")
+        n_label = len(temp)
+        if data == '':
+            n_label = 0
+        data_vec = np.zeros((n_label), dtype=np.float32)
+        in_type = ''
+        color = ''
+        if n_label == 0:
+            in_type = f.readline()
+            color = f.readline()
+        else:
+            for ind, prob in enumerate(temp):
+                data_vec[ind] = np.float32(prob)
+    return data_vec, n_label, in_type, color
+
+
+def create_visualization_statistical_result(prediction_file_path,
+                                            result_store_path, json_file_name,
+                                            img_gt_dict, topn=5):
+    """
+    :param prediction_file_path:
+    :param result_store_path:
+    :param json_file_name:
+    :param img_gt_dict:
+    :param topn:
+    :return:
+    """
+    writer = open(os.path.join(result_store_path, json_file_name), 'w')
+    table_dict = {}
+    table_dict["title"] = "Overall statistical evaluation"
+    table_dict["value"] = []
+
+    count = 0
+    resCnt = 0
+    n_labels = 0
+    count_hit = np.zeros(topn)
+    for tfile_name in os.listdir(prediction_file_path):
+        count += 1
+        temp = tfile_name.split('.')[0]
+        index = temp.rfind('_')
+        img_name = temp[:index]
+        filepath = os.path.join(prediction_file_path, tfile_name)
+        ret = load_statistical_predict_result(filepath)
+        prediction = ret[0]
+        n_labels = ret[1]
+        sort_index = np.argsort(-prediction)
+        gt = img_gt_dict[img_name]
+        if (n_labels == 1000):
+            realLabel = int(gt)
+        elif (n_labels == 1001):
+            realLabel = int(gt) + 1
+        else:
+            realLabel = int(gt)
+
+        resCnt = min(len(sort_index), topn)
+        for i in range(resCnt):
+            if (str(realLabel) == str(sort_index[i])):
+                count_hit[i] += 1
+                break
+
+    if 'value' not in table_dict.keys():
+        print("the item value does not exist!")
+    else:
+        table_dict["value"].extend(
+            [{"key": "Number of images", "value": str(count)},
+             {"key": "Number of classes", "value": str(n_labels)}])
+        if count == 0:
+            accuracy = 0
+        else:
+            accuracy = np.cumsum(count_hit) / count
+        for i in range(resCnt):
+            table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy",
+                                        "value": str(
+                                            round(accuracy[i] * 100, 2)) + '%'})
+        json.dump(table_dict, writer)
+    writer.close()
+
+
+if __name__ == '__main__':
+    start = time.time()
+    try:
+        # txt file path
+        folder_davinci_target = sys.argv[1]
+        # annotation files path, "val_label.txt"
+        annotation_file_path = sys.argv[2]
+        # the path to store the results json path
+        result_json_path = sys.argv[3]
+        # result json file name
+        json_file_name = sys.argv[4]
+    except IndexError:
+        print("Stopped!")
+        exit(1)
+
+    if not (os.path.exists(folder_davinci_target)):
+        print("target file folder does not exist.")
+
+    if not (os.path.exists(annotation_file_path)):
+        print("Ground truth file does not exist.")
+
+    if not (os.path.exists(result_json_path)):
+        print("Result folder doesn't exist.")
+
+    img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path)
+    create_visualization_statistical_result(folder_davinci_target,
+                                            result_json_path, json_file_name,
+                                            img_label_dict, topn=5)
+
+    elapsed = (time.time() - start)
+    print("Time used:", elapsed)
+```
+Run the vision_metric_ImageNet.py script to compare the inference results against the labels; it reports Top1 through Top5 accuracy and saves the result in result.json.
+```
+python3 vision_metric_ImageNet.py result/dumpOutput_device0/ dataset/ImageNet/val_label.txt ./ result.json
+```
+The first argument is the benchmark output directory, the second the dataset label file, the third the directory for the generated file, and the fourth the generated file name.
+Check the output:
+```
+{"title": "Overall statistical evaluation", "value": [{"key": "Number of images", "value": "50000"}, {"key": "Number of classes", "value": "1000"}, {"key": "Top1 accuracy", "value": "77.62%"}, {"key": "Top2 accuracy", "value": "87.42%"}, {"key": "Top3 accuracy", "value": "90.79%"}, {"key": "Top4 accuracy", "value": "92.56%"}, {"key": "Top5 accuracy", "value": "93.69%"}]
+```
+### 6.2 Open-Source TopN Accuracy
+[torchvision published accuracy](https://pytorch.org/vision/stable/models.html)
+```
+Model               Acc@1    Acc@5
+ResNeXt-50-32x4d    77.618   93.698
+```
+### 6.3 Accuracy Comparison
+The TopN accuracy of the om offline model is within 1% of the accuracy published in the model's github repository, so the accuracy target is met.
+
+## 7 Performance Comparison
+
+- **[NPU Performance Data](#71-npu-performance-data)**
+- **[T4 Performance Data](#72-t4-performance-data)**
+- **[Performance Comparison](#73-performance-comparison)**
+
+### 7.1 NPU Performance Data
+batch 1 performance:
+ Make sure the device is idle when measuring NPU performance; the npu-smi info command shows whether the device is running other inference tasks
+```
+./benchmark -round=50 -om_path=resnext50_bs1.om -device_id=0 -batch_size=1
+```
+Run 50 rounds of pure inference and take the mean; the throughput and its reciprocal, the latency, are reported (benchmark's latency is the inference time of a single sample). The NPU numbers are the result of a single device
+```
+[INFO] Dataset number: 49 finished cost 2.635ms
+[INFO] PureInfer result saved in ./result/PureInfer_perf_of_resnext50_bs1_in_device_0.txt
+-----------------PureInfer Performance Summary------------------
+[INFO] ave_throughputRate: 374.313samples/s, ave_latency: 2.67914ms
+```
+batch 16 performance:
+```
+./benchmark -round=50 -om_path=resnext50_bs16.om -device_id=0 -batch_size=16
+```
+```
+[INFO] Dataset number: 49 finished cost 30.514ms
+[INFO] PureInfer result saved in ./result/PureInfer_perf_of_resnext50_bs16_in_device_0.txt
+-----------------PureInfer Performance Summary------------------
+[INFO] ave_throughputRate: 524.094samples/s, ave_latency: 1.9101ms
+```
+### 7.2 T4 Performance Data
+batch 1 performance:
+Install the open-source TensorRT on the T4 machine
+```
+cd /usr/local/TensorRT-7.2.2.3/targets/x86_64-linux-gnu/bin/
+./trtexec --onnx=resnext50.onnx --fp16 --shapes=image:1x3x224x224 --threads
+```
+The GPU T4 numbers are the result of 4 devices running in parallel; mean is the latency (TensorRT's latency is the inference time of one batch of samples), i.e. the reciprocal of the throughput multiplied by the batch size
+```
+[03/24/2021-03:54:47] [I] GPU Compute
+[03/24/2021-03:54:47] [I] min: 1.26575 ms
+[03/24/2021-03:54:47] [I] max: 4.41528 ms
+[03/24/2021-03:54:47] [I] mean: 1.31054 ms
+[03/24/2021-03:54:47] [I] median: 1.30151 ms
+[03/24/2021-03:54:47] [I] percentile: 1.40723 ms at 99%
+[03/24/2021-03:54:47] [I] total compute time: 2.9972 s
+```
+batch 16 performance:
+```
+./trtexec --onnx=resnext50.onnx --fp16 --shapes=image:16x3x224x224 --threads
+```
+```
+[03/24/2021-03:57:22] [I] GPU Compute
+[03/24/2021-03:57:22] [I] min: 12.5645 ms
+[03/24/2021-03:57:22] [I] max: 14.8437 ms
+[03/24/2021-03:57:22] [I] mean: 12.9561 ms
+[03/24/2021-03:57:22] [I] median: 12.8541 ms
+[03/24/2021-03:57:22] [I] percentile: 14.8377 ms at 99%
+[03/24/2021-03:57:22] [I] total compute time: 3.03173 s
+```
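+
+The comparison rule applied in 7.3 below can be spelled out numerically; a small sketch using only the latency numbers reported above (benchmark's ave_latency is per sample on one of the four 310 devices, trtexec's mean is per batch):
+```python
+npu_ms = {1: 2.67914, 16: 1.9101}   # ave_latency from section 7.1
+t4_ms = {1: 1.31054, 16: 12.9561}   # mean GPU compute time from section 7.2
+for bs in (1, 16):
+    # one 310 card holds 4 devices, so compare npu latency / 4 against T4 latency / batch
+    print(bs, npu_ms[bs] / 4, t4_ms[bs] / bs, npu_ms[bs] / 4 < t4_ms[bs] / bs)
+```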
+### 7.3 Performance Comparison
+batch 1: 2.67914/4 < 1.31054/1
+batch 16: 1.9101/4 < 12.9561/16
+The NPU throughput multiplied by 4 is larger than the T4 throughput, i.e. the NPU latency divided by 4 is smaller than the T4 latency divided by the batch size, so NPU performance is higher than T4 performance and the performance target is met.
+For both batch 1 and batch 16 the NPU performance exceeds 1.2x the T4 performance, so the model is placed under the benchmark/cv/classification directory.
+
+
 diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/LICENSE" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/LICENSE" new file mode 100644 index 0000000..eeac88f --- /dev/null +++ 
"b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/LICENSE" @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/README.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/README.md" new file mode 100644 index 0000000..a37c703 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/README.md" @@ -0,0 +1,345 @@
+# ResNeXt50 ONNX Model End-to-End Inference Guide
+- [1 Model Overview](#1-model-overview)
+  - [1.1 Paper](#11-paper)
+  - [1.2 Code](#12-code)
+- [2 Environment](#2-environment)
+  - [2.1 Deep Learning Frameworks](#21-deep-learning-frameworks)
+  - [2.2 Python Third-Party Libraries](#22-python-third-party-libraries)
+- [3 Model Conversion](#3-model-conversion)
+  - [3.1 pth to onnx](#31-pth-to-onnx)
+  - [3.2 onnx to om](#32-onnx-to-om)
+- [4 Dataset Preprocessing](#4-dataset-preprocessing)
+  - [4.1 Obtaining the Dataset](#41-obtaining-the-dataset)
+  - [4.2 Dataset Preprocessing](#42-dataset-preprocessing)
+  - [4.3 Generating the Dataset Info File](#43-generating-the-dataset-info-file)
+- [5 Offline Inference](#5-offline-inference)
+  - [5.1 benchmark Tool Overview](#51-benchmark-tool-overview)
+  - [5.2 Offline Inference](#52-offline-inference)
+- [6 Accuracy Comparison](#6-accuracy-comparison)
+  - [6.1 Offline Inference Accuracy Statistics](#61-offline-inference-accuracy-statistics)
+  - [6.2 Open-Source Accuracy](#62-open-source-accuracy)
+  - [6.3 Accuracy Comparison](#63-accuracy-comparison)
+- [7 Performance Comparison](#7-performance-comparison)
+  - [7.1 NPU Performance Data](#71-npu-performance-data)
+  - [7.2 T4 Performance Data](#72-t4-performance-data)
+  - [7.3 Performance Comparison](#73-performance-comparison)
+
+
+
+## 1 Model Overview
+
+- **[Paper](#11-paper)**
+
+- **[Code](#12-code)**
+
+### 1.1 Paper
+[ResNeXt50 paper](https://arxiv.org/abs/1611.05431)
+
+### 1.2 Code
+[ResNeXt50 code](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py)
+branch:master
+commit id:b68adcf9a9280aef02fc08daed170d74d0892361
+$\color{red}{Note: strikethrough marks the items a README.md must cover; the strikethrough notes below must be deleted from the final README.md}$
+~~Prefer the open-source repository provided for this task, and fill in the branch and commit id. The commit id is found under the repository's commits on github; it identifies the code revision the inference is based on, usually the last commit of a stable release or the latest commit of the repository~~
+
+
+## 2 Environment
+
+- **[Deep Learning Frameworks](#21-deep-learning-frameworks)**
+
+- **[Python Third-Party Libraries](#22-python-third-party-libraries)**
+
+### 2.1 Deep Learning Frameworks
+```
+python3.7.5
+CANN 5.0.1
+
+pytorch >= 1.5.0
+torchvision >= 0.6.0
+onnx >= 1.7.0
+```
+~~The 310 inference servers currently run the blue-zone commercial CANN 5.0.1. Unless a specific version is required, write these three libraries exactly as above; run scripts with the python3.7 command and install libraries with the pip3.7 command. Use torch 1.5.0; if exporting onnx from the open-source model code requires a torch newer than 1.5.0, use 1.8.0 and note it here~~
+
+### 2.2 Python Third-Party Libraries
+
+```
+numpy == 1.20.3
+Pillow == 8.2.0
+opencv-python == 4.5.2.54
+```
+~~requirements.txt must list the exact version of every library this model's offline inference depends on, i.e. the version used on the 310 inference server~~
+
+**Note:**
+> x86: pytorch, torchvision and onnx can be installed from the official whl packages; everything else can be installed with pip3.7 install <package name>
+>
+> Arm: pytorch, torchvision and onnx need to be built from source; everything else can be installed with pip3.7 install <package name>
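+
+A quick way to confirm the installed versions match the lists above (a sketch; it simply prints whatever is installed in the current python3.7 environment):
+```python
+import torch, torchvision, onnx
+import numpy, PIL, cv2
+
+# compare the printed versions against sections 2.1 and 2.2
+for m in (torch, torchvision, onnx, numpy, PIL, cv2):
+    print(m.__name__, m.__version__)
+```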
+
+## 3 Model Conversion
+
+- **[pth to onnx](#31-pth-to-onnx)**
+
+- **[onnx to om](#32-onnx-to-om)**
+
+### 3.1 pth to onnx
+
+1. Download the pth weight file
+[ResNeXt50 pretrained pth weights](https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth)
+File md5sum: 1d6611049e6ef03f1d6afa11f6f9023e
+```
+wget https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
+```
+~~Prefer the weight file produced by training. If it is available online, give the URL; otherwise state where to obtain it. If training provides no weights, use the open-source repository's weight file. Give the weight file name and its md5sum value computed with the md5sum command~~
+2. The resnext50 model code lives in torchvision, so install torchvision; on Arm it must be built from source, see the torchvision website; if the installation reports errors, search the web for a fix
+```
+git clone https://github.com/pytorch/vision
+cd vision
+git reset b68adcf9a9280aef02fc08daed170d74d0892361 --hard
+python3.7 setup.py install
+cd ..
+```
+~~If the model's open-source code needs changes, apply them as a patch before installing: patch -p1 < ../{patch_name}.diff~~
+3. Write the pth2onnx script resnext50_pth2onnx.py
+~~If the open-source repository provides no install script, add it to the search path with sys.path.append(r"./vision") and then import the repository's functions and classes~~
+ **Note:**
+>ATC currently supports onnx operator set version 11
+
+4. Run the pth2onnx script to generate the onnx model file
+```
+python3.7 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx
+```
+
+ **Model conversion notes:**
+~~If an operator defect in the CANN package makes the conversion fail or a workaround is needed to make it succeed, describe the main debugging steps, the cause and the fix here~~
+>Converting this model to onnx requires no changes to the open-source code, so there is nothing special to note
+
+### 3.2 onnx to om
+
+1. Set the environment variables
+```
+source env.sh
+```
+2. Use atc to convert the onnx model into an om model file; for tool usage see CANN 5.0.1 开发辅助工具指南 (推理) 01
+```
+atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310
+```
+
+## 4 Dataset Preprocessing
+
+- **[Obtaining the Dataset](#41-obtaining-the-dataset)**
+
+- **[Dataset Preprocessing](#42-dataset-preprocessing)**
+
+- **[Generating the Dataset Info File](#43-generating-the-dataset-info-file)**
+
+### 4.1 Obtaining the Dataset
+The model is tested on the 50,000-image validation set from the [ImageNet website](http://www.image-net.org). The images and labels are stored in /root/datasets/imagenet/val and /root/datasets/imagenet/val_label.txt respectively.
+
+### 4.2 Dataset Preprocessing
+1. Preprocessing script imagenet_torch_preprocess.py
+
+2. Run the preprocessing script to generate the preprocessed bin files
+```
+python3.7 imagenet_torch_preprocess.py resnet /root/datasets/imagenet/val ./prep_dataset
+```
+### 4.3 Generating the Dataset Info File
+1. Info file generation script gen_dataset_info.py
+
+2. Run the script to generate the dataset info file
+```
+python3.7 gen_dataset_info.py bin ./prep_dataset ./resnext50_prep_bin.info 224 224
+```
+The first argument is the model input type, the second the path to the generated bin files, the third the output info file, and the last two the width and height.
+## 5 Offline Inference
+
+- **[benchmark Tool Overview](#51-benchmark-tool-overview)**
+
+- **[Offline Inference](#52-offline-inference)**
+
+### 5.1 benchmark Tool Overview
+
+The benchmark tool is Huawei's in-house model inference tool. It supports offline inference for many kinds of models, quickly measures model performance on the Ascend 310, and offers both real-data and pure-inference modes; combined with post-processing scripts it covers the end-to-end flow for many models. For how to obtain and use the tool, see CANN 5.0.1 推理benchmark工具用户指南 01
+### 5.2 Offline Inference
+1. Set the environment variables
+```
+source env.sh
+```
+2. Run offline inference
+```
+./benchmark.x86_64 -model_type=vision -device_id=0 -batch_size=16 -om_path=resnext50_bs16.om -input_text_path=./resnext50_prep_bin.info -input_width=224 -input_height=224 -output_binary=False -useDvpp=False
+```
+By default the results are saved under result/dumpOutput_device{0} in the current directory. The model has a single output named class with shape bs * 1000 and dtype FP32, holding the prediction scores for the 1000 classes; each input produces one _x.bin output file.
+
+## 6 Accuracy Comparison
+
+- **[Offline Inference Accuracy](#61-offline-inference-accuracy-statistics)**
+- **[Open-Source Accuracy](#62-open-source-accuracy)**
+- **[Accuracy Comparison](#63-accuracy-comparison)**
+
+### 6.1 Offline Inference Accuracy Statistics
+
+Post-processing computes the TopN accuracy
+
+Run the imagenet_acc_eval.py script to compare the inference results against the labels; it reports Top1 through Top5 accuracy and saves the result in result.json.
+```
+python3.7 imagenet_acc_eval.py result/dumpOutput_device0/ /root/datasets/imagenet/val_label.txt ./ result.json
+```
+The first argument is the benchmark output directory, the second the dataset label file, the third the directory for the generated file, and the fourth the generated file name.
+Check the output:
+```
+{"title": "Overall statistical evaluation", "value": [{"key": "Number of images", "value": "50000"}, {"key": "Number of classes", "value": "1000"}, {"key": "Top1 accuracy", "value": "77.62%"}, {"key": "Top2 accuracy", "value": "87.42%"}, {"key": "Top3 accuracy", "value": "90.79%"}, {"key": "Top4 accuracy", "value": "92.56%"}, {"key": "Top5 accuracy", "value": "93.69%"}]
+```
+Testing both the bs1 and bs16 om shows no accuracy difference between batch 1 and batch 16; both match the numbers above
+~~Because batch size may affect accuracy, if the model supports multiple batch sizes, accuracy must be tested for bs1 and bs16, and only for those~~
+
+### 6.2 Open-Source Accuracy
+[torchvision published accuracy](https://pytorch.org/vision/stable/models.html)
+```
+Model               Acc@1    Acc@5
+ResNeXt-50-32x4d    77.618   93.698
+```
+### 6.3 Accuracy Comparison
+The TopN accuracy of the om offline model is within 1% of the accuracy published in the model's github repository, so the accuracy target is met.
+ **Accuracy debugging:**
+~~If an operator defect in the CANN package causes an accuracy shortfall or a workaround is needed to pass, describe the main debugging steps, the cause and the fix here~~
+>No accuracy shortfall was encountered, so no accuracy debugging was needed
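+
+The 1% criterion above can also be written as an explicit check; a sketch, where the om values are the ones read from result.json above and the reference values come from the torchvision page:
+```python
+ref = {"top1": 77.618, "top5": 93.698}  # torchvision published accuracy
+om = {"top1": 77.62, "top5": 93.69}     # accuracy measured from result.json
+for k in ref:
+    assert ref[k] - om[k] < 1.0, f"{k} drops by more than 1%"
+print("accuracy check passed")
+```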
+
+## 7 Performance Comparison
+
+- **[NPU Performance Data](#71-npu-performance-data)**
+- **[T4 Performance Data](#72-t4-performance-data)**
+- **[Performance Comparison](#73-performance-comparison)**
+
+~~Performance must be measured for bs1, 16, 4, 8 and 32, and the single-card throughput must be computed. For npu, bs1 and bs16 must be measured by inference over the whole dataset; to avoid occupying the device for long, bs4, 8 and 32 may be measured with pure inference~~
+
+### 7.1 NPU Performance Data
+The benchmark tool also reports performance when inferring over the whole dataset, but inferring the whole dataset is slow; when measuring this way, make sure you have exclusive use of the device for the whole run (npu-smi info shows whether the device is idle). benchmark's pure-inference mode can also measure performance, but since random inputs cannot reproduce the real data distribution, the pure-inference numbers may be inaccurate for some models; use pure inference only to get rough numbers quickly for debugging and optimization, and as a first check of whether whole-dataset numbers were skewed because other inference tasks were also using the device. The model's official performance is the bs1 and bs16 numbers measured by benchmark over the whole dataset; the bs4, 8 and 32 numbers measured with benchmark are simply recorded in README.md as below.
+1. benchmark measures performance while inferring over the whole dataset
+batch 1 performance; after inferring over the whole dataset benchmark writes result/perf_vision_batchsize_1_device_0.txt:
+```
+[e2e] throughputRate: 243.034, latency: 205733
+[data read] throughputRate: 258.963, moduleLatency: 3.86155
+[preprocess] throughputRate: 258.404, moduleLatency: 3.86991
+[infer] throughputRate: 244.435, Interface throughputRate: 382.328, moduleLatency: 3.35758
+[post] throughputRate: 244.435, moduleLatency: 4.09107
+```
+Interface throughputRate: 382.328; 382.328x4=1529.312 is the batch 1 single-card throughput on the 310
+batch 16 performance; after inferring over the whole dataset benchmark writes result/perf_vision_batchsize_16_device_1.txt:
+```
+[e2e] throughputRate: 173.173, latency: 288729
+[data read] throughputRate: 174.62, moduleLatency: 5.72673
+[preprocess] throughputRate: 174.357, moduleLatency: 5.73535
+[infer] throughputRate: 173.844, Interface throughputRate: 519.634, moduleLatency: 3.36724
+[post] throughputRate: 10.865, moduleLatency: 92.0383
+```
+Interface throughputRate: 519.634; 519.634x4=2078.536 is the batch 16 single-card throughput on the 310
+batch 4 performance:
+```
+[e2e] throughputRate: 232.98, latency: 214611
+[data read] throughputRate: 235.537, moduleLatency: 4.24562
+[preprocess] throughputRate: 235.147, moduleLatency: 4.25266
+[infer] throughputRate: 234.437, Interface throughputRate: 492.99, moduleLatency: 3.48397
+[post] throughputRate: 58.6087, moduleLatency: 17.0623
+```
+batch 4 310 single-card throughput: 492.99x4=1971.96fps
+batch 8 performance:
+```
+[e2e] throughputRate: 211.307, latency: 236622
+[data read] throughputRate: 212.246, moduleLatency: 4.71152
+[preprocess] throughputRate: 211.931, moduleLatency: 4.71851
+[infer] throughputRate: 211.927, Interface throughputRate: 496.378, moduleLatency: 3.45797
+[post] throughputRate: 26.4906, moduleLatency: 37.7493
+```
+batch 8 310 single-card throughput: 496.378x4=1985.512fps
+batch 32 performance:
+```
+[e2e] throughputRate: 122.942, latency: 406696
+[data read] throughputRate: 123.244, moduleLatency: 8.11402
+[preprocess] throughputRate: 123.143, moduleLatency: 8.12064
+[infer] throughputRate: 123.207, Interface throughputRate: 377.787, moduleLatency: 4.10655
+[post] throughputRate: 3.8514, moduleLatency: 259.646
+```
+batch 32 310 single-card throughput: 377.787x4=1511.148fps
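+
+The "Interface throughputRate" values above can be pulled out of the perf files programmatically; a sketch, assuming the result/perf_vision_batchsize_*_device_*.txt format shown above:
+```python
+import re
+import sys
+
+text = open(sys.argv[1]).read()  # e.g. result/perf_vision_batchsize_1_device_0.txt
+rate = float(re.search(r"Interface throughputRate: ([\d.]+)", text).group(1))
+print(rate, rate * 4)  # per-device rate, and x4 for the 310 single-card throughput
+```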
+
+### 7.2 T4 Performance Data
+Measure GPU performance on a server with a T4 card, making sure the card is not running any other task during the test. TensorRT version: 7.2.3.4, cuda version: 11.0, cudnn version: 8.2
+~~the T4 server currently has the cuda, cudnn and TensorRT versions listed above~~
+batch 1 performance:
+```
+trtexec --onnx=resnext50.onnx --fp16 --shapes=image:1x3x224x224 --threads
+```
+The GPU T4 numbers are the result of 4 devices running in parallel; mean is the latency (TensorRT's latency is the inference time of one batch of samples), i.e. the reciprocal of the throughput multiplied by the batch size. --fp16 selects the operator precision; currently only --fp16 is measured. Note that --shapes takes the onnx input node name and shape; when the batch dimension of the onnx input is -1, the same onnx file can be used to measure different batch sizes, otherwise numbers measured at other batch sizes with a fixed-batch onnx are inaccurate
+```
+[03/24/2021-03:54:47] [I] GPU Compute
+[03/24/2021-03:54:47] [I] min: 1.26575 ms
+[03/24/2021-03:54:47] [I] max: 4.41528 ms
+[03/24/2021-03:54:47] [I] mean: 1.31054 ms
+[03/24/2021-03:54:47] [I] median: 1.30151 ms
+[03/24/2021-03:54:47] [I] percentile: 1.40723 ms at 99%
+[03/24/2021-03:54:47] [I] total compute time: 2.9972 s
+```
+batch 1 t4 single-card throughput: 1000/(1.31054/1)=763.044fps
+
+batch 16 performance:
+```
+trtexec --onnx=resnext50.onnx --fp16 --shapes=image:16x3x224x224 --threads
+```
+```
+[03/24/2021-03:57:22] [I] GPU Compute
+[03/24/2021-03:57:22] [I] min: 12.5645 ms
+[03/24/2021-03:57:22] [I] max: 14.8437 ms
+[03/24/2021-03:57:22] [I] mean: 12.9561 ms
+[03/24/2021-03:57:22] [I] median: 12.8541 ms
+[03/24/2021-03:57:22] [I] percentile: 14.8377 ms at 99%
+[03/24/2021-03:57:22] [I] total compute time: 3.03173 s
+```
+batch 16 t4 single-card throughput: 1000/(12.9561/16)=1234.940fps
+
+batch 4 performance:
+```
+[05/27/2021-03:16:26] [I] GPU Compute
+[05/27/2021-03:16:26] [I] min: 3.77515 ms
+[05/27/2021-03:16:26] [I] max: 4.07959 ms
+[05/27/2021-03:16:26] [I] mean: 3.92862 ms
+[05/27/2021-03:16:26] [I] median: 3.9552 ms
+[05/27/2021-03:16:26] [I] percentile: 4.07324 ms at 99%
+[05/27/2021-03:16:26] [I] total compute time: 3.0054 s
+```
+batch 4 t4 single-card throughput: 1000/(3.92862/4)=1018.169fps
+
+batch 8 performance:
+```
+[05/27/2021-03:14:52] [I] GPU Compute
+[05/27/2021-03:14:52] [I] min: 6.52148 ms
+[05/27/2021-03:14:52] [I] max: 7.22937 ms
+[05/27/2021-03:14:52] [I] mean: 6.80709 ms
+[05/27/2021-03:14:52] [I] median: 6.78735 ms
+[05/27/2021-03:14:52] [I] percentile: 7.08972 ms at 99%
+[05/27/2021-03:14:52] [I] total compute time: 3.01554 s
+```
+batch 8 t4 single-card throughput: 1000/(6.80709/8)=1175.245fps
+
+batch 32 performance:
+```
+[05/27/2021-03:13:11] [I] GPU Compute
+[05/27/2021-03:13:11] [I] min: 23.126 ms
+[05/27/2021-03:13:11] [I] max: 26.0043 ms
+[05/27/2021-03:13:11] [I] mean: 24.2826 ms
+[05/27/2021-03:13:11] [I] median: 24.2343 ms
+[05/27/2021-03:13:11] [I] percentile: 25.6355 ms at 99%
+[05/27/2021-03:13:11] [I] total compute time: 3.05961 s
+```
+batch 32 t4 single-card throughput: 1000/(24.2826/32)=1317.816fps
+
+### 7.3 Performance Comparison
+batch 1: 382.328x4 > 1000x1/(1.31054/1)
+batch 16: 519.634x4 > 1000x1/(12.9561/16)
+The 310 single-card throughput (the throughput of one device multiplied by 4) is larger than the T4 single-card throughput, so 310 performance is higher than T4 performance and the performance target is met.
+For both batch 1 and batch 16 the 310 performance exceeds 1.2x the T4 performance, so the model is placed under the Benchmark/cv/classification directory.
+~~Comparing bs1 and bs16: below 1x the model goes under Research, 1-1.2x under Official, above 1.2x under Benchmark; in practice, submitted code currently all goes under the Research directory~~
+ **Performance optimization:**
+~~If an operator defect in the CANN package causes a performance shortfall or a workaround is needed to pass, describe the main debugging steps, the cause and the fix here~~
+>No performance shortfall was encountered, so no performance optimization was needed
+
+~~If accuracy or performance misses the target on the blue-zone commercial CANN but passes with the latest blue-zone community CANN, state the cause and the community CANN version here and measure with that version. If an unavoidable operator defect causes the performance shortfall, state the cause and the workaround here. If the onnx contains custom operators and cannot be inferred, note that performance was measured as online inference on the T4. If the model does not support batch 16, say so as well~~
+
+
 diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/env.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/env.sh" new file mode 100644 index 0000000..49be8f1 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/env.sh" @@ -0,0 +1,8 @@ +#! 
/bin/bash + +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH +export ASCEND_OPP_PATH=${install_path}/opp +export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/gen_dataset_info.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/gen_dataset_info.py" new file mode 100644 index 0000000..80c2b0f --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/gen_dataset_info.py" @@ -0,0 +1,60 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import cv2 +from glob import glob + + +def get_bin_info(file_path, info_name, width, height): + bin_images = glob(os.path.join(file_path, '*.bin')) + with open(info_name, 'w') as file: + for index, img in enumerate(bin_images): + content = ' '.join([str(index), img, width, height]) + file.write(content) + file.write('\n') + + +def get_jpg_info(file_path, info_name): + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + image_names = [] + for extension in extensions: + image_names.append(glob(os.path.join(file_path, '*.' 
+ extension)))
+    with open(info_name, 'w') as file:
+        for image_name in image_names:
+            if len(image_name) == 0:
+                continue
+            else:
+                for index, img in enumerate(image_name):
+                    img_cv = cv2.imread(img)
+                    shape = img_cv.shape
+                    width, height = shape[1], shape[0]
+                    content = ' '.join([str(index), img, str(width), str(height)])
+                    file.write(content)
+                    file.write('\n')
+
+
+if __name__ == '__main__':
+    file_type = sys.argv[1]
+    file_path = sys.argv[2]
+    info_name = sys.argv[3]
+    if file_type == 'bin':
+        assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5'
+        width = sys.argv[4]
+        height = sys.argv[5]
+        get_bin_info(file_path, info_name, width, height)
+    elif file_type == 'jpg':
+        assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3'
+        get_jpg_info(file_path, info_name)
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_acc_eval.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_acc_eval.py"
new file mode 100644
index 0000000..0e1db27
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_acc_eval.py"
@@ -0,0 +1,183 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
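+
+# imagenet_acc_eval.py: compare the benchmark tool's per-image result txt files
+# against the ImageNet val_label.txt ground truth and write Top1-Top5 accuracy
+# into a json file.
+# Usage: python3.7 imagenet_acc_eval.py result/dumpOutput_device0/ val_label.txt ./ result_bs1.json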
+ +import os +import sys +import json +import numpy as np +import time + +np.set_printoptions(threshold=sys.maxsize) + +LABEL_FILE = "HiAI_label.json" + + +def gen_file_name(img_name): + full_name = img_name.split('/')[-1] + index = full_name.rfind('.') + return full_name[:index] + + +def cre_groundtruth_dict(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + for gtfile in os.listdir(gtfile_path): + if (gtfile != LABEL_FILE): + with open(os.path.join(gtfile_path, gtfile), 'r') as f: + gt = json.load(f) + ret = gt["image"]["annotations"][0]["category_id"] + img_gt_dict[gen_file_name(gtfile)] = ret + return img_gt_dict + + +def cre_groundtruth_dict_fromtxt(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + with open(gtfile_path, 'r')as f: + for line in f.readlines(): + temp = line.strip().split(" ") + imgName = temp[0].split(".")[0] + imgLab = temp[1] + img_gt_dict[imgName] = imgLab + return img_gt_dict + + +def load_statistical_predict_result(filepath): + """ + function: + the prediction esult file data extraction + input: + result file:filepath + output: + n_label:numble of label + data_vec: the probabilitie of prediction in the 1000 + :return: probabilities, numble of label, in_type, color + """ + with open(filepath, 'r')as f: + data = f.readline() + temp = data.strip().split(" ") + n_label = len(temp) + if data == '': + n_label = 0 + data_vec = np.zeros((n_label), dtype=np.float32) + in_type = '' + color = '' + if n_label == 0: + in_type = f.readline() + color = f.readline() + else: + for ind, prob in enumerate(temp): + data_vec[ind] = np.float32(prob) + return data_vec, n_label, in_type, color + + +def create_visualization_statistical_result(prediction_file_path, + result_store_path, json_file_name, + img_gt_dict, topn=5): + """ + :param prediction_file_path: + :param result_store_path: + :param json_file_name: + :param img_gt_dict: + :param topn: + :return: + """ + writer = open(os.path.join(result_store_path, json_file_name), 'w') + table_dict = {} + table_dict["title"] = "Overall statistical evaluation" + table_dict["value"] = [] + + count = 0 + resCnt = 0 + n_labels = 0 + count_hit = np.zeros(topn) + for tfile_name in os.listdir(prediction_file_path): + count += 1 + temp = tfile_name.split('.')[0] + index = temp.rfind('_') + img_name = temp[:index] + filepath = os.path.join(prediction_file_path, tfile_name) + ret = load_statistical_predict_result(filepath) + prediction = ret[0] + n_labels = ret[1] + sort_index = np.argsort(-prediction) + gt = img_gt_dict[img_name] + if (n_labels == 1000): + realLabel = int(gt) + elif (n_labels == 1001): + realLabel = int(gt) + 1 + else: + realLabel = int(gt) + + resCnt = min(len(sort_index), topn) + for i in range(resCnt): + if (str(realLabel) == str(sort_index[i])): + count_hit[i] += 1 + break + + if 'value' not in table_dict.keys(): + print("the item value does not exist!") + else: + table_dict["value"].extend( + [{"key": "Number of images", "value": str(count)}, + {"key": "Number of classes", "value": str(n_labels)}]) + if count == 0: + accuracy = 0 + else: + accuracy = np.cumsum(count_hit) / count + for i in range(resCnt): + table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy", + "value": str( + round(accuracy[i] * 100, 2)) + '%'}) + json.dump(table_dict, writer) + writer.close() + + +if __name__ 
== '__main__': + start = time.time() + try: + # txt file path + folder_davinci_target = sys.argv[1] + # annotation files path, "val_label.txt" + annotation_file_path = sys.argv[2] + # the path to store the results json path + result_json_path = sys.argv[3] + # result json file name + json_file_name = sys.argv[4] + except IndexError: + print("Stopped!") + exit(1) + + if not (os.path.exists(folder_davinci_target)): + print("target file folder does not exist.") + + if not (os.path.exists(annotation_file_path)): + print("Ground truth file does not exist.") + + if not (os.path.exists(result_json_path)): + print("Result folder doesn't exist.") + + img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path) + create_visualization_statistical_result(folder_davinci_target, + result_json_path, json_file_name, + img_label_dict, topn=5) + + elapsed = (time.time() - start) + print("Time used:", elapsed) diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_torch_preprocess.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_torch_preprocess.py" new file mode 100644 index 0000000..65b50a5 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/imagenet_torch_preprocess.py" @@ -0,0 +1,116 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
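+
+# imagenet_torch_preprocess.py: re-implement torchvision's Resize / CenterCrop /
+# ToTensor / Normalize pipeline with PIL and numpy, and dump each image as a
+# float32 bin file that the benchmark tool can consume.
+# Usage: python3.7 imagenet_torch_preprocess.py resnet /root/datasets/imagenet/val ./prep_dataset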
+ +import os +import sys +from PIL import Image +import numpy as np +import multiprocessing + + +model_config = { + 'resnet': { + 'resize': 256, + 'centercrop': 224, + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + }, + 'inceptionv3': { + 'resize': 342, + 'centercrop': 299, + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + }, + 'inceptionv4': { + 'resize': 342, + 'centercrop': 299, + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + }, +} + + +def center_crop(img, output_size): + if isinstance(output_size, int): + output_size = (int(output_size), int(output_size)) + image_width, image_height = img.size + crop_height, crop_width = output_size + crop_top = int(round((image_height - crop_height) / 2.)) + crop_left = int(round((image_width - crop_width) / 2.)) + return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)) + + +def resize(img, size, interpolation=Image.BILINEAR): + if isinstance(size, int): + w, h = img.size + if (w <= h and w == size) or (h <= w and h == size): + return img + if w < h: + ow = size + oh = int(size * h / w) + return img.resize((ow, oh), interpolation) + else: + oh = size + ow = int(size * w / h) + return img.resize((ow, oh), interpolation) + else: + return img.resize(size[::-1], interpolation) + + +def gen_input_bin(mode_type, file_batches, batch): + i = 0 + for file in file_batches[batch]: + i = i + 1 + print("batch", batch, file, "===", i) + + # RGBA to RGB + image = Image.open(os.path.join(src_path, file)).convert('RGB') + image = resize(image, model_config[mode_type]['resize']) # Resize + image = center_crop(image, model_config[mode_type]['centercrop']) # CenterCrop + img = np.array(image, dtype=np.float32) + img = img.transpose(2, 0, 1) # ToTensor: HWC -> CHW + img = img / 255. # ToTensor: div 255 + img -= np.array(model_config[mode_type]['mean'], dtype=np.float32)[:, None, None] # Normalize: mean + img /= np.array(model_config[mode_type]['std'], dtype=np.float32)[:, None, None] # Normalize: std + img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin")) + + +def preprocess(mode_type, src_path, save_path): + files = os.listdir(src_path) + file_batches = [files[i:i + 500] for i in range(0, 50000, 500) if files[i:i + 500] != []] + thread_pool = multiprocessing.Pool(len(file_batches)) + for batch in range(len(file_batches)): + thread_pool.apply_async(gen_input_bin, args=(mode_type, file_batches, batch)) + thread_pool.close() + thread_pool.join() + print("in thread, except will not report! 
please ensure bin files generated.") + + +if __name__ == '__main__': + if len(sys.argv) < 4: + raise Exception("usage: python3 xxx.py [model_type] [src_path] [save_path]") + mode_type = sys.argv[1] + src_path = sys.argv[2] + save_path = sys.argv[3] + src_path = os.path.realpath(src_path) + save_path = os.path.realpath(save_path) + if mode_type not in model_config: + model_type_help = "model type: " + for key in model_config.keys(): + model_type_help += key + model_type_help += ' ' + raise Exception(model_type_help) + if not os.path.isdir(save_path): + os.makedirs(os.path.realpath(save_path)) + preprocess(mode_type, src_path, save_path) diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/requirements.txt" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/requirements.txt" new file mode 100644 index 0000000..8f0de95 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/requirements.txt" @@ -0,0 +1,6 @@ +torch == 1.5.0 +torchvision == 0.6.0 +onnx == 1.7.0 +numpy == 1.20.3 +Pillow == 8.2.0 +opencv-python == 4.5.2.54 \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/resnext50_pth2onnx.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/resnext50_pth2onnx.py" new file mode 100644 index 0000000..8e180e9 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/resnext50_pth2onnx.py" @@ -0,0 +1,35 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
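+
+# resnext50_pth2onnx.py: load torchvision's ResNeXt50-32x4d weights and export
+# an onnx model (opset 11) whose batch dimension is dynamic, so the same onnx
+# file can be converted to om models of any batch size.
+# Usage: python3.7 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx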
+
+import sys
+import torch
+import torch.onnx
+import torchvision.models as models
+
+def pth2onnx(input_file, output_file):
+    model = models.resnext50_32x4d(pretrained=False)
+    checkpoint = torch.load(input_file, map_location=None)
+    model.load_state_dict(checkpoint)
+
+    model.eval()
+    input_names = ["image"]
+    output_names = ["class"]
+    dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}}
+    dummy_input = torch.randn(1, 3, 224, 224)
+    torch.onnx.export(model, dummy_input, output_file, input_names=input_names, dynamic_axes=dynamic_axes, output_names=output_names, verbose=True, opset_version=11)
+
+if __name__ == "__main__":
+    input_file = sys.argv[1]
+    output_file = sys.argv[2]
+    pth2onnx(input_file, output_file)
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/README.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/README.md"
new file mode 100644
index 0000000..c252794
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/README.md"
@@ -0,0 +1,33 @@
+Environment preparation:
+
+1. Dataset path
+Common datasets are placed under /root/datasets/ or /opt/npu/
+The dataset for this model is placed under /root/datasets/
+
+2. Enter the working directory
+cd ResNext50
+
+3. Install the required dependencies; the test environment may already have some of these libraries installed in other versions, so installing with this command is not recommended when testing manually
+pip3.7 install -r requirements.txt
+
+4. Fetch, patch and install the open-source model code
+git clone https://github.com/pytorch/vision
+cd vision
+If the model code was modified, a {model_name}.diff is delivered:
+patch -p1 < ../{model_name}.diff
+If the model code needs to be installed, install it (if there is no install script and scripts such as pth2onnx need classes or functions from the model code, add it to the search path with sys.path.append(r"./vision")):
+python3.7 setup.py install
+cd ..
+
+5. Fetch the weight file
+wget https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
+
+6. Fetch the benchmark tool
+Place benchmark.x86_64 and benchmark.aarch64 in the current directory
+
+7. Run on the 310, making sure the device is idle while running
+bash test/pth2om.sh
+bash test/eval_acc_perf.sh --datasets_path=/root/datasets
+
+8. On the t4 machine, put the onnx file and perf_t4.sh in the same directory,
+then run bash perf_t4.sh, making sure the gpu is idle while running
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/eval_acc_perf.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/eval_acc_perf.sh"
new file mode 100644
index 0000000..46fcfc6
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/eval_acc_perf.sh"
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+datasets_path="/root/datasets/"
+
+for para in $*
+do
+    if [[ $para == --datasets_path* ]]; then
+        datasets_path=`echo ${para#*=}`
+    fi
+done
+
+arch=`uname -m`
+rm -rf ./prep_dataset
+python3.7 imagenet_torch_preprocess.py resnet ${datasets_path}/imagenet/val ./prep_dataset
+if [ $? != 0 ]; then
+    echo "fail!"
+    exit -1
+fi
+python3.7 gen_dataset_info.py bin ./prep_dataset ./resnext50_prep_bin.info 224 224
+if [ $? != 0 ]; then
+    echo "fail!"
+ exit -1 +fi +source env.sh +rm -rf result/dumpOutput_device0 +./benchmark.${arch} -model_type=vision -device_id=0 -batch_size=1 -om_path=resnext50_bs1.om -input_text_path=./resnext50_prep_bin.info -input_width=224 -input_height=224 -output_binary=False -useDvpp=False +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +rm -rf result/dumpOutput_device1 +./benchmark.${arch} -model_type=vision -device_id=1 -batch_size=16 -om_path=resnext50_bs16.om -input_text_path=./resnext50_prep_bin.info -input_width=224 -input_height=224 -output_binary=False -useDvpp=False +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 imagenet_acc_eval.py result/dumpOutput_device0/ ${datasets_path}/imagenet/val_label.txt ./ result_bs1.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 imagenet_acc_eval.py result/dumpOutput_device1/ ${datasets_path}/imagenet/val_label.txt ./ result_bs16.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "====accuracy data====" +python3.7 test/parse.py result_bs1.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 test/parse.py result_bs16.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "====performance data====" +python3.7 test/parse.py result/perf_vision_batchsize_1_device_0.txt +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 test/parse.py result/perf_vision_batchsize_16_device_1.txt +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "success" \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/parse.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/parse.py" new file mode 100644 index 0000000..b9c74f4 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/parse.py" @@ -0,0 +1,32 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
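+
+# parse.py: print Top1/Top5 accuracy from a result_*.json written by
+# imagenet_acc_eval.py, or print the 310 single-card throughput
+# (device fps x 4) from a result/perf_vision_*.txt written by the benchmark tool.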
+ +import sys +import json +import re + +if __name__ == '__main__': + if sys.argv[1].endswith('.json'): + result_json = sys.argv[1] + with open(result_json, 'r') as f: + content = f.read() + tops = [i.get('value') for i in json.loads(content).get('value') if 'Top' in i.get('key')] + print('om {} top1:{} top5:{}'.format(result_json.split('_')[1].split('.')[0], tops[0], tops[4])) + elif sys.argv[1].endswith('.txt'): + result_txt = sys.argv[1] + with open(result_txt, 'r') as f: + content = f.read() + txt_data_list = [i.strip() for i in re.findall(r':(.*?),', content.replace('\n', ',') + ',')] + fps = float(txt_data_list[7].replace('samples/s', '')) * 4 + print('310 bs{} fps:{}'.format(result_txt.split('_')[3], fps)) \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/perf_t4.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/perf_t4.sh" new file mode 100644 index 0000000..aee41c4 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/perf_t4.sh" @@ -0,0 +1,22 @@ +#!/bin/bash + +# T4上执行: +trtexec --onnx=resnext50.onnx --fp16 --shapes=image:1x3x224x224 --threads > resnext50_bs1.log +perf_str=`grep "GPU.* mean.*ms$" resnext50_bs1.log` +if [ -n "$perf_str" ]; then + perf_num=`echo $perf_str | awk -F' ' '{print $16}'` +else + perf_str=`grep "mean.*ms$" resnext50_bs1.log` + perf_num=`echo $perf_str | awk -F' ' '{print $4}'` +fi +awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}' + +trtexec --onnx=resnext50.onnx --fp16 --shapes=image:16x3x224x224 --threads > resnext50_bs16.log +perf_str=`grep "GPU.* mean.*ms$" resnext50_bs16.log` +if [ -n "$perf_str" ]; then + perf_num=`echo $perf_str | awk -F' ' '{print $16}'` +else + perf_str=`grep "mean.*ms$" resnext50_bs16.log` + perf_num=`echo $perf_str | awk -F' ' '{print $4}'` +fi +awk 'BEGIN{printf "t4 bs16 fps:%.3f\n", 1000*1/('$perf_num'/16)}' \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/pth2om.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/pth2om.sh" new file mode 100644 index 0000000..eaf285c --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNext50/test/pth2om.sh" @@ -0,0 +1,13 @@ +#!/bin/bash + +rm -rf resnext50.onnx +python3.7 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx +source env.sh +rm -rf resnext50_bs1.om resnext50_bs16.om +atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:1,3,224,224" --output=resnext50_bs1 --log=debug --soc_version=Ascend310 +atc --framework=5 --model=./resnext50.onnx --input_format=NCHW 
--input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310 +if [ -f "resnext50_bs1.om" ] && [ -f "resnext50_bs16.om" ]; then + echo "success" +else + echo "fail!" +fi \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" new file mode 100644 index 0000000..6c8e012 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" @@ -0,0 +1,140 @@ +diff --git a/mmdet/core/anchor/anchor_generator.py b/mmdet/core/anchor/anchor_generator.py +index 3c2fd5a0..f6d11fa7 100644 +--- a/mmdet/core/anchor/anchor_generator.py ++++ b/mmdet/core/anchor/anchor_generator.py +@@ -197,6 +197,8 @@ class AnchorGenerator: + tuple[torch.Tensor]: The mesh grids of x and y. + """ + # use shape instead of len to keep tracing while exporting to onnx ++ x = x.to(dtype=torch.int32) ++ y = y.to(dtype=torch.int32) + xx = x.repeat(y.shape[0]) + yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) + if row_major: +diff --git a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +index 98d30906..48bcdae3 100644 +--- a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py ++++ b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +@@ -207,10 +207,22 @@ def delta2bbox(rois, + deltas.size(-1) // 4) + stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) + denorm_deltas = deltas * stds + means +- dx = denorm_deltas[..., 0::4] ++ '''dx = denorm_deltas[..., 0::4] + dy = denorm_deltas[..., 1::4] + dw = denorm_deltas[..., 2::4] +- dh = denorm_deltas[..., 3::4] ++ dh = denorm_deltas[..., 3::4]''' ++ if denorm_deltas.shape[2] > 4: ++ #please self fix when shape[2] > 4 ++ denorm_deltas = denorm_deltas.view(-1, 80, 4) ++ dx = denorm_deltas[:, :, 0:1:].view(-1, 80) ++ dy = denorm_deltas[:, :, 1:2:].view(-1, 80) ++ dw = denorm_deltas[:, :, 2:3:].view(-1, 80) ++ dh = denorm_deltas[:, :, 3:4:].view(-1, 80) ++ else: ++ dx = denorm_deltas[..., 0:1:] ++ dy = denorm_deltas[..., 1:2:] ++ dw = denorm_deltas[..., 2:3:] ++ dh = denorm_deltas[..., 3:4:] + + x1, y1 = rois[..., 0], rois[..., 1] + x2, y2 = rois[..., 2], rois[..., 3] +diff --git a/mmdet/models/dense_heads/anchor_head.py b/mmdet/models/dense_heads/anchor_head.py +index e7c975f5..e2d057e9 100644 +--- a/mmdet/models/dense_heads/anchor_head.py ++++ b/mmdet/models/dense_heads/anchor_head.py +@@ -9,6 +9,55 @@ from ..builder import HEADS, build_loss + from .base_dense_head import BaseDenseHead + from .dense_test_mixins import BBoxTestMixin + ++class BatchNMSOp(torch.autograd.Function): ++ @staticmethod ++ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). ++ scores (torch.Tensor): scores in shape (batch, N, C). 
++ return: ++ nmsed_boxes: (1, N, 4) ++ nmsed_scores: (1, N) ++ nmsed_classes: (1, N) ++ nmsed_num: (1,) ++ """ ++ ++ # Phony implementation for onnx export ++ nmsed_boxes = bboxes[:, :max_total_size, 0, :] ++ nmsed_scores = scores[:, :max_total_size, 0] ++ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) ++ nmsed_num = torch.Tensor([max_total_size]) ++ ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++ @staticmethod ++ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', ++ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, ++ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (N, 4). ++ scores (torch.Tensor): scores in shape (N, ). ++ """ ++ ++ if bboxes.dtype == torch.float32: ++ bboxes = bboxes.reshape(bboxes.size(0), bboxes.shape[1].numpy(), -1, 4).half() ++ scores = scores.reshape(scores.size(0), scores.shape[1].numpy(), -1).half() ++ else: ++ bboxes = bboxes.reshape(bboxes.size(0), bboxes.shape[1].numpy(), -1, 4) ++ scores = scores.reshape(scores.size(0), scores.shape[1].numpy(), -1) ++ ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, ++ score_threshold, iou_threshold, max_size_per_class, max_total_size) ++ nmsed_boxes = nmsed_boxes.float() ++ nmsed_scores = nmsed_scores.float() ++ nmsed_classes = nmsed_classes.long() ++ dets = torch.cat((nmsed_boxes.reshape((bboxes.size(0), max_total_size, 4)), nmsed_scores.reshape((bboxes.size(0), max_total_size, 1))), -1) ++ labels = nmsed_classes.reshape((bboxes.size(0), max_total_size)) ++ return dets, labels + + @HEADS.register_module() + class AnchorHead(BaseDenseHead, BBoxTestMixin): +@@ -653,7 +702,10 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): + anchors = anchors.expand_as(bbox_pred) + # Always keep topk op for dynamic input in onnx + from mmdet.core.export import get_k_for_topk +- nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) ++ #nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) ++ nms_pre = bbox_pred.shape[1] ++ if nms_pre_tensor > 0 and bbox_pred.shape[1] > nms_pre_tensor: ++ nms_pre = nms_pre_tensor + if nms_pre > 0: + # Get maximum scores for foreground classes. 
+ if self.use_sigmoid_cls: +@@ -662,11 +714,14 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class +- max_scores, _ = scores[..., :-1].max(-1) ++ scores_tmp = scores.permute(2, 1, 0) ++ max_scores, _ = scores_tmp[:-1, ...].max(0) ++ max_scores = max_scores.permute(1, 0) + + _, topk_inds = max_scores.topk(nms_pre) + batch_inds = torch.arange(batch_size).view( +- -1, 1).expand_as(topk_inds) ++ -1, 1).to(dtype=torch.int32).expand_as(topk_inds) ++ batch_inds = batch_inds.to(dtype=torch.int64) + anchors = anchors[batch_inds, topk_inds, :] + bbox_pred = bbox_pred[batch_inds, topk_inds, :] + scores = scores[batch_inds, topk_inds, :] +@@ -694,6 +749,8 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): + iou_threshold = cfg.nms.get('iou_threshold', 0.5) + score_threshold = cfg.score_thr + nms_pre = cfg.get('deploy_nms_pre', -1) ++ dets, labels = batch_nms_op(batch_mlvl_bboxes, batch_mlvl_scores, score_threshold, iou_threshold, cfg.max_per_img, cfg.max_per_img) ++ return dets, labels + return add_dummy_nms_for_onnx(batch_mlvl_bboxes, batch_mlvl_scores, + max_output_boxes_per_class, + iou_threshold, score_threshold, diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" new file mode 100644 index 0000000..e365eaa --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" @@ -0,0 +1,1227 @@ +# 基于detectron2训练的npu权重的maskrcnn Onnx模型端到端推理指导 +- [1 模型概述](#1-模型概述) + - [1.1 论文地址](#11-论文地址) + - [1.2 代码地址](#12-代码地址) +- [2 环境说明](#2-环境说明) + - [2.1 深度学习框架](#21-深度学习框架) + - [2.2 python第三方库](#22-python第三方库) +- [3 模型转换](#3-模型转换) + - [3.1 pth转onnx模型](#31-pth转onnx模型) + - [3.2 onnx转om模型](#32-onnx转om模型) +- [4 数据集预处理](#4-数据集预处理) + - [4.1 数据集获取](#41-数据集获取) + - [4.2 数据集预处理](#42-数据集预处理) + - [4.3 生成数据集信息文件](#43-生成数据集信息文件) +- [5 离线推理](#5-离线推理) + - [5.1 benchmark工具概述](#51-benchmark工具概述) + - [5.2 离线推理](#52-离线推理) +- [6 精度对比](#6-精度对比) + - [6.1 离线推理精度统计](#61-离线推理精度统计) + - [6.2 开源精度](#62-开源精度) + - [6.3 精度对比](#63-精度对比) +- [7 性能对比](#7-性能对比) + - [7.1 npu性能数据](#71-npu性能数据) + - [7.2 T4性能数据](#72-T4性能数据) + - [7.3 性能对比](#73-性能对比) + + + +## 1 模型概述 + +- **[论文地址](#11-论文地址)** + +- **[代码地址](#12-代码地址)** + +### 1.1 论文地址 +[maskrcnn论文](https://arxiv.org/abs/1703.06870) +论文提出了一个简单、灵活、通用的目标实例分割框架Mask R-CNN。这个框架可同时做目标检测、实例分割。实例分割的实现就是在faster r-cnn的基础上加了一个可以预测目标掩膜(mask)的分支。只比Faster 
r-cnn慢一点,5fps。很容易拓展到其他任务如:关键点检测。18年在coco的目标检测、实例分割、人体关键点检测都取得了最优成绩。 + +### 1.2 代码地址 +[cpu,gpu版detectron2框架maskrcnn代码](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) + +[npu版detectron2框架maskrcnn代码](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch) + +## 2 环境说明 + +- **[深度学习框架](#21-深度学习框架)** + +- **[python第三方库](#22-python第三方库)** + +### 2.1 深度学习框架 +``` +pytorch == 1.8.0 +torchvision == 0.9.0 +onnx == 1.8.0 +``` + +**注意:** +> 转onnx的环境上pytorch需要安装1.8.0版本 +> + +### 2.2 python第三方库 + +``` +numpy == 1.18.5 +opencv-python == 4.2.0.34 +``` + +**说明:** +> X86架构:opencv,pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装 +> +> Arm架构:opencv,pytorch,torchvision和onnx可以通过源码编译安装,其它可以通过pip3.7 install 包名 安装 + +## 3 模型转换 + +- **[pth转onnx模型](#31-pth转onnx模型)** + +- **[onnx转om模型](#32-onnx转om模型)** + +detectron2暂支持pytorch1.8导出pytorch框架的onnx,npu权重可以使用开源的detectron2加载,因此基于pytorch1.8与开源detectron2导出含npu权重的onnx。atc暂不支持动态shape小算子,可以使用大颗粒算子替换这些小算子规避,这些小算子可以在转onnx时的verbose打印中找到其对应的python代码,从而根据功能用大颗粒算子替换,onnx能推导出变量正确的shape与算子属性正确即可,变量实际的数值无关紧要,因此这些大算子函数的功能实现无关紧要,因包含自定义算子需要去掉对onnx模型的校验。 + +### 3.1 pth转onnx模型 + +1.获取pth权重文件 +[maskrcnn基于detectron2预训练的npu权重文件](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch) +文件md5sum: b95f35f051012a02875220482a568c3b +2.下载detectron2源码并安装 +```shell +git clone https://github.com/facebookresearch/detectron2 +python3.7 -m pip install -e detectron2 +``` + + **说明:** +> 安装所需的依赖说明请参考detectron2/INSTALL.md +> +> 重装pytorch后需要rm -rf detectron2/build/ **/*.so再重装detectron2 + +3.detectron2代码迁移,参见maskrcnn_detectron2.diff: +```diff +diff --git a/detectron2/layers/__init__.py b/detectron2/layers/__init__.py +index c8bd1fb..f5fa9ea 100644 +--- a/detectron2/layers/__init__.py ++++ b/detectron2/layers/__init__.py +@@ -2,7 +2,7 @@ + from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm + from .deform_conv import DeformConv, ModulatedDeformConv + from .mask_ops import paste_masks_in_image +-from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated ++from .nms import batched_nms, batch_nms_op, batched_nms_rotated, nms, nms_rotated + from .roi_align import ROIAlign, roi_align + from .roi_align_rotated import ROIAlignRotated, roi_align_rotated + from .shape_spec import ShapeSpec +diff --git a/detectron2/layers/nms.py b/detectron2/layers/nms.py +index ac14d45..22efb24 100644 +--- a/detectron2/layers/nms.py ++++ b/detectron2/layers/nms.py +@@ -15,6 +15,56 @@ if TORCH_VERSION < (1, 7): + else: + nms_rotated_func = torch.ops.detectron2.nms_rotated + ++class BatchNMSOp(torch.autograd.Function): ++ @staticmethod ++ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). ++ scores (torch.Tensor): scores in shape (batch, N, C). 
++ return: ++ nmsed_boxes: (1, N, 4) ++ nmsed_scores: (1, N) ++ nmsed_classes: (1, N) ++ nmsed_num: (1,) ++ """ ++ ++ # Phony implementation for onnx export ++ nmsed_boxes = bboxes[:, :max_total_size, 0, :] ++ nmsed_scores = scores[:, :max_total_size, 0] ++ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) ++ nmsed_num = torch.Tensor([max_total_size]) ++ ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++ @staticmethod ++ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', ++ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, ++ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (N, 4). ++ scores (torch.Tensor): scores in shape (N, ). ++ """ ++ ++ num_classes = bboxes.shape[1].numpy() // 4 ++ if bboxes.dtype == torch.float32: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() ++ else: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1) ++ ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, ++ score_threshold, iou_threshold, max_size_per_class, max_total_size) ++ nmsed_boxes = nmsed_boxes.float() ++ nmsed_scores = nmsed_scores.float() ++ nmsed_classes = nmsed_classes.long() ++ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) ++ labels = nmsed_classes.reshape((max_total_size, )) ++ return dets, labels + + def batched_nms( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +diff --git a/detectron2/modeling/box_regression.py b/detectron2/modeling/box_regression.py +index 12be000..074f3e3 100644 +--- a/detectron2/modeling/box_regression.py ++++ b/detectron2/modeling/box_regression.py +@@ -87,20 +87,33 @@ class Box2BoxTransform(object): + deltas = deltas.float() # ensure fp32 for decoding precision + boxes = boxes.to(deltas.dtype) + +- widths = boxes[:, 2] - boxes[:, 0] +- heights = boxes[:, 3] - boxes[:, 1] +- ctr_x = boxes[:, 0] + 0.5 * widths +- ctr_y = boxes[:, 1] + 0.5 * heights ++ boxes_prof = boxes.permute(1, 0) ++ widths = boxes_prof[2, :] - boxes_prof[0, :] ++ heights = boxes_prof[3, :] - boxes_prof[1, :] ++ ctr_x = boxes_prof[0, :] + 0.5 * widths ++ ctr_y = boxes_prof[1, :] + 0.5 * heights + + wx, wy, ww, wh = self.weights +- dx = deltas[:, 0::4] / wx ++ '''dx = deltas[:, 0::4] / wx + dy = deltas[:, 1::4] / wy + dw = deltas[:, 2::4] / ww +- dh = deltas[:, 3::4] / wh ++ dh = deltas[:, 3::4] / wh''' ++ denorm_deltas = deltas ++ if denorm_deltas.shape[1] > 4: ++ denorm_deltas = denorm_deltas.view(-1, 80, 4) ++ dx = denorm_deltas[:, :, 0:1:].view(-1, 80) / wx ++ dy = denorm_deltas[:, :, 1:2:].view(-1, 80) / wy ++ dw = denorm_deltas[:, :, 2:3:].view(-1, 80) / ww ++ dh = denorm_deltas[:, :, 3:4:].view(-1, 80) / wh ++ else: ++ dx = denorm_deltas[:, 0:1:] / wx ++ dy = denorm_deltas[:, 1:2:] / wy ++ dw = denorm_deltas[:, 2:3:] / ww ++ dh = denorm_deltas[:, 3:4:] / wh + + # Prevent sending too large values into torch.exp() +- dw = torch.clamp(dw, max=self.scale_clamp) +- dh = torch.clamp(dh, max=self.scale_clamp) ++ 
dw = torch.clamp(dw, min=-float('inf'), max=self.scale_clamp) ++ dh = torch.clamp(dh, min=-float('inf'), max=self.scale_clamp) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] +diff --git a/detectron2/modeling/meta_arch/rcnn.py b/detectron2/modeling/meta_arch/rcnn.py +index e5f66d1..1bbba71 100644 +--- a/detectron2/modeling/meta_arch/rcnn.py ++++ b/detectron2/modeling/meta_arch/rcnn.py +@@ -199,8 +199,9 @@ class GeneralizedRCNN(nn.Module): + """ + assert not self.training + +- images = self.preprocess_image(batched_inputs) +- features = self.backbone(images.tensor) ++ # images = self.preprocess_image(batched_inputs) ++ images = batched_inputs ++ features = self.backbone(images) + + if detected_instances is None: + if self.proposal_generator is not None: +diff --git a/detectron2/modeling/poolers.py b/detectron2/modeling/poolers.py +index e5d72ab..7c0dd2f 100644 +--- a/detectron2/modeling/poolers.py ++++ b/detectron2/modeling/poolers.py +@@ -94,6 +94,31 @@ def convert_boxes_to_pooler_format(box_lists: List[Boxes]): + + return pooler_fmt_boxes + ++import torch.onnx.symbolic_helper as sym_help ++ ++class RoiExtractor(torch.autograd.Function): ++ @staticmethod ++ def forward(self, f0, f1, f2, f3, rois, aligned=0, finest_scale=56, pooled_height=7, pooled_width=7, ++ pool_mode='avg', roi_scale_factor=0, sample_num=0, spatial_scale=[0.25, 0.125, 0.0625, 0.03125]): ++ """ ++ feats (torch.Tensor): feats in shape (batch, 256, H, W). ++ rois (torch.Tensor): rois in shape (k, 5). ++ return: ++ roi_feats (torch.Tensor): (k, 256, pooled_width, pooled_width) ++ """ ++ ++ # phony implementation for shape inference ++ k = rois.size()[0] ++ roi_feats = torch.ones(k, 256, pooled_height, pooled_width) ++ return roi_feats ++ ++ @staticmethod ++ def symbolic(g, f0, f1, f2, f3, rois, aligned=0, finest_scale=56, pooled_height=7, pooled_width=7): ++ # TODO: support tensor list type for feats ++ #f_tensors = sym_help._unpack_list(feats) ++ roi_feats = g.op('RoiExtractor', f0, f1, f2, f3, rois, aligned_i=0, finest_scale_i=56, pooled_height_i=pooled_height, pooled_width_i=pooled_width, ++ pool_mode_s='avg', roi_scale_factor_i=0, sample_num_i=0, spatial_scale_f=[0.25, 0.125, 0.0625, 0.03125], outputs=1) ++ return roi_feats + + class ROIPooler(nn.Module): + """ +@@ -202,6 +227,12 @@ class ROIPooler(nn.Module): + A tensor of shape (M, C, output_size, output_size) where M is the total number of + boxes aggregated over all N batch images and C is the number of channels in `x`. 
+ """ ++ if torch.onnx.is_in_onnx_export(): ++ output_size = self.output_size[0] ++ pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) ++ roi_feats = RoiExtractor.apply(x[0], x[1], x[2], x[3], pooler_fmt_boxes, 0, 56, output_size, output_size) ++ return roi_feats ++ + num_level_assignments = len(self.level_poolers) + + assert isinstance(x, list) and isinstance( +diff --git a/detectron2/modeling/proposal_generator/proposal_utils.py b/detectron2/modeling/proposal_generator/proposal_utils.py +index 9c10436..b3437a7 100644 +--- a/detectron2/modeling/proposal_generator/proposal_utils.py ++++ b/detectron2/modeling/proposal_generator/proposal_utils.py +@@ -4,7 +4,7 @@ import math + from typing import List, Tuple + import torch + +-from detectron2.layers import batched_nms, cat ++from detectron2.layers import batch_nms_op, cat + from detectron2.structures import Boxes, Instances + from detectron2.utils.env import TORCH_VERSION + +@@ -68,15 +68,19 @@ def find_top_rpn_proposals( + for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)): + Hi_Wi_A = logits_i.shape[1] + if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing +- num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) ++ num_proposals_i = torch.clamp(Hi_Wi_A, min=0, max=pre_nms_topk) + else: + num_proposals_i = min(Hi_Wi_A, pre_nms_topk) + + # sort is faster than topk: https://github.com/pytorch/pytorch/issues/22812 +- # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) +- logits_i, idx = logits_i.sort(descending=True, dim=1) ++ num_proposals_i = num_proposals_i.item() ++ logits_i = logits_i.reshape(logits_i.size(1)) ++ topk_scores_i, topk_idx = torch.topk(logits_i, num_proposals_i) ++ topk_scores_i = topk_scores_i.reshape(1, topk_scores_i.size(0)) ++ topk_idx = topk_idx.reshape(1, topk_idx.size(0)) ++ '''logits_i, idx = logits_i.sort(descending=True, dim=1) + topk_scores_i = logits_i.narrow(1, 0, num_proposals_i) +- topk_idx = idx.narrow(1, 0, num_proposals_i) ++ topk_idx = idx.narrow(1, 0, num_proposals_i)''' + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 +@@ -108,7 +112,7 @@ def find_top_rpn_proposals( + lvl = lvl[valid_mask] + boxes.clip(image_size) + +- # filter empty boxes ++ '''# filter empty boxes + keep = boxes.nonempty(threshold=min_box_size) + if _is_tracing() or keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep] +@@ -126,7 +130,14 @@ def find_top_rpn_proposals( + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] ++ results.append(res)''' ++ ++ dets, labels = batch_nms_op(boxes.tensor, scores_per_img, 0, nms_thresh, post_nms_topk, post_nms_topk) ++ res = Instances(image_size) ++ res.proposal_boxes = Boxes(dets[:, :4]) ++ res.objectness_logits = dets[:, 4] + results.append(res) ++ + return results + + +diff --git a/detectron2/modeling/proposal_generator/rpn.py b/detectron2/modeling/proposal_generator/rpn.py +index 1675377..77d9f26 100644 +--- a/detectron2/modeling/proposal_generator/rpn.py ++++ b/detectron2/modeling/proposal_generator/rpn.py +@@ -434,7 +434,7 @@ class RPN(nn.Module): + else: + losses = {} + proposals = self.predict_proposals( +- anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes ++ anchors, pred_objectness_logits, pred_anchor_deltas, [(1344, 1344)] + ) + return proposals, losses + +@@ -485,7 +485,8 @@ class RPN(nn.Module): + B = anchors_i.tensor.size(1) 
+ pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) + # Expand anchors to shape (N*Hi*Wi*A, B) +- anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) ++ s = torch.zeros(N, anchors_i.tensor.unsqueeze(0).size(1), anchors_i.tensor.unsqueeze(0).size(2)) ++ anchors_i = anchors_i.tensor.unsqueeze(0).expand_as(s).reshape(-1, B) + proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) + # Append feature map proposals with shape (N, Hi*Wi*A, B) + proposals.append(proposals_i.view(N, -1, B)) +diff --git a/detectron2/modeling/roi_heads/fast_rcnn.py b/detectron2/modeling/roi_heads/fast_rcnn.py +index 348f6a0..87c7cd3 100644 +--- a/detectron2/modeling/roi_heads/fast_rcnn.py ++++ b/detectron2/modeling/roi_heads/fast_rcnn.py +@@ -7,7 +7,7 @@ from torch import nn + from torch.nn import functional as F + + from detectron2.config import configurable +-from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple ++from detectron2.layers import ShapeSpec, batch_nms_op, cat, cross_entropy, nonzero_tuple + from detectron2.modeling.box_regression import Box2BoxTransform + from detectron2.structures import Boxes, Instances + from detectron2.utils.events import get_event_storage +@@ -144,7 +144,7 @@ def fast_rcnn_inference_single_image( + # Convert to Boxes to use the `clip` function ... + boxes = Boxes(boxes.reshape(-1, 4)) + boxes.clip(image_shape) +- boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 ++ boxes = boxes.tensor.view(-1, num_bbox_reg_classes.item(), 4) # R x C x 4 + + # 1. Filter results based on detection scores. It can make NMS more efficient + # by filtering out low-confidence detections. +@@ -152,7 +152,7 @@ def fast_rcnn_inference_single_image( + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. 
+ filter_inds = filter_mask.nonzero() +- if num_bbox_reg_classes == 1: ++ '''if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] +@@ -167,7 +167,14 @@ def fast_rcnn_inference_single_image( + result = Instances(image_shape) + result.pred_boxes = Boxes(boxes) + result.scores = scores +- result.pred_classes = filter_inds[:, 1] ++ result.pred_classes = filter_inds[:, 1]''' ++ ++ dets, labels = batch_nms_op(boxes, scores, score_thresh, nms_thresh, topk_per_image, topk_per_image) ++ result = Instances(image_shape) ++ result.pred_boxes = Boxes(dets[:, :4]) ++ result.scores = dets.permute(1, 0)[4, :] ++ result.pred_classes = labels ++ + return result, filter_inds[:, 0] + + +diff --git a/detectron2/modeling/roi_heads/mask_head.py b/detectron2/modeling/roi_heads/mask_head.py +index 5ac5c4b..f81b96b 100644 +--- a/detectron2/modeling/roi_heads/mask_head.py ++++ b/detectron2/modeling/roi_heads/mask_head.py +@@ -142,7 +142,9 @@ def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Ins + num_masks = pred_mask_logits.shape[0] + class_pred = cat([i.pred_classes for i in pred_instances]) + indices = torch.arange(num_masks, device=class_pred.device) +- mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() ++ print(indices,class_pred) ++ # mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() ++ mask_probs_pred = pred_mask_logits.sigmoid() + # mask_probs_pred.shape: (B, 1, Hmask, Wmask) + + num_boxes_per_image = [len(i) for i in pred_instances] +diff --git a/detectron2/structures/boxes.py b/detectron2/structures/boxes.py +index 57f862a..bad473b 100644 +--- a/detectron2/structures/boxes.py ++++ b/detectron2/structures/boxes.py +@@ -202,10 +202,11 @@ class Boxes: + """ + assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" + h, w = box_size +- x1 = self.tensor[:, 0].clamp(min=0, max=w) +- y1 = self.tensor[:, 1].clamp(min=0, max=h) +- x2 = self.tensor[:, 2].clamp(min=0, max=w) +- y2 = self.tensor[:, 3].clamp(min=0, max=h) ++ boxes_prof = self.tensor.permute(1, 0) ++ x1 = boxes_prof[0, :].clamp(min=0, max=w) ++ y1 = boxes_prof[1, :].clamp(min=0, max=h) ++ x2 = boxes_prof[2, :].clamp(min=0, max=w) ++ y2 = boxes_prof[3, :].clamp(min=0, max=h) + self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: +diff --git a/tools/deploy/export_model.py b/tools/deploy/export_model.py +index fe2fe30..22145b7 100755 +--- a/tools/deploy/export_model.py ++++ b/tools/deploy/export_model.py +@@ -77,6 +77,28 @@ def export_scripting(torch_model): + # TODO inference in Python now missing postprocessing glue code + return None + ++from typing import Dict, Tuple ++import numpy ++from detectron2.structures import ImageList ++def preprocess_image(batched_inputs: Tuple[Dict[str, torch.Tensor]]): ++ """ ++ Normalize, pad and batch the input images. 
++ """ ++ images = [x["image"].to('cpu') for x in batched_inputs] ++ images = [(x - numpy.array([[[103.530]], [[116.280]], [[123.675]]])) / numpy.array([[[1.]], [[1.]], [[1.]]]) for x in images] ++ import torch.nn.functional as F ++ image = torch.zeros(0, 1344, 1344) ++ for i in range(images[0].size(0)): ++ img = images[0][i] ++ img = img.expand((1, 1, img.size(0), img.size(1))) ++ img = img.to(dtype=torch.float32) ++ img = F.interpolate(img, size=(int(1344), int(1344)), mode='bilinear', align_corners=False) ++ img = img[0][0] ++ img = img.unsqueeze(0) ++ image = torch.cat((image, img)) ++ images = [image] ++ images = ImageList.from_tensors(images, 32) ++ return images + + # experimental. API not yet final + def export_tracing(torch_model, inputs): +@@ -84,6 +106,8 @@ def export_tracing(torch_model, inputs): + image = inputs[0]["image"] + inputs = [{"image": image}] # remove other unused keys + ++ inputs = preprocess_image(inputs).tensor.to(torch.float32) ++ image = inputs + if isinstance(torch_model, GeneralizedRCNN): + + def inference(model, inputs): +@@ -104,7 +128,7 @@ def export_tracing(torch_model, inputs): + elif args.format == "onnx": + # NOTE onnx export currently failing in pytorch + with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f: +- torch.onnx.export(traceable_model, (image,), f) ++ torch.onnx.export(traceable_model, (image,), f, opset_version=11, verbose=True) + logger.info("Inputs schema: " + str(traceable_model.inputs_schema)) + logger.info("Outputs schema: " + str(traceable_model.outputs_schema)) + + +``` + **修改依据:** +> 1.slice,topk算子问题导致pre_nms_topk未生效,atc转换报错,修改参见maskrcnn_detectron2.diff +> 2.expand会引入where动态算子因此用expand_as替换 +> 3.slice跑在aicpu有错误,所以改为dx = denorm_deltas[:, :, 0:1:].view(-1, 80) / wx,使其运行在aicore上 +> 4.atc转换时根据日志中报错的算子在转onnx时的verbose打印中找到其对应的python代码,然后找到规避方法解决,具体修改参见maskrcnn_detectron2.diff +> 5.其它地方的修改原因参见精度调试与性能优化 + + +通过打补丁的方式修改detectron2: +```shell +cd detectron2 +patch -p1 < ../maskrcnn_detectron2.diff +cd .. 
+``` +4.修改pytorch代码去除导出onnx时进行检查 +将/usr/local/python3.7.5/lib/python3.7/site-packages/torch/onnx/utils.py文件的_check_onnx_proto(proto)改为pass + +5.准备coco2017验证集,数据集获取参见本文第四章第一节 +在当前目录按结构构造数据集:datasets/coco目录下有annotations与val2017,annotations目录存放coco数据集的instances_val2017.json,val2017目录存放coco数据集的5000张验证图片。 +或者修改detectron2/detectron2/data/datasets/builtin.py为_root = os.getenv("DETECTRON2_DATASETS", "/opt/npu/dataset/")指定coco数据集所在的目录/opt/npu/dataset/。 + +6.运行如下命令,在output目录生成model.onnx +```shell +python3.7 detectron2/tools/deploy/export_model.py --config-file detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --output ./output --export-method tracing --format onnx MODEL.WEIGHTS model_final.pth MODEL.DEVICE cpu + +mv output/model.onnx model_py1.8.onnx +``` + +### 3.2 onnx转om模型 + +1.设置环境变量 +```shell +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH +export ASCEND_OPP_PATH=${install_path}/opp +export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ +``` +2.使用atc将onnx模型转换为om模型文件,工具使用方法可以参考[CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373),需要指定输出节点以去除无用输出,使用netron开源可视化工具查看具体的输出节点名: +```shell +atc --model=model_py1.8.onnx --framework=5 --output=maskrcnn_detectron2_npu --input_format=NCHW --input_shape="0:1,3,1344,1344" --out_nodes="Cast_1673:0;Gather_1676:0;Reshape_1667:0;Slice_1706:0" --log=debug --soc_version=Ascend310 +``` + +## 4 数据集预处理 + +- **[数据集获取](#41-数据集获取)** + +- **[数据集预处理](#42-数据集预处理)** + +- **[生成数据集信息文件](#43-生成数据集信息文件)** + +### 4.1 数据集获取 +该模型使用[COCO官网](https://cocodataset.org/#download)的coco2017的5千张验证集进行测试,图片与标签分别存放在/opt/npu/dataset/coco/val2017/与/opt/npu/dataset/coco/annotations/instances_val2017.json。 + +### 4.2 数据集预处理 +1.预处理脚本maskrcnn_pth_preprocess_detectron2.py +```python +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
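+
+# The preprocessing below mirrors the detectron2 config used for export: read
+# the image as BGR, resize the shorter side to 800 (capping the longer side at
+# 1333), subtract the pixel mean with std 1, zero-pad to the fixed 1344x1344
+# network input, and save the CHW float32 array as a bin file.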
+ +import os +import argparse +import numpy as np +import cv2 +import torch +import multiprocessing + +def resize(img, size): + old_h = img.shape[0] + old_w = img.shape[1] + scale_ratio = 800 / min(old_w, old_h) + new_w = int(np.floor(old_w * scale_ratio)) + new_h = int(np.floor(old_h * scale_ratio)) + if max(new_h, new_w) > 1333: + scale = 1333 / max(new_h, new_w) + new_h = new_h * scale + new_w = new_w * scale + new_w = int(new_w + 0.5) + new_h = int(new_h + 0.5) + resized_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR) + return resized_img + +def gen_input_bin(file_batches, batch): + i = 0 + for file in file_batches[batch]: + i = i + 1 + print("batch", batch, file, "===", i) + + image = cv2.imread(os.path.join(flags.image_src_path, file), cv2.IMREAD_COLOR) + image = resize(image, (800, 1333)) + mean = np.array([103.53, 116.28, 123.675], dtype=np.float32) + std = np.array([1., 1., 1.], dtype=np.float32) + img = image.copy().astype(np.float32) + mean = np.float64(mean.reshape(1, -1)) + std = 1 / np.float64(std.reshape(1, -1)) + cv2.subtract(img, mean, img) + cv2.multiply(img, std, img) + img = cv2.copyMakeBorder(img, 0, flags.model_input_height - img.shape[0], 0, flags.model_input_width - img.shape[1], cv2.BORDER_CONSTANT, value=0) + #os.makedirs('./paded_jpg/', exist_ok=True) + #cv2.imwrite('./paded_jpg/' + file.split('.')[0] + '.jpg', img) + img = img.transpose(2, 0, 1) + img.tofile(os.path.join(flags.bin_file_path, file.split('.')[0] + ".bin")) + +def preprocess(src_path, save_path): + files = os.listdir(src_path) + file_batches = [files[i:i + 100] for i in range(0, 5000, 100) if files[i:i + 100] != []] + thread_pool = multiprocessing.Pool(len(file_batches)) + for batch in range(len(file_batches)): + thread_pool.apply_async(gen_input_bin, args=(file_batches, batch)) + thread_pool.close() + thread_pool.join() + print("in thread, except will not report! please ensure bin files generated.") + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='preprocess of MaskRCNN PyTorch model') + parser.add_argument("--image_src_path", default="./coco2017/", help='image of dataset') + parser.add_argument("--bin_file_path", default="./coco2017_bin/", help='Preprocessed image buffer') + parser.add_argument("--model_input_height", default=1344, type=int, help='input tensor height') + parser.add_argument("--model_input_width", default=1344, type=int, help='input tensor width') + flags = parser.parse_args() + if not os.path.exists(flags.bin_file_path): + os.makedirs(flags.bin_file_path) + preprocess(flags.image_src_path, flags.bin_file_path) +``` +2.执行预处理脚本,生成数据集预处理后的bin文件 +```shell +python3.7 maskrcnn_pth_preprocess_detectron2.py --image_src_path=/opt/npu/dataset/coco/val2017 --bin_file_path=val2017_bin --model_input_height=1344 --model_input_width=1344 +``` +### 4.3 生成数据集信息文件 +1.生成数据集信息文件脚本get_info.py +```python +import os +import sys +import cv2 +from glob import glob + + +def get_bin_info(file_path, info_name, width, height): + bin_images = glob(os.path.join(file_path, '*.bin')) + with open(info_name, 'w') as file: + for index, img in enumerate(bin_images): + content = ' '.join([str(index), img, width, height]) + file.write(content) + file.write('\n') + + +def get_jpg_info(file_path, info_name): + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + image_names = [] + for extension in extensions: + image_names.append(glob(os.path.join(file_path, '*.' 
+ extension))) + with open(info_name, 'w') as file: + for image_name in image_names: + if len(image_name) == 0: + continue + else: + for index, img in enumerate(image_name): + img_cv = cv2.imread(img) + shape = img_cv.shape + width, height = shape[1], shape[0] + content = ' '.join([str(index), img, str(width), str(height)]) + file.write(content) + file.write('\n') + + +if __name__ == '__main__': + file_type = sys.argv[1] + file_path = sys.argv[2] + info_name = sys.argv[3] + if file_type == 'bin': + width = sys.argv[4] + height = sys.argv[5] + assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5' + get_bin_info(file_path, info_name, width, height) + elif file_type == 'jpg': + assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3' + get_jpg_info(file_path, info_name) +``` +2.执行生成数据集信息脚本,生成数据集信息文件 +```shell +python3.7 get_info.py bin val2017_bin maskrcnn.info 1344 1344 +``` +第一个参数为模型输入的类型,第二个参数为生成的bin文件路径,第三个为输出的info文件,后面为宽高信息 +## 5 离线推理 + +- **[benchmark工具概述](#51-benchmark工具概述)** + +- **[离线推理](#52-离线推理)** + +### 5.1 benchmark工具概述 + +benchmark工具为华为自研的模型推理工具,支持多种模型的离线推理,能够迅速统计出模型在Ascend310上的性能,支持真实数据和纯推理两种模式,配合后处理脚本,可以实现诸多模型的端到端过程,获取工具及使用方法可以参考[CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373) +### 5.2 离线推理 +1.设置环境变量 +```shell +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH +export ASCEND_OPP_PATH=${install_path}/opp +export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ +``` +2.执行离线推理 +```shell +./benchmark.x86_64 -model_type=vision -om_path=maskrcnn_detectron2_npu.om -device_id=0 -batch_size=1 -input_text_path=maskrcnn.info -input_width=1344 -input_height=1344 -useDvpp=false -output_binary=true +``` +输出结果默认保存在当前目录result/dumpOutput_device0,模型有四个输出,每个输入对应的输出对应四个_x.bin文件 +``` +输出 shape 数据类型 数据含义 +output1 100 * 4 FP32 boxes +output2 100 * 1 FP32 scores +output3 100 * 1 INT64 labels +output4 100 * 80 * 28 * 28 FP32 masks +``` + +## 6 精度对比 + +- **[离线推理精度](#61-离线推理精度)** +- **[开源精度](#62-开源精度)** +- **[精度对比](#63-精度对比)** + +### 6.1 离线推理精度统计 + +后处理统计map精度 +```python +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
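+
+# 后处理脚本说明(概述自下方实现):
+# 1.读取benchmark对每张图输出的4个bin文件:boxes(100x4)、scores(100x1)、labels(100x1, int64)、masks(100x80x28x28)
+# 2.用torchvision的paste_masks_in_image将28x28的mask贴回1344x1344的网络输入尺寸,再按预处理的缩放比例还原到原图尺寸
+# 3.框坐标同样除以缩放比例还原到原图,组装成detectron2的Instances后交给COCOEvaluator统计map精度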
+ +import os +import argparse +import cv2 +import numpy as np + +def postprocess_bboxes(bboxes, image_size, net_input_width, net_input_height): + org_w = image_size[0] + org_h = image_size[1] + + scale = 800 / min(org_w, org_h) + new_w = int(np.floor(org_w * scale)) + new_h = int(np.floor(org_h * scale)) + if max(new_h, new_w) > 1333: + scale = 1333 / max(new_h, new_w) * scale + + bboxes[:, 0] = (bboxes[:, 0]) / scale + bboxes[:, 1] = (bboxes[:, 1]) / scale + bboxes[:, 2] = (bboxes[:, 2]) / scale + bboxes[:, 3] = (bboxes[:, 3]) / scale + + return bboxes + +def postprocess_masks(masks, image_size, net_input_width, net_input_height): + org_w = image_size[0] + org_h = image_size[1] + + scale = 800 / min(org_w, org_h) + new_w = int(np.floor(org_w * scale)) + new_h = int(np.floor(org_h * scale)) + if max(new_h, new_w) > 1333: + scale = 1333 / max(new_h, new_w) * scale + + pad_w = net_input_width - org_w * scale + pad_h = net_input_height - org_h * scale + top = 0 + left = 0 + hs = int(net_input_height - pad_h) + ws = int(net_input_width - pad_w) + + masks = masks.to(dtype=torch.float32) + res_append = torch.zeros(0, org_h, org_w) + if torch.cuda.is_available(): + res_append = res_append.to(device='cuda') + for i in range(masks.size(0)): + mask = masks[i][0][top:hs, left:ws] + mask = mask.expand((1, 1, mask.size(0), mask.size(1))) + mask = F.interpolate(mask, size=(int(org_h), int(org_w)), mode='bilinear', align_corners=False) + mask = mask[0][0] + mask = mask.unsqueeze(0) + res_append = torch.cat((res_append, mask)) + + return res_append[:, None] + +import pickle +def save_variable(v, filename): + f = open(filename, 'wb') + pickle.dump(v, f) + f.close() +def load_variavle(filename): + f = open(filename, 'rb') + r = pickle.load(f) + f.close() + return r + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--test_annotation", default="./origin_pictures.info") + parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0/") + parser.add_argument("--det_results_path", default="./detection-results/") + parser.add_argument("--net_out_num", type=int, default=4) + parser.add_argument("--net_input_width", type=int, default=1344) + parser.add_argument("--net_input_height", type=int, default=1344) + parser.add_argument("--ifShowDetObj", action="store_true", help="if input the para means True, neither False.") + flags = parser.parse_args() + + img_size_dict = dict() + with open(flags.test_annotation)as f: + for line in f.readlines(): + temp = line.split(" ") + img_file_path = temp[1] + img_name = temp[1].split("/")[-1].split(".")[0] + img_width = int(temp[2]) + img_height = int(temp[3]) + img_size_dict[img_name] = (img_width, img_height, img_file_path) + + bin_path = flags.bin_data_path + det_results_path = flags.det_results_path + os.makedirs(det_results_path, exist_ok=True) + total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name]) + + import torch + from torchvision.models.detection.roi_heads import paste_masks_in_image + import torch.nn.functional as F + from detectron2.evaluation import COCOEvaluator + from detectron2.structures import Boxes, Instances + from detectron2.data import DatasetCatalog, MetadataCatalog + import logging + logging.basicConfig(level=logging.INFO) + evaluator = COCOEvaluator('coco_2017_val') + evaluator.reset() + coco_class_map = {id:name for id, name in enumerate(MetadataCatalog.get('coco_2017_val').thing_classes)} + results = [] + + cnt = 0 + for bin_file in sorted(total_img): + cnt = cnt 
+ 1 + print(cnt - 1, bin_file) + path_base = os.path.join(bin_path, bin_file) + res_buff = [] + for num in range(1, flags.net_out_num + 1): + if os.path.exists(path_base + "_" + str(num) + ".bin"): + if num == 1: + buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") + buf = np.reshape(buf, [100, 4]) + elif num == 2: + buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") + buf = np.reshape(buf, [100, 1]) + elif num == 3: + buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="int64") + buf = np.reshape(buf, [100, 1]) + elif num == 4: + bboxes = np.fromfile(path_base + "_" + str(num - 3) + ".bin", dtype="float32") + bboxes = np.reshape(bboxes, [100, 4]) + bboxes = torch.from_numpy(bboxes) + scores = np.fromfile(path_base + "_" + str(num - 2) + ".bin", dtype="float32") + scores = np.reshape(scores, [100, 1]) + scores = torch.from_numpy(scores) + labels = np.fromfile(path_base + "_" + str(num - 1) + ".bin", dtype="int64") + labels = np.reshape(labels, [100, 1]) + labels = torch.from_numpy(labels) + mask_pred = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") + mask_pred = np.reshape(mask_pred, [100, 80, 28, 28]) + mask_pred = torch.from_numpy(mask_pred) + + org_img_size = img_size_dict[bin_file][:2] + result = Instances((org_img_size[1], org_img_size[0])) + + if torch.cuda.is_available(): + mask_pred = mask_pred.to(device='cuda') + img_shape = (flags.net_input_height, flags.net_input_width) + mask_pred = mask_pred[range(len(mask_pred)), labels[:, 0]][:, None] + masks = paste_masks_in_image(mask_pred, bboxes[:, :4], img_shape) + masks = masks >= 0.5 + masks = postprocess_masks(masks, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height) + if torch.cuda.is_available(): + masks = masks.cpu() + masks = masks.squeeze(1) + result.pred_masks = masks + + '''masks = masks.numpy() + img = masks[0] + from PIL import Image + for j in range(len(masks)): + mask = masks[j] + mask = mask.astype(bool) + img[mask] = img[mask] + 1 + imag = Image.fromarray((img * 255).astype(np.uint8)) + imag.save(os.path.join('.', bin_file + '.png'))''' + + predbox = postprocess_bboxes(bboxes, org_img_size, flags.net_input_height, flags.net_input_width) + result.pred_boxes = Boxes(predbox) + result.scores = scores.reshape([100]) + result.pred_classes = labels.reshape([100]) + + results.append({"instances": result}) + + res_buff.append(buf) + else: + print("[ERROR] file not exist", path_base + "_" + str(num) + ".bin") + + current_img_size = img_size_dict[bin_file] + res_bboxes = np.concatenate(res_buff, axis=1) + predbox = postprocess_bboxes(res_bboxes, current_img_size, flags.net_input_width, flags.net_input_height) + + if flags.ifShowDetObj == True: + imgCur = cv2.imread(current_img_size[2]) + + det_results_str = '' + for idx, class_idx in enumerate(predbox[:, 5]): + if float(predbox[idx][4]) < float(0.05): + #if float(predbox[idx][4]) < float(0): + continue + if class_idx < 0 or class_idx > 80: + continue + + class_name = coco_class_map[int(class_idx)] + det_results_str += "{} {} {} {} {} {}\n".format(class_name, str(predbox[idx][4]), predbox[idx][0], + predbox[idx][1], predbox[idx][2], predbox[idx][3]) + + if flags.ifShowDetObj == True: + imgCur = cv2.rectangle(imgCur, (int(predbox[idx][0]), int(predbox[idx][1])), (int(predbox[idx][2]), int(predbox[idx][3])), (0,255,0), 2) + imgCur = cv2.putText(imgCur, class_name, (int(predbox[idx][0]), int(predbox[idx][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + #imgCur = cv2.putText(imgCur, 
str(predbox[idx][4]), (int(predbox[idx][0]), int(predbox[idx][1])),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + + if flags.ifShowDetObj == True: + cv2.imwrite(os.path.join(det_results_path, bin_file +'.jpg'), imgCur, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) + + det_results_file = os.path.join(det_results_path, bin_file + ".txt") + with open(det_results_file, "w") as detf: + detf.write(det_results_str) + + #save_variable(results, './results.txt') + #results = load_variavle('./results.txt') + inputs = DatasetCatalog.get('coco_2017_val')[:5000] + evaluator.process(inputs, results) + evaluator.evaluate() +``` +调用maskrcnn_pth_postprocess_detectron2.py评测map精度: +```shell +python3.7 get_info.py jpg /opt/npu/dataset/coco/val2017 maskrcnn_jpeg.info + +python3.7 maskrcnn_pth_postprocess_detectron2.py --bin_data_path=./result/dumpOutput_device0/ --test_annotation=maskrcnn_jpeg.info --det_results_path=./ret_npuinfer/ --net_out_num=4 --net_input_height=1344 --net_input_width=1344 --ifShowDetObj +``` +第一个参数为benchmark推理结果,第二个为原始图片信息文件,第三个为后处理输出结果,第四个为网络输出个数,第五六个为网络高宽,第七个为是否将box画在图上显示 +执行完后会打印出精度: +``` +INFO:detectron2.data.datasets.coco:Loaded 5000 images in COCO format from /opt/npu/dataset/coco/annotations/instances_val2017.json +INFO:detectron2.evaluation.coco_evaluation:Preparing results for COCO format ... +INFO:detectron2.evaluation.coco_evaluation:Evaluating predictions with unofficial COCO API... +Loading and preparing results... +DONE (t=2.16s) +creating index... +index created! +INFO:detectron2.evaluation.fast_eval_api:Evaluate annotation type *bbox* +INFO:detectron2.evaluation.fast_eval_api:COCOeval_opt.evaluate() finished in 21.80 seconds. +INFO:detectron2.evaluation.fast_eval_api:Accumulating evaluation results... +INFO:detectron2.evaluation.fast_eval_api:COCOeval_opt.accumulate() finished in 2.61 seconds. 
+Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.326 +Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.536 +Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.349 +Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.179 +Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.366 +Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.432 +Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.282 +Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.444 +Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.465 +Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.269 +Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.508 +Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.609 +INFO:detectron2.evaluation.coco_evaluation:Evaluation results for bbox: +| AP | AP50 | AP75 | APs | APm | APl | +|:------:|:------:|:------:|:------:|:------:|:------:| +| 32.586 | 53.634 | 34.852 | 17.862 | 36.613 | 43.174 | +INFO:detectron2.evaluation.coco_evaluation:Per-category bbox AP: +| category | AP | category | AP | category | AP | +|:--------------|:-------|:-------------|:-------|:---------------|:-------| +| person | 48.933 | bicycle | 24.620 | car | 37.483 | +| motorcycle | 33.410 | airplane | 50.975 | bus | 54.898 | +| train | 51.864 | truck | 26.716 | boat | 20.755 | +| traffic light | 20.305 | fire hydrant | 58.144 | stop sign | 58.833 | +| parking meter | 41.813 | bench | 17.210 | bird | 29.444 | +| cat | 57.738 | dog | 52.853 | horse | 51.333 | +| sheep | 40.341 | cow | 41.568 | elephant | 56.160 | +| bear | 63.240 | zebra | 59.121 | giraffe | 57.166 | +| backpack | 11.226 | umbrella | 29.385 | handbag | 8.685 | +| tie | 24.923 | suitcase | 27.242 | frisbee | 53.933 | +| skis | 16.987 | snowboard | 24.268 | sports ball | 40.009 | +| kite | 34.285 | baseball bat | 17.073 | baseball glove | 25.865 | +| skateboard | 39.694 | surfboard | 28.035 | tennis racket | 37.552 | +| bottle | 30.593 | wine glass | 26.470 | cup | 33.779 | +| fork | 19.335 | knife | 11.024 | spoon | 8.761 | +| bowl | 33.928 | banana | 18.034 | apple | 15.394 | +| sandwich | 27.732 | orange | 26.546 | broccoli | 19.022 | +| carrot | 15.449 | hot dog | 25.118 | pizza | 44.402 | +| donut | 35.096 | cake | 23.876 | chair | 18.866 | +| couch | 32.443 | potted plant | 18.701 | bed | 33.585 | +| dining table | 20.164 | toilet | 46.354 | tv | 48.705 | +| laptop | 50.107 | mouse | 47.597 | remote | 20.899 | +| keyboard | 40.454 | cell phone | 28.115 | microwave | 43.190 | +| oven | 25.974 | toaster | 13.432 | sink | 27.114 | +| refrigerator | 42.467 | book | 10.420 | clock | 44.894 | +| vase | 30.559 | scissors | 25.719 | teddy bear | 36.704 | +| hair drier | 0.000 | toothbrush | 11.796 | | | +``` + + **精度调试:** +> 1.根据代码语义RoiExtractor参数finest_scale不是224而是56 +> 2.因gather算子处理-1会导致每张图的第一个score为0,故maskrcnn_detectron2.diff中已将dets[:, -1]改为dets[:, 4] +> 3.单张图调试 +> ``` +> demo.py分数改为0.05,defaults.py MIN_SIZE_TEST与MAX_SIZE_TEST改为1344: +> python3.7 demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --input 000000252219_1344x1344.jpg --opts MODEL.WEIGHTS ../../model_final.pth MODEL.DEVICE cpu +> 说明: +> 比较pth的rpn与om的rpn输出前提是detectron2/config/defaults.py的_C.INPUT.MIN_SIZE_TEST与_C.INPUT.MAX_SIZE_TEST要改为1344,并且注意因为000000252219_1344x1344.jpg 
是等比例缩放四边加pad的处理结果,因此pth推理时等价于先进行了pad然后再进行标准化的,因此图片tensor边缘是负均值。开始误认为预处理与mmdetection相同因此SIZE_TEST的值与000000252219_1344x1344.jpg缩放是按上述方式处理的,经此与后面的调试步骤发现预处理与mmdetection不同。om算子输出与开源pth推理时变量的打印值对比,找到输出不对的算子,发现前处理均值方差不同于mmdetection框架,且是BGR序。 +> ``` +> 4.精度调试 +> ``` +> 对开源代码预处理与参数修改,使得cpu,gpu版的pth推理达到npu版代码的pth推理精度,参见本文第七章第二节T4精度数据的diff文件与执行精度测评的命令。 +> 说明: +> 1.查看npu固定1344,1344的前处理方式(缩放加pad) +> from torchvision import utils as vutils +> vutils.save_image(images.tensor, 'test.jpg') +> FIX_SHAPE->./detectron2/data/dataset_mapper.py->ResizeShortestEdge,最短边800最大1333。 +> 2.cpu与gpu开源代码推理pth精度与npu代码推理pth差2到3个点,npu代码(基于detectron2 v0.2.1)更改roi_align.py为开源的代码后推理发现pth精度下降2到3个点,最终发现是aligned参数问题,注意插件缺陷导致om中设置该参数未能生效。 +> ``` + + +### 6.2 开源精度 +[官网精度](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch) + +参考[npu版detectron2框架的maskrcnn](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch),安装依赖PyTorch(NPU版本)与设置环境变量,在npu上执行推理,测得npu精度如下: +```shell +python3.7 -m pip install -e Faster_Mask_RCNN_for_PyTorch +cd Faster_Mask_RCNN_for_PyTorch +修改eval.sh的配置文件与权重文件分别为mask_rcnn_R_101_FPN_3x.yaml与model_final.pth,删除mask_rcnn_R_101_FPN_3x.yaml的SOLVER和DATALOADER配置,datasets/coco下面放置coco2017验证集图片与标签(参考本文第三章第一节步骤五) +./eval.sh +``` +``` +Task: bbox +AP,AP50,AP75,APs,APm,APl +33.0103,53.5686,35.5192,17.8069,36.9325,44.0201 +Task: segm +AP,AP50,AP75,APs,APm,APl +30.3271,50.4665,31.8223,12.9573,33.0375,44.8537 +``` +### 6.3 精度对比 +om推理box map精度为0.326,npu推理box map精度为0.330,npu输出400个框精度更高点但性能较低,精度下降在1个点之内,因此可视为精度达标 + +## 7 性能对比 + +- **[npu性能数据](#71-npu性能数据)** +- **[T4性能数据](#72-T4性能数据)** +- **[性能对比](#73-性能对比)** + +### 7.1 npu性能数据 +batch1的性能: + 测试npu性能要确保device空闲,使用npu-smi info命令可查看device是否在运行其它推理任务 +``` +./benchmark.x86_64 -round=20 -om_path=maskrcnn_detectron2_npu.om -device_id=0 -batch_size=1 +``` +执行20次纯推理取均值,统计吞吐率与其倒数时延(benchmark的时延是单个数据的推理时间),npu性能是一个device执行的结果 +``` +[INFO] Dataset number: 19 finished cost 439.142ms +[INFO] PureInfer result saved in ./result/PureInfer_perf_of_maskrcnn_detectron2_npu_in_device_0.txt +-----------------PureInfer Performance Summary------------------ +[INFO] ave_throughputRate: 2.27773samples/s, ave_latency: 440.813ms +---------------------------------------------------------------- +``` +maskrcnn detectron2不支持多batch + + **性能优化:** +> 查看profiling导出的op_statistic_0_1.csv算子总体耗时统计发现gather算子耗时最多,然后查看profiling导出的task_time_0_1.csv找到具体哪些gather算子耗时最多,通过导出onnx的verbose打印找到具体算子对应的代码,因gather算子计算最后一个轴会很耗时,因此通过转置后计算0轴规避,比如maskrcnn_detectron2.diff文件中的如下修改: +> ``` +> boxes_prof = boxes.permute(1, 0) +> widths = boxes_prof[2, :] - boxes_prof[0, :] +> ``` +> + + +### 7.2 T4性能数据 +batch1性能: +onnx包含自定义算子,因此不能使用开源TensorRT测试性能数据,故在T4机器上使用pth在线推理测试性能数据 + +依据npu版代码修改cpu,gpu版detectron2,参见maskrcnn_pth_npu.diff: +```diff +diff --git a/detectron2/data/dataset_mapper.py b/detectron2/data/dataset_mapper.py +index 0e77851..0d03c08 100644 +--- a/detectron2/data/dataset_mapper.py ++++ b/detectron2/data/dataset_mapper.py +@@ -4,6 +4,7 @@ import logging + import numpy as np + from typing import List, Optional, Union + import torch ++from torch.nn import functional as F + + from detectron2.config import configurable + +@@ -133,6 +134,7 @@ class DatasetMapper: + + aug_input = T.AugInput(image, sem_seg=sem_seg_gt) + transforms = self.augmentations(aug_input) ++ print(self.augmentations,transforms) + image, sem_seg_gt = aug_input.image, aug_input.sem_seg + + image_shape = image.shape[:2] # h, w +@@ 
-140,6 +142,20 @@ class DatasetMapper: + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) ++ ++ size_divisibility = 32 ++ pad_value = 0 ++ pixel_mean = torch.Tensor([103.53, 116.28, 123.675]).view(-1, 1, 1) ++ pixel_std = torch.Tensor([1.0, 1.0, 1.0]).view(-1, 1, 1) ++ images = (dataset_dict["image"] - pixel_mean) / pixel_std ++ dataset_dict["image_size"] = tuple(images.shape[-2:]) ++ batch_shape = (3, 1344, 1344) ++ padding_size = [0, batch_shape[-1] - images.shape[-1], ++ 0, batch_shape[-2] - images.shape[-2]] ++ padded = F.pad(images, padding_size, value=pad_value) ++ batched_imgs = padded.unsqueeze_(0) ++ dataset_dict["image_preprocess"] = batched_imgs.contiguous() ++ + if sem_seg_gt is not None: + dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) + +diff --git a/detectron2/layers/roi_align.py b/detectron2/layers/roi_align.py +index bcbf5f4..23b138d 100644 +--- a/detectron2/layers/roi_align.py ++++ b/detectron2/layers/roi_align.py +@@ -38,7 +38,7 @@ class ROIAlign(nn.Module): + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio +- self.aligned = aligned ++ self.aligned = False + + from torchvision import __version__ + +diff --git a/detectron2/modeling/meta_arch/rcnn.py b/detectron2/modeling/meta_arch/rcnn.py +index e5f66d1..b9ffa66 100644 +--- a/detectron2/modeling/meta_arch/rcnn.py ++++ b/detectron2/modeling/meta_arch/rcnn.py +@@ -202,6 +202,9 @@ class GeneralizedRCNN(nn.Module): + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + ++ #from torchvision import utils as vutils ++ #vutils.save_image(images.tensor, 'test.jpg') ++ print(features['p2'].shape) + if detected_instances is None: + if self.proposal_generator is not None: + proposals, _ = self.proposal_generator(images, features, None) +@@ -224,10 +227,14 @@ class GeneralizedRCNN(nn.Module): + """ + Normalize, pad and batch the input images. + """ +- images = [x["image"].to(self.device) for x in batched_inputs] ++ '''images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) +- return images ++ return images''' ++ images = [x["image_preprocess"].to(device=self.device) for x in batched_inputs] ++ images = torch.cat(images, dim=0) ++ image_sizes = [x["image_size"] for x in batched_inputs] ++ return ImageList(images, image_sizes) + + @staticmethod + def _postprocess(instances, batched_inputs: Tuple[Dict[str, torch.Tensor]], image_sizes): +diff --git a/detectron2/modeling/postprocessing.py b/detectron2/modeling/postprocessing.py +index f42e77c..909923a 100644 +--- a/detectron2/modeling/postprocessing.py ++++ b/detectron2/modeling/postprocessing.py +@@ -55,6 +55,7 @@ def detector_postprocess( + output_boxes = None + assert output_boxes is not None, "Predictions must contain boxes!" 
+ ++ print(scale_x, scale_y) + output_boxes.scale(scale_x, scale_y) + output_boxes.clip(results.image_size) + + +``` +测评T4精度与性能: +```shell +git clone https://github.com/facebookresearch/detectron2 +python3.7 -m pip install -e detectron2 +cd detectron2 +patch -p1 < ../maskrcnn_pth_npu.diff +cd tools +mkdir datasets +cp -rf ../../datasets/coco datasets/(数据集构造参考本文第三章第一节步骤五) +python3.7 train_net.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --eval-only MODEL.WEIGHTS ../../model_final.pth MODEL.DEVICE cuda:0 +``` +``` +Inference done 4993/5000. 0.2937 s / img. +``` + +### 7.3 性能对比 +310单卡4个device,benchmark测试的是一个device。T4一个设备相当于4个device,测试的是整个设备。benchmark时延是吞吐率的倒数,T4时延是吞吐率的倒数乘以batch。对于batch1,440.73ms / 4 * 1 < 0.2937s,即npu性能超过T4 +对于batch1,npu性能均高于T4性能1.2倍,该模型放在benchmark/cv/segmentation目录下 + + diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" new file mode 100644 index 0000000..dfb1a8f --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" @@ -0,0 +1,1041 @@ +# 基于开源mmdetection预训练的maskrcnn Onnx模型端到端推理指导 +- [1 模型概述](#1-模型概述) + - [1.1 论文地址](#11-论文地址) + - [1.2 代码地址](#12-代码地址) +- [2 环境说明](#2-环境说明) + - [2.1 深度学习框架](#21-深度学习框架) + - [2.2 python第三方库](#22-python第三方库) +- [3 模型转换](#3-模型转换) + - [3.1 pth转onnx模型](#31-pth转onnx模型) + - [3.2 onnx转om模型](#32-onnx转om模型) +- [4 数据集预处理](#4-数据集预处理) + - [4.1 数据集获取](#41-数据集获取) + - [4.2 数据集预处理](#42-数据集预处理) + - [4.3 生成数据集信息文件](#43-生成数据集信息文件) +- [5 离线推理](#5-离线推理) + - [5.1 benchmark工具概述](#51-benchmark工具概述) + - [5.2 离线推理](#52-离线推理) +- [6 精度对比](#6-精度对比) + - [6.1 离线推理精度统计](#61-离线推理精度统计) + - [6.2 开源精度](#62-开源精度) + - [6.3 精度对比](#63-精度对比) +- [7 性能对比](#7-性能对比) + - [7.1 npu性能数据](#71-npu性能数据) + - [7.2 T4性能数据](#72-T4性能数据) + - [7.3 性能对比](#73-性能对比) + + + +## 1 模型概述 + +- **[论文地址](#11-论文地址)** + +- **[代码地址](#12-代码地址)** + +### 1.1 论文地址 +[maskrcnn论文](https://arxiv.org/abs/1703.06870) +论文提出了一个简单、灵活、通用的目标实例分割框架Mask R-CNN。这个框架可同时做目标检测、实例分割。实例分割的实现就是在faster r-cnn的基础上加了一个可以预测目标掩膜(mask)的分支。只比Faster r-cnn慢一点,5fps。很容易拓展到其他任务如:关键点检测。18年在coco的目标检测、实例分割、人体关键点检测都取得了最优成绩。 + +### 1.2 代码地址 +[mmdetection框架maskrcnn代码](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) + +## 2 环境说明 + +- **[深度学习框架](#21-深度学习框架)** + +- **[python第三方库](#22-python第三方库)** + +### 2.1 深度学习框架 +``` +pytorch == 1.8.0 +torchvision == 0.9.0 +onnx == 1.8.0 +``` + +### 2.2 python第三方库 + +``` +numpy == 1.18.5 
+opencv-python == 4.2.0.34
+```
+
+**说明:**
+> X86架构:opencv,pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装
+>
+> Arm架构:opencv,pytorch,torchvision和onnx可以通过源码编译安装,其它可以通过pip3.7 install 包名 安装
+
+## 3 模型转换
+
+- **[pth转onnx模型](#31-pth转onnx模型)**
+
+- **[onnx转om模型](#32-onnx转om模型)**
+
+atc暂不支持动态shape小算子,可以使用大颗粒算子替换这些小算子规避。这些小算子可以在转onnx时的verbose打印中找到其对应的python代码,从而根据功能用大颗粒算子替换。onnx只要能推导出变量正确的shape且算子属性正确即可,变量实际的数值无关紧要,因此这些大颗粒算子函数的功能实现也无关紧要。此外,因onnx中包含自定义算子,需要去掉导出时对onnx模型的校验。
+
+### 3.1 pth转onnx模型
+
+1.获取pth权重文件
+[maskrcnn基于mmdetection预训练的权重文件](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth)
+文件md5sum: f4ee3c5911537f454045395d2f708954
+2.mmdetection源码安装
+```shell
+git clone https://github.com/open-mmlab/mmcv
+cd mmcv
+MMCV_WITH_OPS=1 pip3.7 install -e .
+cd ..
+git clone https://github.com/open-mmlab/mmdetection
+cd mmdetection
+pip3.7 install -r requirements/build.txt
+python3.7 setup.py develop
+```
+
+ **说明:**
+> 安装所需的依赖说明请参考mmdetection/docs/get_started.md
+>
+
+3.转原始onnx
+```shell
+python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img demo/demo.jpg --test-img tests/data/color.jpg --shape 800 1216 --show --verify --simplify
+```
+若报错,参考:https://github.com/open-mmlab/mmdetection/issues/4548
+4.修改mmdetection代码,参见maskrcnn_mmdetection.diff:
+```diff
+diff --git a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
+index e9eb357..f72cef7 100644
+--- a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
++++ b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
+@@ -168,13 +168,31 @@ def delta2bbox(rois,
+ [0.0000, 0.3161, 4.1945, 0.6839],
+ [5.0000, 5.0000, 5.0000, 5.0000]])
+ """
+- means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4)
+- stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4)
++
++ # fix shape for means and stds when exporting onnx
++ if torch.onnx.is_in_onnx_export():
++ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1).numpy() // 4)
++ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1).numpy() // 4)
++ else:
++ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4)
++ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4)
+ denorm_deltas = deltas * stds + means
+- dx = denorm_deltas[:, 0::4]
+- dy = denorm_deltas[:, 1::4]
+- dw = denorm_deltas[:, 2::4]
+- dh = denorm_deltas[:, 3::4]
++ # dx = denorm_deltas[:, 0::4]
++ # dy = denorm_deltas[:, 1::4]
++ # dw = denorm_deltas[:, 2::4]
++ # dh = denorm_deltas[:, 3::4]
++ if denorm_deltas.shape[1] > 4:
++ denorm_deltas = denorm_deltas.view(-1, 80, 4)
++ dx = denorm_deltas[:, :, 0:1:].view(-1, 80)
++ dy = denorm_deltas[:, :, 1:2:].view(-1, 80)
++ dw = denorm_deltas[:, :, 2:3:].view(-1, 80)
++ dh = denorm_deltas[:, :, 3:4:].view(-1, 80)
++ else:
++ dx = denorm_deltas[:, 0:1:]
++ dy = denorm_deltas[:, 1:2:]
++ dw = denorm_deltas[:, 2:3:]
++ dh = denorm_deltas[:, 3:4:]
++
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = dw.clamp(min=-max_ratio, max=max_ratio)
+ dh = dh.clamp(min=-max_ratio, max=max_ratio)
+diff --git a/mmdet/core/post_processing/bbox_nms.py b/mmdet/core/post_processing/bbox_nms.py
+index c43aea9..e99f5d8 100644
+--- a/mmdet/core/post_processing/bbox_nms.py
++++ b/mmdet/core/post_processing/bbox_nms.py
+@@ -4,6 +4,59 @@ from mmcv.ops.nms import
batched_nms + from mmdet.core.bbox.iou_calculators import bbox_overlaps + + ++class BatchNMSOp(torch.autograd.Function): ++ @staticmethod ++ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). ++ scores (torch.Tensor): scores in shape (batch, N, C). ++ return: ++ nmsed_boxes: (1, N, 4) ++ nmsed_scores: (1, N) ++ nmsed_classes: (1, N) ++ nmsed_num: (1,) ++ """ ++ ++ # Phony implementation for onnx export ++ nmsed_boxes = bboxes[:, :max_total_size, 0, :] ++ nmsed_scores = scores[:, :max_total_size, 0] ++ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) ++ nmsed_num = torch.Tensor([max_total_size]) ++ ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++ @staticmethod ++ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', ++ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, ++ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (N, 4). ++ scores (torch.Tensor): scores in shape (N, ). ++ """ ++ ++ num_classes = bboxes.shape[1].numpy() // 4 ++ if bboxes.dtype == torch.float32: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() ++ else: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1) ++ ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, ++ score_threshold, iou_threshold, max_size_per_class, max_total_size) ++ nmsed_boxes = nmsed_boxes.float() ++ nmsed_scores = nmsed_scores.float() ++ nmsed_classes = nmsed_classes.long() ++ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) ++ dets = dets.reshape((max_total_size, 5)) ++ labels = nmsed_classes.reshape((max_total_size, )) ++ return dets, labels ++ ++ + def multiclass_nms(multi_bboxes, + multi_scores, + score_thr, +@@ -40,7 +93,17 @@ def multiclass_nms(multi_bboxes, + multi_scores.size(0), num_classes, 4) + + scores = multi_scores[:, :-1] ++ # multiply score_factor after threshold to preserve more bboxes, improve ++ # mAP by 1% for YOLOv3 ++ if score_factors is not None: ++ # expand the shape to match original shape of score ++ score_factors = score_factors.view(-1, 1).expand( ++ multi_scores.size(0), num_classes) ++ score_factors = score_factors.reshape(-1) ++ scores = scores * score_factors + ++ # cpu and gpu ++ ''' + labels = torch.arange(num_classes, dtype=torch.long) + labels = labels.view(1, -1).expand_as(scores) + +@@ -80,7 +143,11 @@ def multiclass_nms(multi_bboxes, + return dets, labels[keep], keep + else: + return dets, labels[keep] ++ ''' + ++ # npu ++ dets, labels = batch_nms_op(bboxes, scores, score_thr, nms_cfg.get("iou_threshold"), max_num, max_num) ++ return dets, labels + + def fast_nms(multi_bboxes, + multi_scores, +diff --git a/mmdet/models/dense_heads/rpn_head.py b/mmdet/models/dense_heads/rpn_head.py +index f565d1a..3c29386 100644 +--- a/mmdet/models/dense_heads/rpn_head.py ++++ b/mmdet/models/dense_heads/rpn_head.py +@@ -9,6 +9,57 @@ from .anchor_head import AnchorHead 
+ from .rpn_test_mixin import RPNTestMixin + + ++class BatchNMSOp(torch.autograd.Function): ++ @staticmethod ++ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). ++ scores (torch.Tensor): scores in shape (batch, N, C). ++ return: ++ nmsed_boxes: (1, N, 4) ++ nmsed_scores: (1, N) ++ nmsed_classes: (1, N) ++ nmsed_num: (1,) ++ """ ++ ++ # Phony implementation for onnx export ++ nmsed_boxes = bboxes[:, :max_total_size, 0, :] ++ nmsed_scores = scores[:, :max_total_size, 0] ++ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) ++ nmsed_num = torch.Tensor([max_total_size]) ++ ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++ @staticmethod ++ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', ++ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, ++ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) ++ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num ++ ++def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): ++ """ ++ boxes (torch.Tensor): boxes in shape (N, 4). ++ scores (torch.Tensor): scores in shape (N, ). ++ """ ++ ++ num_classes = bboxes.shape[1].numpy() // 4 ++ if bboxes.dtype == torch.float32: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() ++ else: ++ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) ++ scores = scores.reshape(1, scores.shape[0].numpy(), -1) ++ ++ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, ++ score_threshold, iou_threshold, max_size_per_class, max_total_size) ++ nmsed_boxes = nmsed_boxes.float() ++ nmsed_scores = nmsed_scores.float() ++ nmsed_classes = nmsed_classes.long() ++ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) ++ labels = nmsed_classes.reshape((max_total_size, )) ++ return dets, labels ++ + @HEADS.register_module() + class RPNHead(RPNTestMixin, AnchorHead): + """RPN head. 
+@@ -132,9 +183,14 @@ class RPNHead(RPNTestMixin, AnchorHead): + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + # sort is faster than topk + # _, topk_inds = scores.topk(cfg.nms_pre) +- ranked_scores, rank_inds = scores.sort(descending=True) +- topk_inds = rank_inds[:cfg.nms_pre] +- scores = ranked_scores[:cfg.nms_pre] ++ # onnx uses topk to sort, this is simpler for onnx export ++ if torch.onnx.is_in_onnx_export(): ++ scores, topk_inds = torch.topk(scores, cfg.nms_pre) ++ else: ++ ranked_scores, rank_inds = scores.sort(descending=True) ++ topk_inds = rank_inds[:cfg.nms_pre] ++ scores = ranked_scores[:cfg.nms_pre] ++ + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + mlvl_scores.append(scores) +@@ -164,5 +220,12 @@ class RPNHead(RPNTestMixin, AnchorHead): + + # TODO: remove the hard coded nms type + nms_cfg = dict(type='nms', iou_threshold=cfg.nms_thr) ++ # cpu and gpu return ++ ''' + dets, keep = batched_nms(proposals, scores, ids, nms_cfg) + return dets[:cfg.nms_post] ++ ''' ++ ++ # npu return ++ dets, labels = batch_nms_op(proposals, scores, 0.0, nms_cfg.get("iou_threshold"), cfg.nms_post, cfg.nms_post) ++ return dets +diff --git a/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +index 0cba3cd..a965e53 100644 +--- a/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py ++++ b/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +@@ -199,11 +199,11 @@ class FCNMaskHead(nn.Module): + # TODO: Remove after F.grid_sample is supported. + from torchvision.models.detection.roi_heads \ + import paste_masks_in_image +- masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) ++ '''masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) + thr = rcnn_test_cfg.get('mask_thr_binary', 0) + if thr > 0: +- masks = masks >= thr +- return masks ++ masks = masks >= thr''' ++ return mask_pred + + N = len(mask_pred) + # The actual implementation split the input into chunks, +diff --git a/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py +index c0eebc4..63605c5 100644 +--- a/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py ++++ b/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py +@@ -4,6 +4,31 @@ from mmcv.runner import force_fp32 + from mmdet.models.builder import ROI_EXTRACTORS + from .base_roi_extractor import BaseRoIExtractor + ++import torch.onnx.symbolic_helper as sym_help ++ ++class RoiExtractor(torch.autograd.Function): ++ @staticmethod ++ def forward(self, f0, f1, f2, f3, rois, aligned=1, finest_scale=56, pooled_height=7, pooled_width=7, ++ pool_mode='avg', roi_scale_factor=0, sample_num=0, spatial_scale=[0.25, 0.125, 0.0625, 0.03125]): ++ """ ++ feats (torch.Tensor): feats in shape (batch, 256, H, W). ++ rois (torch.Tensor): rois in shape (k, 5). 
++ return: ++ roi_feats (torch.Tensor): (k, 256, pooled_width, pooled_width) ++ """ ++ ++ # phony implementation for shape inference ++ k = rois.size()[0] ++ roi_feats = torch.ones(k, 256, pooled_height, pooled_width) ++ return roi_feats ++ ++ @staticmethod ++ def symbolic(g, f0, f1, f2, f3, rois, aligned=1, finest_scale=56, pooled_height=7, pooled_width=7): ++ # TODO: support tensor list type for feats ++ #f_tensors = sym_help._unpack_list(feats) ++ roi_feats = g.op('RoiExtractor', f0, f1, f2, f3, rois, aligned_i=1, finest_scale_i=56, pooled_height_i=pooled_height, pooled_width_i=pooled_width, ++ pool_mode_s='avg', roi_scale_factor_i=0, sample_num_i=0, spatial_scale_f=[0.25, 0.125, 0.0625, 0.03125], outputs=1) ++ return roi_feats + + @ROI_EXTRACTORS.register_module() + class SingleRoIExtractor(BaseRoIExtractor): +@@ -52,6 +77,14 @@ class SingleRoIExtractor(BaseRoIExtractor): + + @force_fp32(apply_to=('feats', ), out_fp16=True) + def forward(self, feats, rois, roi_scale_factor=None): ++ # Work around to export onnx for npu ++ if torch.onnx.is_in_onnx_export(): ++ out_size = self.roi_layers[0].output_size ++ roi_feats = RoiExtractor.apply(feats[0], feats[1], feats[2], feats[3], rois, 1, 56, out_size[0], out_size[1]) ++ # roi_feats = RoiExtractor.apply(list(feats), rois) ++ return roi_feats ++ ++ + """Forward function.""" + out_size = self.roi_layers[0].output_size + num_levels = len(feats) +diff --git a/tools/deployment/pytorch2onnx.py b/tools/deployment/pytorch2onnx.py +index 1305a79..c79e9fb 100644 +--- a/tools/deployment/pytorch2onnx.py ++++ b/tools/deployment/pytorch2onnx.py +@@ -48,7 +48,7 @@ def pytorch2onnx(config_path, + input_names=['input'], + output_names=output_names, + export_params=True, +- keep_initializers_as_inputs=True, ++ #keep_initializers_as_inputs=True, + do_constant_folding=True, + verbose=show, + opset_version=opset_version) + +``` + **修改依据:** +> 1.atc暂不支持if与nonzero动态小算子,这两小算子是bbox_nms.py与single_level_roi_extractor.py的大功能nms与roi引入的(rpn_head.py中的nms虽然没有引入不支持算子但也需要替换,否则后面会出现E19014: Op[ReduceMax_505]'s attribute axes is invalid which is empty),因此使用npu的nms与roi大算子代替这部分大功能。loop算子暂无合适替换方法,由于它在网络最后一部分,因此可将其与后面的部分放到后处理 +> 2. 
atc转换报错E11019: Op[Conv_0]'s input[1] is not linked,因此注释掉tools/deployment/pytorch2onnx.py中export函数的keep_initializers_as_inputs=True,
+> 3.动态shape算子导致atc转换出现未知错误,atc日志debug显示Unknown shape op Tile output shape range is unknown, set its size -1,在转onnx时的verbose打印中找到该算子对应的python代码行,利用numpy()将means和stds的shape固定下来,参见maskrcnn_mmdetection.diff
+> 4.slice算子跑在aicpu上有错误,所以改为dx = denorm_deltas[:, :, 0:1:].view(-1, 80)的写法,使其运行在aicore上
+> 5.atc转换时,输出为一对多的Concat算子会被改名,故添加dets = dets.reshape((max_total_size, 5)),使Concat后多出一个冗余的Reshape算子作为稳定的输出节点
+> 6.atc转换时计算mask的RoiExtractor算子报错,打开--log=debug输出日志,并用strace -f跟踪atc命令的打印,在/root/ascend/log/plog/…找到日志存放路径,发现是(14,14)的pooled尺寸导致cube内存不够用
+> 7.atc转换时根据日志中报错的算子,在转onnx时的verbose打印中找到其对应的python代码,然后找到规避方法解决,具体修改参见maskrcnn_mmdetection.diff
+> 8.其它地方的修改原因参见精度调试
+
+
+通过打补丁的方式修改mmdetection:
+```shell
+patch -p1 < ./maskrcnn_mmdetection.diff
+```
+5.修改pytorch代码,去除导出onnx时进行的检查
+将/usr/local/python3.7.5/lib/python3.7/site-packages/torch/onnx/utils.py文件的_check_onnx_proto(proto)改为pass
+
+6.运行如下命令,生成含有npu自定义算子的onnx:
+```shell
+python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img demo/demo.jpg --shape 800 1216
+```
+7.经过修改后导出的onnx由于添加了自定义算子,无法使用onnx的infer shape,所以需要手动固定resize算子的shape。方法是:先用未经修改的源代码导出onnx并simplify(添加--simplify参数,参见转原始onnx的命令),再用netron可视化工具查看其中各resize算子的输出大小,然后用如下脚本将这些大小固定到修改后导出的onnx中
+```python
+import sys
+
+import onnx
+from onnx import helper
+
+input_model = sys.argv[1]
+output_model = sys.argv[2]
+model = onnx.load(input_model)
+# 模型含自定义算子,跳过onnx.checker.check_model(model)校验
+
+model_nodes = model.graph.node
+
+
+def getNodeByName(nodes, name: str):
+    # 按名字查找节点,找不到时返回-1
+    for n in nodes:
+        if n.name == name:
+            return n
+    return -1
+
+
+# 固定resize算子的输出shape,具体数值来自对原始onnx做simplifier后用netron查看的结果
+sizes1 = helper.make_tensor('size1', onnx.TensorProto.INT32, [4], [1, 256, 50, 76])
+sizes2 = helper.make_tensor('size2', onnx.TensorProto.INT32, [4], [1, 256, 100, 152])
+sizes3 = helper.make_tensor('size3', onnx.TensorProto.INT32, [4], [1, 256, 200, 304])
+model.graph.initializer.append(sizes1)
+model.graph.initializer.append(sizes2)
+model.graph.initializer.append(sizes3)
+getNodeByName(model_nodes, 'Resize_141').input[3] = "size1"
+getNodeByName(model_nodes, 'Resize_161').input[3] = "size2"
+getNodeByName(model_nodes, 'Resize_181').input[3] = "size3"
+
+print("Mask R-CNN onnx adapted to ATC")
+onnx.save(model, output_model)
+```
+```shell
+python3.7 fix_onnx_shape.py mask_rcnn_r50_fpn_1x_coco.onnx mask_rcnn_r50_fpn_1x_coco_fix.onnx
+```
+
+### 3.2 onnx转om模型
+
+1.设置环境变量
+```shell
+export install_path=/usr/local/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
+```
+2.使用atc将onnx模型转换为om模型文件,工具使用方法可以参考[CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373)。需要指定输出节点以去除无用输出;节点序号可能会因网络结构不同而不同,可使用netron开源可视化工具查看具体的输出节点名:
+```shell
+atc --framework=5 --model=./mask_rcnn_r50_fpn_1x_coco_fix.onnx --output=mask_rcnn_r50_fpn_1x_coco_bs1 --out_nodes="Reshape_574:0;Reshape_576:0;Sigmoid_604:0" --input_format=NCHW
--input_shape="input:1,3,800,1216" --log=debug --soc_version=Ascend310 +``` + +## 4 数据集预处理 + +- **[数据集获取](#41-数据集获取)** + +- **[数据集预处理](#42-数据集预处理)** + +- **[生成数据集信息文件](#43-生成数据集信息文件)** + +### 4.1 数据集获取 +该模型使用[COCO官网](https://cocodataset.org/#download)的coco2017的5千张验证集进行测试,图片与标签分别存放在/opt/npu/dataset/coco/val2017/与/opt/npu/dataset/coco/annotations/instances_val2017.json。 + +### 4.2 数据集预处理 +1.预处理脚本maskrcnn_pth_preprocess.py +```python +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import argparse +import numpy as np +import cv2 +import mmcv +import torch +import multiprocessing + +def resize(img, size): + old_h = img.shape[0] + old_w = img.shape[1] + scale_ratio = min(size[0] / old_w, size[1] / old_h) + new_w = int(np.floor(old_w * scale_ratio)) + new_h = int(np.floor(old_h * scale_ratio)) + resized_img = mmcv.imresize(img, (new_w, new_h), backend='cv2') + return resized_img + +def gen_input_bin(file_batches, batch): + i = 0 + for file in file_batches[batch]: + i = i + 1 + print("batch", batch, file, "===", i) + + image = mmcv.imread(os.path.join(flags.image_src_path, file), backend='cv2') + #image = mmcv.imrescale(image, (flags.model_input_width, flags.model_input_height)) + image = resize(image, (flags.model_input_width, flags.model_input_height)) + mean = np.array([123.675, 116.28, 103.53], dtype=np.float32) + std = np.array([58.395, 57.12, 57.375], dtype=np.float32) + image = mmcv.imnormalize(image, mean, std) + h = image.shape[0] + w = image.shape[1] + pad_left = (flags.model_input_width - w) // 2 + pad_top = (flags.model_input_height - h) // 2 + pad_right = flags.model_input_width - pad_left - w + pad_bottom = flags.model_input_height - pad_top - h + image = mmcv.impad(image, padding=(pad_left, pad_top, pad_right, pad_bottom), pad_val=0) + #mmcv.imwrite(image, './paded_jpg/' + file.split('.')[0] + '.jpg') + image = image.transpose(2, 0, 1) + image.tofile(os.path.join(flags.bin_file_path, file.split('.')[0] + ".bin")) + +def preprocess(src_path, save_path): + files = os.listdir(src_path) + file_batches = [files[i:i + 100] for i in range(0, 5000, 100) if files[i:i + 100] != []] + thread_pool = multiprocessing.Pool(len(file_batches)) + for batch in range(len(file_batches)): + thread_pool.apply_async(gen_input_bin, args=(file_batches, batch)) + thread_pool.close() + thread_pool.join() + print("in thread, except will not report! 
please ensure bin files generated.") + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='preprocess of MaskRCNN PyTorch model') + parser.add_argument("--image_src_path", default="./coco2017/", help='image of dataset') + parser.add_argument("--bin_file_path", default="./coco2017_bin/", help='Preprocessed image buffer') + parser.add_argument("--model_input_height", default=800, type=int, help='input tensor height') + parser.add_argument("--model_input_width", default=1216, type=int, help='input tensor width') + flags = parser.parse_args() + if not os.path.exists(flags.bin_file_path): + os.makedirs(flags.bin_file_path) + preprocess(flags.image_src_path, flags.bin_file_path) +``` +2.执行预处理脚本,生成数据集预处理后的bin文件 +```shell +python3.7 maskrcnn_pth_preprocess.py --image_src_path=/opt/npu/dataset/coco/val2017 --bin_file_path=val2017_bin --model_input_height=800 --model_input_width=1216 +``` +### 4.3 生成数据集信息文件 +1.生成数据集信息文件脚本get_info.py +```python +import os +import sys +import cv2 +from glob import glob + + +def get_bin_info(file_path, info_name, width, height): + bin_images = glob(os.path.join(file_path, '*.bin')) + with open(info_name, 'w') as file: + for index, img in enumerate(bin_images): + content = ' '.join([str(index), img, width, height]) + file.write(content) + file.write('\n') + + +def get_jpg_info(file_path, info_name): + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + image_names = [] + for extension in extensions: + image_names.append(glob(os.path.join(file_path, '*.' + extension))) + with open(info_name, 'w') as file: + for image_name in image_names: + if len(image_name) == 0: + continue + else: + for index, img in enumerate(image_name): + img_cv = cv2.imread(img) + shape = img_cv.shape + width, height = shape[1], shape[0] + content = ' '.join([str(index), img, str(width), str(height)]) + file.write(content) + file.write('\n') + + +if __name__ == '__main__': + file_type = sys.argv[1] + file_path = sys.argv[2] + info_name = sys.argv[3] + if file_type == 'bin': + width = sys.argv[4] + height = sys.argv[5] + assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5' + get_bin_info(file_path, info_name, width, height) + elif file_type == 'jpg': + assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3' + get_jpg_info(file_path, info_name) +``` +2.执行生成数据集信息脚本,生成数据集信息文件 +```shell +python3.7 get_info.py bin val2017_bin maskrcnn.info 1216 800 +``` +第一个参数为模型输入的类型,第二个参数为生成的bin文件路径,第三个为输出的info文件,后面为宽高信息 +## 5 离线推理 + +- **[benchmark工具概述](#51-benchmark工具概述)** + +- **[离线推理](#52-离线推理)** + +### 5.1 benchmark工具概述 + +benchmark工具为华为自研的模型推理工具,支持多种模型的离线推理,能够迅速统计出模型在Ascend310上的性能,支持真实数据和纯推理两种模式,配合后处理脚本,可以实现诸多模型的端到端过程,获取工具及使用方法可以参考[CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373) +### 5.2 离线推理 +1.设置环境变量 +```shell +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH +export ASCEND_OPP_PATH=${install_path}/opp +export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ +``` +2.执行离线推理 +```shell +./benchmark.x86_64 -model_type=vision -om_path=mask_rcnn_r50_fpn_1x_coco_bs1.om -device_id=0 -batch_size=1 -input_text_path=maskrcnn.info -input_width=1216 
-input_height=800 -useDvpp=false -output_binary=true +``` + **注意:** +> label是int64,benchmark输出非二进制时会将float转为0 +> + +输出结果默认保存在当前目录result/dumpOutput_device0,模型有三个输出,每个输入对应的输出对应三个_x.bin文件 +``` +输出 shape 数据类型 数据含义 +output1 100 * 5 FP32 boxes and scores +output3 100 * 1 INT64 labels +output4 100 * 80 * 28 * 28 FP32 masks +``` + +## 6 精度对比 + +- **[离线推理精度](#61-离线推理精度)** +- **[开源精度](#62-开源精度)** +- **[精度对比](#63-精度对比)** + +### 6.1 离线推理精度统计 + +后处理统计map精度 +```python +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import argparse +import cv2 +import numpy as np + +def postprocess_bboxes(bboxes, image_size, net_input_width, net_input_height): + w = image_size[0] + h = image_size[1] + scale = min(net_input_width / w, net_input_height / h) + + pad_w = net_input_width - w * scale + pad_h = net_input_height - h * scale + pad_left = pad_w // 2 + pad_top = pad_h // 2 + + bboxes[:, 0] = (bboxes[:, 0] - pad_left) / scale + bboxes[:, 1] = (bboxes[:, 1] - pad_top) / scale + bboxes[:, 2] = (bboxes[:, 2] - pad_left) / scale + bboxes[:, 3] = (bboxes[:, 3] - pad_top) / scale + + return bboxes + +def postprocess_masks(masks, image_size, net_input_width, net_input_height): + w = image_size[0] + h = image_size[1] + scale = min(net_input_width / w, net_input_height / h) + + pad_w = net_input_width - w * scale + pad_h = net_input_height - h * scale + pad_left = pad_w // 2 + pad_top = pad_h // 2 + + if pad_top < 0: + pad_top = 0 + if pad_left < 0: + pad_left = 0 + top = int(pad_top) + left = int(pad_left) + hs = int(pad_top + net_input_height - pad_h) + ws = int(pad_left + net_input_width - pad_w) + masks = masks.to(dtype=torch.float32) + res_append = torch.zeros(0, h, w) + if torch.cuda.is_available(): + res_append = res_append.to(device='cuda') + for i in range(masks.size(0)): + mask = masks[i][0][top:hs, left:ws] + mask = mask.expand((1, 1, mask.size(0), mask.size(1))) + mask = F.interpolate(mask, size=(int(h), int(w)), mode='bilinear', align_corners=False) + mask = mask[0][0] + mask = mask.unsqueeze(0) + res_append = torch.cat((res_append, mask)) + + return res_append[:, None] + +import pickle +def save_variable(v, filename): + f = open(filename, 'wb') + pickle.dump(v, f) + f.close() +def load_variavle(filename): + f = open(filename, 'rb') + r = pickle.load(f) + f.close() + return r + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--test_annotation", default="./origin_pictures.info") + parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0/") + parser.add_argument("--det_results_path", default="./detection-results/") + parser.add_argument("--net_out_num", type=int, default=3) + parser.add_argument("--net_input_width", type=int, default=1216) + parser.add_argument("--net_input_height", type=int, default=800) + parser.add_argument("--ifShowDetObj", action="store_true", help="if input the para means True, neither False.") + flags = parser.parse_args() + + img_size_dict = dict() + with 
+## 6 Accuracy comparison
+
+- **[Offline inference accuracy](#61-offline-inference-accuracy)**
+- **[Open-source accuracy](#62-open-source-accuracy)**
+- **[Accuracy comparison](#63-accuracy-comparison)**
+
+### 6.1 Offline inference accuracy
+
+Post-processing computes the mAP accuracy
+```python
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+import cv2
+import numpy as np
+
+def postprocess_bboxes(bboxes, image_size, net_input_width, net_input_height):
+    w = image_size[0]
+    h = image_size[1]
+    scale = min(net_input_width / w, net_input_height / h)
+
+    pad_w = net_input_width - w * scale
+    pad_h = net_input_height - h * scale
+    pad_left = pad_w // 2
+    pad_top = pad_h // 2
+
+    bboxes[:, 0] = (bboxes[:, 0] - pad_left) / scale
+    bboxes[:, 1] = (bboxes[:, 1] - pad_top) / scale
+    bboxes[:, 2] = (bboxes[:, 2] - pad_left) / scale
+    bboxes[:, 3] = (bboxes[:, 3] - pad_top) / scale
+
+    return bboxes
+
+def postprocess_masks(masks, image_size, net_input_width, net_input_height):
+    w = image_size[0]
+    h = image_size[1]
+    scale = min(net_input_width / w, net_input_height / h)
+
+    pad_w = net_input_width - w * scale
+    pad_h = net_input_height - h * scale
+    pad_left = pad_w // 2
+    pad_top = pad_h // 2
+
+    if pad_top < 0:
+        pad_top = 0
+    if pad_left < 0:
+        pad_left = 0
+    top = int(pad_top)
+    left = int(pad_left)
+    hs = int(pad_top + net_input_height - pad_h)
+    ws = int(pad_left + net_input_width - pad_w)
+    masks = masks.to(dtype=torch.float32)
+    res_append = torch.zeros(0, h, w)
+    if torch.cuda.is_available():
+        res_append = res_append.to(device='cuda')
+    for i in range(masks.size(0)):
+        mask = masks[i][0][top:hs, left:ws]
+        mask = mask.expand((1, 1, mask.size(0), mask.size(1)))
+        mask = F.interpolate(mask, size=(int(h), int(w)), mode='bilinear', align_corners=False)
+        mask = mask[0][0]
+        mask = mask.unsqueeze(0)
+        res_append = torch.cat((res_append, mask))
+
+    return res_append[:, None]
+
+import pickle
+def save_variable(v, filename):
+    f = open(filename, 'wb')
+    pickle.dump(v, f)
+    f.close()
+def load_variable(filename):
+    f = open(filename, 'rb')
+    r = pickle.load(f)
+    f.close()
+    return r
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--test_annotation", default="./origin_pictures.info")
+    parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0/")
+    parser.add_argument("--det_results_path", default="./detection-results/")
+    parser.add_argument("--net_out_num", type=int, default=3)
+    parser.add_argument("--net_input_width", type=int, default=1216)
+    parser.add_argument("--net_input_height", type=int, default=800)
+    parser.add_argument("--ifShowDetObj", action="store_true", help="draw the detected boxes on the images if this flag is present")
+    flags = parser.parse_args()
+
+    img_size_dict = dict()
+    with open(flags.test_annotation) as f:
+        for line in f.readlines():
+            temp = line.split(" ")
+            img_file_path = temp[1]
+            img_name = temp[1].split("/")[-1].split(".")[0]
+            img_width = int(temp[2])
+            img_height = int(temp[3])
+            img_size_dict[img_name] = (img_width, img_height, img_file_path)
+
+    bin_path = flags.bin_data_path
+    det_results_path = flags.det_results_path
+    os.makedirs(det_results_path, exist_ok=True)
+    #total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name])
+
+    import glob
+    import torch
+    from torchvision.models.detection.roi_heads import paste_masks_in_image
+    import torch.nn.functional as F
+    from mmdet.core import bbox2result
+    from mmdet.core import encode_mask_results
+    from mmdet.datasets import CocoDataset
+    coco_dataset = CocoDataset(ann_file='/opt/npu/dataset/coco/annotations/instances_val2017.json', pipeline=[])
+    coco_class_map = {id:name for id, name in enumerate(coco_dataset.CLASSES)}
+    #print(dir(coco_dataset))
+    results = []
+
+    cnt = 0
+    #for bin_file in sorted(total_img):
+    for ids in coco_dataset.img_ids:
+        cnt = cnt + 1
+        bin_file = glob.glob(bin_path + '/*0' + str(ids) + '_1.bin')[0]
+        bin_file = bin_file[bin_file.rfind('/') + 1:]
+        bin_file = bin_file[:bin_file.rfind('_')]
+        print(cnt - 1, bin_file)
+        path_base = os.path.join(bin_path, bin_file)
+        res_buff = []
+        bbox_results = []
+        cls_segms = []
+        for num in range(1, flags.net_out_num + 1):
+            if os.path.exists(path_base + "_" + str(num) + ".bin"):
+                if num == 1:
+                    buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32")
+                    buf = np.reshape(buf, [100, 5])
+                elif num == 2:
+                    buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="int64")
+                    buf = np.reshape(buf, [100, 1])
+                elif num == 3:
+                    bboxes = np.fromfile(path_base + "_" + str(num - 2) + ".bin", dtype="float32")
+                    bboxes = np.reshape(bboxes, [100, 5])
+                    bboxes = torch.from_numpy(bboxes)
+                    labels = np.fromfile(path_base + "_" + str(num - 1) + ".bin", dtype="int64")
+                    labels = np.reshape(labels, [100, 1])
+                    labels = torch.from_numpy(labels)
+                    mask_pred = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32")
+                    mask_pred = np.reshape(mask_pred, [100, 80, 28, 28])
+                    mask_pred = torch.from_numpy(mask_pred)
+
+                    if torch.cuda.is_available():
+                        mask_pred = mask_pred.to(device='cuda')
+
+                    img_shape = (flags.net_input_height, flags.net_input_width)
+                    mask_pred = mask_pred[range(len(mask_pred)), labels[:, 0]][:, None]
+                    masks = paste_masks_in_image(mask_pred, bboxes[:, :4], img_shape)
+                    masks = masks >= 0.5
+
+                    masks = postprocess_masks(masks, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height)
+                    if torch.cuda.is_available():
+                        masks = masks.cpu()
+                    '''masks = masks.numpy()
+                    img = masks[0].squeeze()
+                    from PIL import Image
+                    for j in range(len(masks)):
+                        mask = masks[j].squeeze()
+                        mask = mask.astype(bool)
+                        img[mask] = img[mask] + 1
+                    imag = Image.fromarray((img * 255).astype(np.uint8))
+                    imag.save(os.path.join('.', bin_file + '.png'))'''
+
+                    cls_segms = [[] for _ in range(80)]
+                    for i in range(len(masks)):
+                        cls_segms[labels[i][0]].append(masks[i][0].numpy())
+
+                    bboxes = postprocess_bboxes(bboxes, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height)
+                    bbox_results = [bbox2result(bboxes, labels[:, 0], 80)]
+                res_buff.append(buf)
+            else:
+                print("[ERROR] file not exist", path_base + "_" + str(num) + ".bin")
+
+        result = list(zip(bbox_results, [cls_segms]))
+        result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result]
+        results.extend(result)
+
+        current_img_size = img_size_dict[bin_file]
+        res_bboxes = np.concatenate(res_buff, axis=1)
+        predbox = postprocess_bboxes(res_bboxes, current_img_size, flags.net_input_width, flags.net_input_height)
+
+        if flags.ifShowDetObj:
+            imgCur = cv2.imread(current_img_size[2])
+
+        det_results_str = ''
+        for idx, class_idx in enumerate(predbox[:, 5]):
+            if float(predbox[idx][4]) < float(0.05):
+                continue
+            # valid coco class indices are 0..79
+            if class_idx < 0 or class_idx >= 80:
+                continue
+
+            class_name = coco_class_map[int(class_idx)]
+            det_results_str += "{} {} {} {} {} {}\n".format(class_name, str(predbox[idx][4]), predbox[idx][0],
+                                                            predbox[idx][1], predbox[idx][2], predbox[idx][3])
+            if flags.ifShowDetObj:
+                imgCur = cv2.rectangle(imgCur, (int(predbox[idx][0]), int(predbox[idx][1])), (int(predbox[idx][2]), int(predbox[idx][3])), (0,255,0), 2)
+                imgCur = cv2.putText(imgCur, class_name, (int(predbox[idx][0]), int(predbox[idx][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
+
+        if flags.ifShowDetObj:
+            cv2.imwrite(os.path.join(det_results_path, bin_file +'.jpg'), imgCur, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+
+        det_results_file = os.path.join(det_results_path, bin_file + ".txt")
+        with open(det_results_file, "w") as detf:
+            detf.write(det_results_str)
+
+    save_variable(results, './results.txt')
+    #results = load_variable('./results.txt')
+    eval_results = coco_dataset.evaluate(results, metric=['bbox', 'segm'], classwise=True)
+```
+Run maskrcnn_pth_postprocess.py to evaluate the mAP accuracy:
+```shell
+python3.7 get_info.py jpg /opt/npu/dataset/coco/val2017 maskrcnn_jpeg.info
+
+python3.7 maskrcnn_pth_postprocess.py --bin_data_path=./result/dumpOutput_device0/ --test_annotation=maskrcnn_jpeg.info --det_results_path=./ret_npuinfer/ --net_out_num=3 --net_input_height=800 --net_input_width=1216 --ifShowDetObj
+```
+The first argument is the benchmark inference result directory, the second the original image info file, the third the post-processing output directory, the fourth the number of network outputs, the fifth and sixth the network height and width, and the seventh whether to draw the boxes on the images.
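+The script pickles the accumulated results to ./results.txt before evaluating, so different metric options can be re-run without re-reading every bin file. A usage sketch (paths as in the script above):
+```python
+import pickle
+from mmdet.datasets import CocoDataset
+
+with open('./results.txt', 'rb') as f:  # written by save_variable() in the script
+    results = pickle.load(f)
+coco_dataset = CocoDataset(ann_file='/opt/npu/dataset/coco/annotations/instances_val2017.json', pipeline=[])
+eval_results = coco_dataset.evaluate(results, metric=['bbox', 'segm'], classwise=True)
+```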
+After the post-processing script finishes, the accuracy is printed:
+```
+Evaluating bbox...
+Loading and preparing results...
+DONE (t=8.57s)
+creating index...
+index created!
+Running per image evaluation...
+Evaluate annotation type *bbox*
+DONE (t=103.05s).
+Accumulating evaluation results...
+DONE (t=26.62s).
+Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.377
+Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=1000 ] = 0.584
+Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=1000 ] = 0.411
+Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.211
+Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.411
+Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.500
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.515
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=300 ] = 0.515
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=1000 ] = 0.515
+Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.319
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.556
+Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.656
+
++---------------+-------+--------------+-------+----------------+-------+
+| category      | AP    | category     | AP    | category       | AP    |
++---------------+-------+--------------+-------+----------------+-------+
+| person        | 0.517 | bicycle      | 0.296 | car            | 0.411 |
+| motorcycle    | 0.392 | airplane     | 0.588 | bus            | 0.603 |
+| train         | 0.576 | truck        | 0.332 | boat           | 0.254 |
+| traffic light | 0.253 | fire hydrant | 0.627 | stop sign      | 0.624 |
+| parking meter | 0.431 | bench        | 0.224 | bird           | 0.335 |
+| cat           | 0.588 | dog          | 0.544 | horse          | 0.527 |
+| sheep         | 0.473 | cow          | 0.515 | elephant       | 0.597 |
+| bear          | 0.616 | zebra        | 0.627 | giraffe        | 0.623 |
+| backpack      | 0.132 | umbrella     | 0.347 | handbag        | 0.119 |
+| tie           | 0.306 | suitcase     | 0.368 | frisbee        | 0.634 |
+| skis          | 0.214 | snowboard    | 0.286 | sports ball    | 0.398 |
+| kite          | 0.375 | baseball bat | 0.215 | baseball glove | 0.333 |
+| skateboard    | 0.455 | surfboard    | 0.340 | tennis racket  | 0.417 |
+| bottle        | 0.365 | wine glass   | 0.325 | cup            | 0.400 |
+| fork          | 0.259 | knife        | 0.139 | spoon          | 0.108 |
+| bowl          | 0.395 | banana       | 0.217 | apple          | 0.200 |
+| sandwich      | 0.322 | orange       | 0.289 | broccoli       | 0.214 |
+| carrot        | 0.199 | hot dog      | 0.277 | pizza          | 0.478 |
+| donut         | 0.397 | cake         | 0.353 | chair          | 0.245 |
+| couch         | 0.371 | potted plant | 0.243 | bed            | 0.398 |
+| dining table  | 0.228 | toilet       | 0.557 | tv             | 0.542 |
+| laptop        | 0.547 | mouse        | 0.572 | remote         | 0.260 |
+| keyboard      | 0.491 | cell phone   | 0.325 | microwave      | 0.531 |
+| oven          | 0.300 | toaster      | 0.467 | sink           | 0.330 |
+| refrigerator  | 0.511 | book         | 0.146 | clock          | 0.481 |
+| vase          | 0.336 | scissors     | 0.249 | teddy bear     | 0.431 |
+| hair drier    | 0.013 | toothbrush   | 0.145 | None           | None  |
++---------------+-------+--------------+-------+----------------+-------+
+```
+
+ **Accuracy debugging:**
+> 1. Online inference preprocesses images to dynamic resolutions of a fixed pattern, so fixing the onnx input to 1216x800 loses some accuracy; changing it to 1216x1216 improves accuracy, keeping the mask accuracy drop within 1% of the open-source result
+> 2. Single-image debugging
+> ```
+> python3.7 tools/test.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --eval bbox segm --show
+> python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img 000000397133_1216x800.jpg --shape 800 1216 --show --verify --simplify
+> Notes:
+> 1. Take the pth whose accuracy meets the bar as the baseline and, following the open-source evaluation tools, add prints until the key code is understood. This shows that when exporting the original onnx, mask_pred = mask_pred[range(len(mask_pred)), labels][:, None] must be inserted before paste_masks_in_image for the onnx masks to match the pth
+> 2. Use the original onnx exported from scaled-and-padded images as the accuracy baseline. Its mask_pred output has shape (100,80,28,28), while the onnx exported with the custom operator substituted outputs (100,80,14,14); adding prints and comparing shows that the RoiExtractor computing the masks must be configured with (pooled_height, pooled_width) = (14,14), not the default (7,7). Saving the om-side RoiExtractor input variables with the pickle module, loading them back into the source code, and inspecting the rendered images from the original onnx confirms the problem is in RoiExtractor
+> 3. 800x1216 is not a fixed height/width of the pth model: adding print(obj_cls) in build_from_cfg reveals BaseDetector in ./mmdet/models/detectors/base.py, from which one infers that the model input size varies
+> 4. To see the call chain, deliberately plant an error in the code; Python prints the call stack when the run fails
+> ```
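+A minimal sketch of the pickle-based comparison workflow from note 2 (names are illustrative; dump the same intermediate, e.g. the RoiExtractor input, from both the baseline and the suspect pipeline, then diff):
+```python
+import pickle
+import numpy as np
+
+def dump_tensor(tensor, path):
+    """Call at the suspect point of each pipeline."""
+    with open(path, 'wb') as f:
+        pickle.dump(np.asarray(tensor), f)
+
+def compare(path_a, path_b, atol=1e-3):
+    with open(path_a, 'rb') as f:
+        a = pickle.load(f)
+    with open(path_b, 'rb') as f:
+        b = pickle.load(f)
+    print('shapes:', a.shape, b.shape, '| max abs diff:', np.abs(a - b).max())
+    return np.allclose(a, b, atol=atol)
+```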
+
+
+### 6.2 Open-source accuracy
+[Official accuracy](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json)
+
+```
+{"mode": "val", "epoch": 12, "iter": 7330, "lr": 0.0002, "bbox_mAP": 0.382, "bbox_mAP_50": 0.588, "bbox_mAP_75": 0.414, "bbox_mAP_s": 0.219, "bbox_mAP_m": 0.409, "bbox_mAP_l": 0.495, "bbox_mAP_copypaste": "0.382 0.588 0.414 0.219 0.409 0.495", "segm_mAP": 0.347, "segm_mAP_50": 0.557, "segm_mAP_75": 0.372, "segm_mAP_s": 0.183, "segm_mAP_m": 0.374, "segm_mAP_l": 0.472, "segm_mAP_copypaste": "0.347 0.557 0.372 0.183 0.374 0.472"}
+```
+### 6.3 Accuracy comparison
+The om inference box mAP50 is 0.584 against the open-source 0.588; the drop is within 1%, so the box accuracy is considered up to standard
+The om inference segm mAP50 is 0.553 against the open-source 0.557; the drop is within 1%, so the segm accuracy is considered up to standard
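+The acceptance rule used above is a relative drop of no more than 1%. A tiny helper makes the check explicit (a sketch; numbers from this section):
+```python
+def accuracy_ok(om_map, ref_map, max_rel_drop=0.01):
+    """True if the om result is within the allowed relative drop of the reference."""
+    return (ref_map - om_map) / ref_map <= max_rel_drop
+
+assert accuracy_ok(0.584, 0.588)  # box mAP50
+assert accuracy_ok(0.553, 0.557)  # segm mAP50
+```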
+
+## 7 Performance comparison
+
+- **[NPU performance data](#71-npu-performance-data)**
+- **[T4 performance data](#72-t4-performance-data)**
+- **[Performance comparison](#73-performance-comparison)**
+
+### 7.1 NPU performance data
+Performance for batch 1:
+ When measuring npu performance, make sure the device is idle; npu-smi info shows whether the device is running other inference tasks
+```
+./benchmark.x86_64 -round=20 -om_path=mask_rcnn_r50_fpn_1x_coco_bs1.om -device_id=0 -batch_size=1
+```
+This runs 20 rounds of pure inference and averages them, reporting the throughput and its reciprocal latency (benchmark's latency is the inference time of a single sample); the npu figure is the result of one device
+```
+[INFO] Dataset number: 19 finished cost 512.331ms
+[INFO] PureInfer result saved in ./result/PureInfer_perf_of_mask_rcnn_r50_fpn_1x_coco_bs1_in_device_0.txt
+-----------------PureInfer Performance Summary------------------
+[INFO] ave_throughputRate: 1.95202samples/s, ave_latency: 512.318ms
+----------------------------------------------------------------
+```
+maskrcnn in mmdetection does not support multi-batch
+
+ **Performance optimization:**
+> Generating a multi-batch model requires source changes; otherwise the multi-batch om converted by atc produces wrong results, and multi-batch brings no performance gain
+>
+
+
+### 7.2 T4 performance data
+Performance for batch 1:
+The onnx contains custom operators and therefore cannot be benchmarked with the open-source TensorRT; instead, pth online inference on a T4 machine is used for the performance numbers
+
+Evaluate T4 accuracy and performance:
+```shell
+git clone https://github.com/open-mmlab/mmcv
+cd mmcv
+MMCV_WITH_OPS=1 pip3.7 install -e .
+cd ..
+git clone https://github.com/open-mmlab/mmdetection
+cd mmdetection
+pip3.7 install -r requirements/build.txt
+python3.7 setup.py develop
+wget http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth
+# build the dataset layout in the current directory: data/coco contains annotations and val2017;
+# annotations holds instances_val2017.json, val2017 holds the 5000 coco validation images
+python3.7 tools/test.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --eval bbox segm
+```
+```
+6.0 task/s
+```
+
+### 7.3 Performance comparison
+A 310 card has 4 devices and benchmark measures one device, while one T4 corresponds to 4 devices and is measured as a whole. The benchmark latency is the reciprocal of the throughput; the T4 latency is the reciprocal of the throughput multiplied by the batch size. For batch 1, 1.95202 * 4 = 7.808 > 6.0, i.e. the npu performance exceeds the T4
+For batch 1 the npu throughput is thus about 1.3 times the T4 figure; this model is placed under the benchmark/cv/segmentation directory
+
+
diff --git a/docs/.keep "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep"
similarity index 100%
rename from docs/.keep
rename to "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep"
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/LICENSE" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/LICENSE"
new file mode 100644
index 0000000..56ee3c8
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/LICENSE"
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/README.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/README.md"
new file mode 100644
index 0000000..c5f37d0
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/README.md"
@@ -0,0 +1,53 @@
+$\color{red}{Note: strikethrough marks guidance for this README.md; the struck-through notes below must be deleted from the final README.md}$
+
+# ReID-strong-baseline PyTorch offline inference guide
+
+## 1 Environment preparation
+
+1. Install the necessary dependencies. The test environment may already have some of these libraries in other versions, so installing with this command is not recommended for manual testing
+```
+pip3.7 install -r requirements.txt
+```
+~~Scripts must be run with python3.7 and libraries installed with pip3.7. Use torch 1.5.0; if exporting onnx from the open-source model code requires a torch newer than 1.5.0, use 1.8.0 and state that here. onnx 1.9.0 is acceptable. requirements.txt must pin the exact versions of every library needed for offline inference of this model, i.e. the versions used on the 310 inference server; common libraries include numpy, Pillow and opencv-python. The atc tool currently supports onnx opset_version 11.~~
+
+
+2. Obtain, modify and install the open-source model code
+```
+git clone https://github.com/michuanhaohao/reid-strong-baseline -b master
+cd reid-strong-baseline
+git reset 3da7e6f03164a92e696cb6da059b1cd771b0346d --hard
+cd ..
+```
+~~Prefer the open-source repository provided by this task and run offline inference against a branch plus commit id. The branch is usually master or a stable release; find the commit id from the github commits, usually the last commit of a stable release or the latest commit of the repository~~
+~~If the open-source model code must be modified, apply the change as a patch after cloning: patch -p1 < ../{patch_name}.diff~~
+~~If the repository has no install script, add the search path via sys.path.append(r"./reid-strong-baseline") so the pth2onnx script can reference the model code's functions and classes~~
+
+3. Obtain the weight file
+
+[market_resnet50_model_120_rank1_945.pth](https://drive.google.com/open?id=1hn0sXLZ5yJcxtmuY-ItQfYD7hBtHwt7A)
+~~Prefer the weight file produced by training. If the trained weights can be obtained online, give the URL; otherwise state where to get them. If training provides no weights, use the open-source repository's weight file. The weight file name must be given~~
+
+4. Dataset
+[Obtain Market1501](http://www.liangzheng.org/Project/project_reid.html) and rename it to market1501
+
+5. [Obtain the benchmark tool](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373/software/)
+Place benchmark.x86_64 or benchmark.aarch64 in the current directory
+
+## 2 Offline inference
+
+Run on the 310; while running, use npu-smi info to check the device status and make sure the device is idle
+```
+bash test/pth2om.sh
+bash test/eval_acc_perf.sh --datasets_path=/root/datasets
+```
+ **Evaluation results:**
+| Model | Official pth accuracy | 310 offline inference accuracy | GPU perf | 310 perf |
+| :------: | :------: | :------: | :------: | :------: |
+| ReID-strong-baseline bs1 | [rank1:94.5% mAP:85.9%](https://github.com/michuanhaohao/reid-strong-baseline) | rank1:94.5% mAP:85.9% | 992.9994fps | 1446.188fps |
+| ReID-strong-baseline bs16 | [rank1:94.5% mAP:85.9%](https://github.com/michuanhaohao/reid-strong-baseline) | rank1:94.5% mAP:85.9% | 2211.7074fps | 2588.56fps |
+
+Remarks:
+With TEST.NECK_FEAT "('before')" TEST.FEAT_NORM "('no')", the exported onnx can run offline inference
+Without TEST.NECK_FEAT "('before')" TEST.FEAT_NORM "('no')", the om converted from the exported onnx matches the official accuracy
+
+
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_postprocess.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_postprocess.py"
new file mode 100644
index 0000000..0af4cb7
--- /dev/null
+++ 
"b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_postprocess.py" @@ -0,0 +1,73 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +sys.path.append('./reid-strong-baseline') +import os +import argparse +import glob +import re +import numpy as np +import torch +from utils.reid_metric import R1_mAP + +def get_pred_label(label_dir, pre_dir): + img_paths = glob.glob(os.path.join(label_dir, '*.jpg')) + pattern = re.compile(r'([-\d]+)_c(\d)') + + outputs = [] + for img_path in img_paths: + pid, camid = map(int, pattern.search(img_path).groups()) + if pid == -1: continue # junk images are just ignored + camid -= 1 # index starts from 0 + + filename = img_path.split("/")[-1] + if filename[-8:] == ".jpg.jpg": + bin_file = filename[:-8] + "_1.bin" + else: + bin_file = filename[:-4] + "_1.bin" + output = np.fromfile(os.path.join(pre_dir, bin_file), dtype="float32") + output = torch.from_numpy(output) + output = output.unsqueeze(0) + + pid = torch.from_numpy(np.array([pid,])) + camid = torch.from_numpy(np.array([camid,])) + outputs.append((output, pid, camid)) + + return outputs + +def eval(query_dir, gallery_dir, pred_dir): + + query = get_pred_label(query_dir, pred_dir) + gallery = get_pred_label(gallery_dir, pred_dir) + outputs = query + gallery + + num_query = 3368 + eval = R1_mAP(num_query, max_rank=50, feat_norm="yes") + eval.reset() + for output in outputs: + eval.update(output) + cmc, mAP = eval.compute() + print('Validation Results') + print("mAP: {:.1%}".format(mAP)) + for r in [1, 5, 10]: + print("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--query_dir", default="./data/market1501/query") + parser.add_argument("--gallery_dir", default="./data/market1501/bounding_box_test") + parser.add_argument("--pred_dir", default="./result/dumpOutput_device0/") + args = parser.parse_args() + eval(args.query_dir, args.gallery_dir, args.pred_dir) diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_preprocess.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_preprocess.py" new file mode 100644 index 0000000..c296fb1 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_preprocess.py" @@ -0,0 +1,56 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import numpy as np +from PIL import Image +from torchvision import transforms +import multiprocessing + +preprocess = transforms.Compose([ + transforms.Resize([256, 128]), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +]) + +def gen_input_bin(file_batches, batch): + i = 0 + for file in file_batches[batch]: + if ".db" in file: + continue + i = i + 1 + print("batch", batch, file, "===", i) + + input_image = Image.open(os.path.join(src_path, file)).convert('RGB') + input_tensor = preprocess(input_image) + img = np.array(input_tensor).astype(np.float32) + img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin")) + +def ReID_preprocess(src_path, save_path): + files = os.listdir(src_path) + file_batches = [files[i:i + 500] for i in range(0, 50000, 500) if files[i:i + 500] != []] + thread_pool = multiprocessing.Pool(len(file_batches)) + for batch in range(len(file_batches)): + thread_pool.apply_async(gen_input_bin, args=(file_batches, batch)) + thread_pool.close() + thread_pool.join() + print("in thread, except will not report! please ensure bin files generated.") + +if __name__ == '__main__': + src_path = sys.argv[1] + save_path = sys.argv[2] + if not os.path.isdir(save_path): + os.makedirs(os.path.realpath(save_path)) + ReID_preprocess(src_path, save_path) diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_pth2onnx.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_pth2onnx.py" new file mode 100644 index 0000000..659451d --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/ReID_pth2onnx.py" @@ -0,0 +1,63 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +import argparse +import torch +import torch.onnx +sys.path.append('./reid-strong-baseline') +from config import cfg +from modeling import build_model + +from collections import OrderedDict +def proc_nodes_module(checkpoint): + new_state_dict = OrderedDict() + for k, v in checkpoint.items(): + if "classifier" in k: + continue + new_state_dict[k] = v + return new_state_dict + +def main(): + parser = argparse.ArgumentParser(description="ReID Baseline Inference") + parser.add_argument( + "--config_file", default="", help="path to config file", type=str + ) + parser.add_argument("opts", help="Modify config options using the command-line", default=None, + nargs=argparse.REMAINDER) + args = parser.parse_args() + + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + + num_classes = 751 + model = build_model(cfg, num_classes) + checkpoint = torch.load(cfg.TEST.WEIGHT, map_location='cpu') + #checkpoint = proc_nodes_module(checkpoint) + model.load_state_dict(checkpoint) + model.eval() + + input_names = ["image"] + output_names = ["class"] + dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}} + dummy_input = torch.randn(1, 3, 256, 128) + export_onnx_file = "ReID.onnx" + + torch.onnx.export(model, dummy_input, export_onnx_file, input_names=input_names, dynamic_axes=dynamic_axes, + output_names=output_names, opset_version=11, verbose=True) + +if __name__ == '__main__': + main() diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/env.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/env.sh" new file mode 100644 index 0000000..49be8f1 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/env.sh" @@ -0,0 +1,8 @@ +#! 
/bin/bash + +export install_path=/usr/local/Ascend/ascend-toolkit/latest +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH +export ASCEND_OPP_PATH=${install_path}/opp +export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/gen_dataset_info.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/gen_dataset_info.py" new file mode 100644 index 0000000..f80f45a --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/gen_dataset_info.py" @@ -0,0 +1,60 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import cv2 +from glob import glob + + +def get_bin_info(file_path, info_name, width, height): + bin_images = glob(os.path.join(file_path, '*.bin')) + with open(info_name, 'w') as file: + for index, img in enumerate(bin_images): + content = ' '.join([str(index), img, width, height]) + file.write(content) + file.write('\n') + + +def get_jpg_info(file_path, info_name): + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + image_names = [] + for extension in extensions: + image_names.append(glob(os.path.join(file_path, '*.' 
+ extension))) + with open(info_name, 'w') as file: + for image_name in image_names: + if len(image_name) == 0: + continue + else: + for index, img in enumerate(image_name): + img_cv = cv2.imread(img) + shape = img_cv.shape + width, height = shape[1], shape[0] + content = ' '.join([str(index), img, str(width), str(height)]) + file.write(content) + file.write('\n') + + +if __name__ == '__main__': + file_type = sys.argv[1] + file_path = sys.argv[2] + info_name = sys.argv[3] + if file_type == 'bin': + width = sys.argv[4] + height = sys.argv[5] + assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5' + get_bin_info(file_path, info_name, width, height) + elif file_type == 'jpg': + assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3' + get_jpg_info(file_path, info_name) diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/modelzoo_level.txt" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/modelzoo_level.txt" new file mode 100644 index 0000000..9e95396 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/modelzoo_level.txt" @@ -0,0 +1,4 @@ +FuncStatus:OK +PrecisionStatus:OK +AutoTune:OK +PerfStatus:POK \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/requirements.txt" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/requirements.txt" new file mode 100644 index 0000000..4cda321 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/requirements.txt" @@ -0,0 +1,8 @@ +torch == 1.5.0 +torchvision == 0.6.0 +onnx == 1.7.0 +numpy == 1.20.3 +Pillow == 8.2.0 +opencv-python == 4.5.2.54 +yacs == 0.1.8 +pytorch-ignite == 0.4.5 \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/eval_acc_perf.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/eval_acc_perf.sh" new file mode 100644 index 0000000..1bfa7cf --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/eval_acc_perf.sh" @@ -0,0 +1,72 @@ +#!/bin/bash + +datasets_path="/root/datasets/" + +for para in $* +do + if [[ $para == --datasets_path* ]]; then + datasets_path=`echo ${para#*=}` + fi +done + 
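+
+# Preprocess the query and gallery sets into bin files, merge them, generate the
+# dataset info file, then run benchmark for bs1 (device 0) and bs16 (device 1)
+# and parse the accuracy and performance results below.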
+arch=`uname -m` +rm -rf ./prep_dataset_query +rm -rf ./prep_dataset_gallery +python3.7 ReID_preprocess.py ${datasets_path}/market1501/query ./prep_dataset_query +python3.7 ReID_preprocess.py ${datasets_path}/market1501/bounding_box_test ./prep_dataset_gallery +mv prep_dataset_gallery/* prep_dataset_query/ +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 gen_dataset_info.py bin ./prep_dataset_query ./prep_bin.info 128 256 +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +source env.sh +rm -rf result/dumpOutput_device0 +./benchmark.${arch} -model_type=vision -device_id=0 -batch_size=1 -om_path=./ReID_bs1.om -input_text_path=./prep_bin.info -input_width=128 -input_height=256 -output_binary=True -useDvpp=False +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +rm -rf result/dumpOutput_device1 +./benchmark.${arch} -model_type=vision -device_id=1 -batch_size=16 -om_path=./ReID_bs16.om -input_text_path=./prep_bin.info -input_width=128 -input_height=256 -output_binary=True -useDvpp=False +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 ReID_postprocess.py --query_dir=${datasets_path}/market1501/query --gallery_dir=${datasets_path}/market1501/bounding_box_test --pred_dir=./result/dumpOutput_device0 > result_bs1.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 ReID_postprocess.py --query_dir=${datasets_path}/market1501/query --gallery_dir=${datasets_path}/market1501/bounding_box_test --pred_dir=./result/dumpOutput_device1 > result_bs16.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "====accuracy data====" +python3.7 test/parse.py result_bs1.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 test/parse.py result_bs16.json +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "====performance data====" +python3.7 test/parse.py result/perf_vision_batchsize_1_device_0.txt +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +python3.7 test/parse.py result/perf_vision_batchsize_16_device_1.txt +if [ $? != 0 ]; then + echo "fail!" + exit -1 +fi +echo "success" \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/parse.py" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/parse.py" new file mode 100644 index 0000000..6d5a129 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/parse.py" @@ -0,0 +1,33 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import json +import re + +if __name__ == '__main__': + if sys.argv[1].endswith('.json'): + result_json = sys.argv[1] + with open(result_json, 'r') as f: + content = f.read() + #tops = [i.get('value') for i in json.loads(content).get('value') if 'Top' in i.get('key')] + #print('om {} top1:{} top5:{}'.format(result_json.split('_')[1].split('.')[0], tops[0], tops[4])) + print(content) + elif sys.argv[1].endswith('.txt'): + result_txt = sys.argv[1] + with open(result_txt, 'r') as f: + content = f.read() + txt_data_list = [i.strip() for i in re.findall(r':(.*?),', content.replace('\n', ',') + ',')] + fps = float(txt_data_list[7].replace('samples/s', '')) * 4 + print('310 bs{} fps:{}'.format(result_txt.split('_')[3], fps)) \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/perf_g.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/perf_g.sh" new file mode 100644 index 0000000..a1c5a64 --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/perf_g.sh" @@ -0,0 +1,19 @@ +trtexec --onnx=ReID.onnx --fp16 --shapes=image:1x3x256x128 > ReID_bs1.log +perf_str=`grep "GPU.* mean.*ms$" ReID_bs1.log` +if [ -n "$perf_str" ]; then + perf_num=`echo $perf_str | awk -F' ' '{print $16}'` +else + perf_str=`grep "mean.*ms$" ReID_bs1.log` + perf_num=`echo $perf_str | awk -F' ' '{print $4}'` +fi +awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}' + +trtexec --onnx=ReID.onnx --fp16 --shapes=image:16x3x256x128 > ReID_bs16.log +perf_str=`grep "GPU.* mean.*ms$" ReID_bs16.log` +if [ -n "$perf_str" ]; then + perf_num=`echo $perf_str | awk -F' ' '{print $16}'` +else + perf_str=`grep "mean.*ms$" ReID_bs16.log` + perf_num=`echo $perf_str | awk -F' ' '{print $4}'` +fi +awk 'BEGIN{printf "t4 bs16 fps:%.3f\n", 1000*1/('$perf_num'/16)}' \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/pth2om.sh" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/pth2om.sh" new file mode 100644 index 0000000..393275a --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/official/cv/ReID/ReID-strong-baseline/test/pth2om.sh" @@ -0,0 +1,19 @@ +#!/bin/bash + +rm -rf ReID.onnx +python3.7 ReID_pth2onnx.py --config_file='reid-strong-baseline/configs/softmax_triplet_with_center.yml' MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('market_resnet50_model_120_rank1_945.pth')" TEST.NECK_FEAT "('before')" TEST.FEAT_NORM "('no')" +if [ $? != 0 ]; then + echo "fail!" 
+    exit -1
+fi
+
+rm -rf ReID_bs1.om ReID_bs16.om
+source env.sh
+atc --framework=5 --model=ReID.onnx --output=ReID_bs1 --input_format=NCHW --input_shape="image:1,3,256,128" --log=debug --soc_version=Ascend310
+atc --framework=5 --model=ReID.onnx --output=ReID_bs16 --input_format=NCHW --input_shape="image:16,3,256,128" --log=debug --soc_version=Ascend310 --auto_tune_mode="RL,GA"
+
+if [ -f "ReID_bs1.om" ] && [ -f "ReID_bs16.om" ]; then
+    echo "success"
+else
+    echo "fail!"
+fi
\ No newline at end of file
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/ONNX\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep"
new file mode 100644
index 0000000..e69de29
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md"
new file mode 100644
index 0000000..09a53c3
--- /dev/null
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md"
@@ -0,0 +1,286 @@
+# Ascend PyTorch model inference FAQ
+- [1 Introduction](#1-introduction)
+- [2 FAQ](#2-faq)
+  - [2.1 NPU model bring-up FAQ](#21-npu-model-bring-up-faq)
+  - [2.2 NPU model accuracy debugging FAQ](#22-npu-model-accuracy-debugging-faq)
+  - [2.3 NPU model performance optimization FAQ](#23-npu-model-performance-optimization-faq)
+
+# [1 Introduction](#1-introduction)
+
+  This document is for developers doing Ascend offline model inference. It guides them to reach the accuracy and performance bars under the Ascend CANN packages. Only common problems and solutions encountered in offline inference are listed here; the list is continuously updated.
+
+
+# [2 FAQ](#2-faq)
+
+## [2.1 NPU model bring-up FAQ](#21-npu-model-bring-up-faq)
+
+### FAQ1. What deliverables are required, and how are they delivered?
+For delivery, see section 6.2 "Delivery standards and specifications" of the Inference Guide
+Deliverable sample: https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50
+
+### FAQ2. How do I use the provided server with Ascend 310 cards?
+The provided Ascend 310 server already has the Ascend packages installed; the sample under home/common/resnext50 on that server runs as-is
+
+### FAQ3. How do inference and training relate?
+Model inference is independent of training and somewhat simpler. Inference runs on the 310 using either weights trained on the Ascend 910 or weights provided by the open-source repository; it can usually be done while waiting for training results
+
+### FAQ4. How much work is inference?
+Familiarize yourself with the material first, then do the inference; if accuracy or performance misses the bar, that takes considerable extra time. Between inference and acceptance there is also review, rework and test documentation, so this is not a three-day job: plan 1 to 1.5 months from start to acceptance
+
+### FAQ5. Which inference work runs on the 310 server, which on the T4 server, and which on the CPU?
+Pre/post-processing and the onnx export can run on the CPU. Converting to om and the benchmark inference commands run on the server with the Ascend 310, because both depend on the compilers and npu operator libraries of the Ascend CANN package. GPU performance numbers must be measured on a server with a T4 card
+
+### FAQ6. How do I choose the pretrained weight file?
+If weights trained on the Ascend 910 exist, prefer them for offline inference and align accuracy with the 910 training result
+If the open-source repository offers several weight files, use the one for the common, basic configuration; when the model supports multiple tasks, inference for one task is enough
+If the repository provides no pth weights, ask whoever trained this model for one, or train a simple pth with the repository's training script, then compare the om accuracy against that pth
+
+### FAQ7. Which batch sizes need accuracy and performance tests?
+If the model supports multi-batch, test batch 1, 4, 8, 16 and 32 and record them in README.md; the test scripts and the submitted description only need bs1 and bs16 numbers
+
+### FAQ8. The onnx cannot run inference; how is T4 performance measured?
+If the exported onnx cannot run inference, e.g. because it contains custom operators, measure pth online-inference performance on the T4 with the open-source evaluation script
+
+### FAQ9. How is om performance measured?
+Make sure the device runs only this test during measurement; use npu-smi info to check that it is idle
+Random inputs may not match the real data distribution, so the pure-inference numbers from the Ascend benchmark tool can be off for some models; the performance recorded in the test scripts and the submitted description is therefore the number benchmark reports when inferring on the real dataset
+
+### FAQ10. What does -1 mean in the export script's dynamic_axes and in the onnx input shape (-1,3,224,224)?
+Viewing the onnx exported below in the netron visualizer shows input shape (-1,3,224,224); -1 means the onnx has a dynamic batch, so when measuring onnx performance with tensorRT on the T4 any batch input (batch,3,224,224) can be specified. dynamic_axes is the dynamic-batch parameter: 'image': {0: '-1'} marks dim 0 of input image, i.e. the batch dim, as -1, meaning dynamic
+```
+    input_names = ["image"]
+    output_names = ["class"]
+    dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}}
+    dummy_input = torch.randn(1, 3, 224, 224)
+    torch.onnx.export(model, dummy_input, output_file, input_names = input_names, dynamic_axes = dynamic_axes, output_names = output_names, opset_version=11, verbose=True)
+```
+Whatever the onnx batch is, converting onnx to om with --input_shape set to a positive batch yields an om of that batch; om does support dynamic batch, but we do not use dynamic-batch om models
+```
+atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310
+```
+Some models, such as shufflenetv1, do not actually support dynamic batch; converting to a fixed-batch om then requires an onnx of that same batch in addition to the matching --input_shape, otherwise the conversion fails
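+
+Besides netron, the input shape can also be checked programmatically. A small sketch using the standard onnx package (file name as in the atc example above); a dynamic batch shows up as a dim_param instead of a number:
+```python
+import onnx
+
+model = onnx.load('./resnext50.onnx')
+for inp in model.graph.input:
+    # dim_param is set (e.g. '-1') for dynamic dims, dim_value for fixed ones
+    dims = [d.dim_param or d.dim_value for d in inp.type.tensor_type.shape.dim]
+    print(inp.name, dims)
+```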
+
+### FAQ11. How do I view logs when an atc command fails?
+```
+export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export ASCEND_GLOBAL_LOG_LEVEL=0 #debug 0 --> info 1 --> warning 2 --> error 3
+then run atc ... > atc.log
+```
+
+### FAQ12. The model code contains an operator that cannot be exported to onnx; how is it replaced by an equivalent custom operator?
+onnx does not yet support the adaptive_avg_pool2d used in the pytorch code, so the onnx export fails. The first attempt is to replace adaptive_avg_pool2d with avg_pool2d, but when the last two input dims are not integer multiples of the output, avg_pool2d is not a fully equivalent substitute. Since the npu implements an adaptive_avg_pool2d operator, the solution becomes exporting adaptive_avg_pool2d as a custom operator. The custom operator needs no concrete implementation (the exported onnx therefore cannot run under onnxruntime, and pytorch's _check_onnx_proto(proto) must be changed to pass to skip the export-time check); it only has to return an output whose shape matches the original operator's output. The onnx then merely carries the operator's declaration (dtype and attributes must match the npu operator), and when converting onnx to om, if atc's onnx plugin supports the operator, atc resolves the declaration to the npu implementation.
+Check the npu declaration of adaptive_avg_pool2d:
+```
+REG_OP(AdaptiveAvgPool2d)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .REQUIRED_ATTR(output_size, ListInt)
+    .OP_END_FACTORY_REG(AdaptiveAvgPool2d)
+```
+Modify the model code to turn adaptive_avg_pool2d into a custom operator and export the onnx, where output_size_i is an int64 operator attribute:
+```
+class AdaptiveAvgPoolOp(torch.autograd.Function):
+
+    @staticmethod
+    def forward(ctx, x, output_size):
+        out = torch.randn(x.shape[0], x.shape[1], output_size[0], output_size[1]).to(x.dtype)
+        return out
+
+    @staticmethod
+    def symbolic(g, x, output_size):
+        out = g.op('AdaptiveAvgPool2d', x, output_size_i = output_size)
+        return out
+
+def adaptive_avg_pool_op(x, output_size):
+    out = AdaptiveAvgPoolOp.apply(x, output_size)
+    return out
+
+x = F.adaptive_avg_pool2d(input, output_size=bin_size) is replaced with x = adaptive_avg_pool_op(input, (bin_size, bin_size))
+```
+
+### FAQ13. Running atc or benchmark reports that the atc command or Ascend shared libraries cannot be found
+
+* Symptom
+
+  ```
+  Command 'atc' not found, but can be installed with:
+  or
+  ./benchmark.x86_64: error while loading shared libraries: libascendcl.so: cannot open shared object file: No such file or directory
+  ```
+
+* Cause
+
+  This happens when the environment variables are unset or invalid.
+
+* Solution
+
+  Set the environment variables:
+  ```
+  export install_path=/usr/local/Ascend/ascend-toolkit/latest
+  export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+  export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
+  export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
+  export ASCEND_OPP_PATH=${install_path}/opp
+  export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest
+  ```
+  If logged in to the Ascend 310 server as a non-root user, run the commands with sudo, and additionally
+  ```
+  in /etc/sudoers change Defaults env_reset to Defaults !env_reset
+  in /etc/bash.bashrc add alias sudo='sudo env PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH'
+  ```
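+
+Since such failures are almost always environment problems, a quick preflight check before running atc or benchmark can save a round trip. A sketch (the variable list follows the solution above):
+```python
+import os
+import shutil
+
+required = ['PATH', 'PYTHONPATH', 'LD_LIBRARY_PATH', 'ASCEND_OPP_PATH', 'ASCEND_AICPU_PATH']
+missing = [name for name in required if not os.environ.get(name)]
+if missing:
+    print('missing environment variables:', missing)
+if shutil.which('atc') is None:
+    print("'atc' not found on PATH - source the environment variables first")
+```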
+
+### FAQ14. Inference performance misses the bar and profiling shows the TransData operator is expensive; try the following optimizations
+(1) Modify five_2_four.py
+  Under the install_path exported in env.sh (= /usr/local/Ascend/ascend-toolkit/latest), locate the file five_2_four.py, usually at
+```
+/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/opp/op_impl/built-in/ai_core/tbe/impl/five_2_four.py
+```
+
+Edit five_2_four.py and add the TransData operator's output shape to the five_2_four function, for example:
+```
+from impl import trans_data_negative_target_ntc
+
+@util.check_input_type(dict, dict, str, str, str)
+def five_2_four(src, dst, src_format, dst_format, kernel_name='five_2_four'):
+    ...
+    elif dst_format.lower() == "nhwc" and dst_shape in [[10000, 63, 63, 1], [10000, 127, 127, 1], [16, 19, 19, 486],
+                                                        [16, 10, 10, 486], [16, 38, 38, 324], [16, 5, 5, 486],
+                                                        [16, 3, 3, 324], [8, 19, 19, 486], [8, 10, 10, 486],
+                                                        [8, 38, 38, 324], [8, 5, 5, 486], [8, 3, 3, 324],
+                                                        [100, 28, 28, 91]]:
+        trans_data_negative_target_tc.trans_data_negative_target_tc(src, dst, src_format, dst_format, kernel_name)
+    elif dst_format.lower() == "nchw" and dst_shape in [[2560, 512, 4, 26], [2560, 512, 1, 26], [2560, 256, 8, 25],
+                                                        [16, 240, 7, 7], [16, 120, 14, 14], [1,19,1024,2048], [4,19,1024,2048]]:
+        print("=================================")
+        print("ntc dst shape:", dst_shape)
+        print("=================================")
+        trans_data_negative_target_ntc.trans_data_negative_target_ntc(src, dst, src_format, dst_format, kernel_name)
+    ...
+```
+- The shapes to add differ per batch_size and have the form [*,19,1024,2048]. For a model tested only at batch 1 and batch 4, the shapes to add are [1,19,1024,2048] and [4,19,1024,2048]
+
+After the change, regenerate the om file; the atc conversion prints the added log lines:
+```
+ATC start working now, please wait for a moment.
+=================================
+ntc dst shape: [1, 19, 1024, 2048]
+=================================
+=================================
+ntc dst shape: [1, 19, 1024, 2048]
+=================================
+ATC run success, welcome to the next use.
+W11001: High-priority service of op[PartitionedCall_AvgPool_45_2] is invalid, low-priority service is used. It can work normally but may affect performance.
+W11001: High-priority service of op[PartitionedCall_AvgPool_52_6] is invalid, low-priority service is used. It can work normally but may affect performance.
+```
+(2) Change the output node type to float16
+Specify the output node type as float16 during the atc conversion
+```
+atc --framework=5 --model=./ICNet.onnx --output=ICNet_bs1 --out_nodes="Resize_317:0" --output_type=FP16 --input_format=NCHW --input_shape="actual_input_1: 1,3,1024,2048" --log=debug --soc_version=Ascend310
+```
+### FAQ16: Adapting a post-processing script for offline inference
+For image classification models the post-processing script is generic, but some models (e.g. segmentation) ship without one, and the reader has to adapt a script themselves.
+(1) The source repo contains an online-inference script (e.g. evaluate.py) or a test script (e.g. test.py)
+Adapt based on these scripts; they generally contain a model call of this form
+```
+outputs = model(image)
+```
+The data under ./result/dumpOutput_device0/ produced by benchmark offline inference corresponds to the model(image) step of online inference, so adaptation means reading the data back out of ./result/dumpOutput_device0/ by the matching names. Reference adaptation code:
+```
+outputs = self.file2tensor(annotation_file)
+
+# the dumped outputs are bin files
+def file2tensor(self, annotation_file):
+
+    filepath = annotation_file + '_1.bin'
+    size = os.path.getsize(filepath)
+    res = []
+    L = int(size/4)  # float32 is needed here, so read 4 bytes at a time; adjust the byte count to the actual dtype
+    binfile = open(filepath, 'rb')
+    for i in range(L):
+        data = binfile.read(4)
+        num = struct.unpack('f', data)
+        res.append(num[0])
+    binfile.close()
+
+    dim_res = np.array(res).reshape(1,19,1024,2048)  # reshape to the expected shape, obtainable by printing outputs' shape during online inference
+    tensor_res = torch.tensor(dim_res, dtype=torch.float32)
+    print(filepath, tensor_res.dtype, tensor_res.shape)
+
+    return tensor_res
+```
+(2) If none of the above files exists, adapt based on the validation step of the training code; the method is the same as above.
+
+
+### FAQ17: Dataset preprocessing reports an error
+```
+python3.7 imagenet_torch_preprocess.py /opt/npu/imagenet/val ./pre_dataset
+```
+fails with
+```
+PIL.UnidentifiedImageError: cannot identify image file '/opt/npu/imagenet/val/xxxx.jpeg'
+```
+This error means the image file is corrupted.
+Fix: switch to an undamaged val dataset.
+
+
+## [2.2 FAQ: common NPU model accuracy debugging issues](#22-NPU模型精度调试常见问题FAQ)
+
+ 1. Check that pre-processing, post-processing and model parameters are exactly those used for inference by the open-source repo.
+ 2. Use the pth evaluation script provided by the open-source repo to test whether pth online inference accuracy meets the target; debug prints of operator outputs can be added.
+ 3. If the exported ONNX can run inference, determine whether the ONNX accuracy meets the target.
+ 4. If an om operator causes the accuracy drop, specify that operator as an output node when converting the model, then compare its output against the same operator's output in pth online inference (exporting ONNX with verbose on prints the .py source line each operator corresponds to) and check whether they match; a minimal comparison sketch is given after section 2.3.
+ 5. If some operator causes the accuracy drop, try modifying the model to replace that operator by another method and re-check accuracy; for operator problems that truly cannot be worked around, file an issue on modelzoo.
+See the maskrcnn end-to-end inference case in section 4.5 of the "Inference Guide".
+
+
+## [2.3 FAQ: common NPU model performance optimization issues](#23-NPU模型性能优化常见问题FAQ)
+
+ 1. Optimize the ONNX model: remove redundant pads that hurt performance, try the relevant Ascend atc optimization options, try retraining with nearest-neighbor instead of bilinear resize, lower the image resolution, and so on, until performance meets the target.
+ 2. For operator-induced performance problems, use profiling to locate the cause of the slowdown down to the specific operator. Prefer modifying the model code so that a fast NPU operator replaces the slow one and performance meets the target; then file an issue on modelzoo, re-measure once the fixed version is released, and continue optimizing.
+ 3. Profiling data must be delivered. For models that reach the target via the methods above, state the cause and the required steps in the delivery document; for models that still miss the target, state the cause and a brief localization process in the delivered README.md.
+
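+As referenced in step 4 of section 2.2, here is a minimal sketch for comparing a pth golden output with the om output dumped by benchmark, scored by cosine similarity. The file names and the saved golden output are illustrative assumptions only:
+```
+import numpy as np
+
+# hypothetical golden output saved with np.save() during pth online inference
+golden = np.load("golden_output.npy").astype(np.float32)
+# benchmark dumps float32 bin files under ./result/dumpOutput_device0/
+om_out = np.fromfile("result/dumpOutput_device0/sample_1.bin", dtype=np.float32)
+om_out = om_out.reshape(golden.shape)
+
+g, o = golden.ravel(), om_out.ravel()
+cos = np.dot(g, o) / (np.linalg.norm(g) * np.linalg.norm(o))
+print("cosine similarity:", cos)  # values close to 1.0 mean the outputs match
+```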
"a/docs/XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" similarity index 100% rename from "docs/XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" rename to "Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-XxxxNet\347\275\221\347\273\234\346\250\241\345\236\213[\344\272\244\344\273\230\345\206\205\345\256\271]\346\265\213\350\257\225\346\212\245\345\221\212.docx" diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274\344\271\246.docx" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\216\250\347\220\206\346\214\207\345\257\274\344\271\246.docx" new file mode 100644 index 0000000..e69de29 diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\265\213\350\257\225\346\212\245\345\221\212.docx" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-Xxx\346\250\241\345\236\213\346\265\213\350\257\225\346\212\245\345\221\212.docx" new file mode 100644 index 0000000000000000000000000000000000000000..fd5fb68b511e1abc0ed06f9b2da249026fa813cb GIT binary patch literal 304048 zcma&Nb9gLWw=Wv5*tTukwr$(iN>*&!wrx8rwr$(V$@}iJ?>*nX=Z{;@)78~;j-EBf zZ*=^xM>~5bI{6#Vpw+kSd@;p?O~IEAaQEPg*iBVi#96&l1m?6^bfK}$oeW`Ub$ z7je+jrj{%XkB>&7qm*LUDg8=$M^sV1p3&-iz_l#l?vsf7lX|Gip##=t4D(yFde6*;3}K> zX9mpodoEScELZ2gufEfHTC?mIMVYn`c<8-&HOzIOrZVX=ua+`)?p4QmB@DNJXYN>Y zuhzu32yp(MEO35^7aI^z6^$U4BFa2mg_on3y$2QeB;M271mMHit-fS=o6~#TqWJ{; zPhAWaqZe}j=tAR17YP5SE{yCPP5x0vP`r-R06lW>HR!j{m`_IIqD^2kaFPxOe5G_J z)Yz)3HLrO7=2w@+@?swdozqkTnVq8|HC%mEcVD|`1!56|00McnNP#PfXNB4=kjNAh ziFyS};t4n`O~rbKKh}6t5LyL{`nF$$zXZp)OxV*FR(RdMSL|PsKk|8Pi>i!x$*v(V z`#IKN>_o=FB<`fCX02$$ri}Rt6fcF$dd)Z>{zm2k{&%n?65+lYaH{QmY|vRyimYGe z{aM4UDYkublVQBAra&FlQtjTM2zMnfKA#_)+AOxp->TU1D&V}>Vr`&D0>T3PawAz> z86UT0x_OjD8kh+hnKYL1^AFjGcM3a9ELYb-WGT&y-AT{|R*#yC*%_57e+fP$T^Z)GkiWb~gWj zT@*L{57;Bu{y$*zOvcbiajn5-IOF4Dv%AsRw4*LUvjA{xWnZo<~7&Sr|t1PKB02_xi>5rg=ELwa&{BbTwdc%*yr zJJKw3+MSjd&5DDLd;-mh0IH98`jDIOfDOx;ckwW8^i9E$N59> zT1^fG0Dx-^008&jBu>sA)+SE>z}}>@>AK#E(0$F}1BR>HgcePsm28+p?uE!z=EN$o zxwhIBEI!&FOspPXf6*yO_m2Di>z(z|%Nf8QnIBWJdAr<9@!HYdwZ@w7NZ0+Bk;>N>A2q4Fv%um~b#=(rOM^Me-;o3fbZJF+8z2|&+ z#|uZh9!VyE2~5AbLJ3lP?wX$EFqrJG-W*?P$JTv!sp+;3fhxbYufM;p@xB$*z+P!b zZ`avI(tF<4Yl*6=C~MXjG5wJQAFKZ|@TXK1roNUSiOZAY>wWAOOjV@m`P|ZIgvWlj zAB?qRhQv-B^>g7yU?Bu+AGW++t(UAzk3!em9qDPO((z5~aKT+e;;WX9ODN#}c87E? 
znAgp|grQMn{yW1Pmsg^8Ey3iDFJ&;QhQ?DGKo=Z%FohQ5&ahj1*+}`JJtt-aI{ebXQKoJ^?7wp8=HswS4a*RDvY*0qL@^c&3V@spX010x4l7cKRDO97Z@d^FRP3qZ(+kQG(%X$$2fy2Iw(J8xS@3hFj7sVy7^l5XBQtGzb|Qu^G|`Z9!kYB_n=lAZ-s}73Z^o=)nzn z4mZW@-onYWE+70igkM_&?pUjB1LOM#F@HWuAHc>A_^JC^zY%WCX#!gy|s^ zULC3G;);I=KSINH8?b82cOFfd!-s+#IL5bSrnY8TpIEIp& z-1JkTpIJjqQoi?BTd%%Y($W_=QdsalCA3y*QV0j`HZ zY1Re=u!cz*5by9B9YFYi%@Ad~n-_If>nr8(%B`J7_aAzSAPe!m_DXq``Y3VYYx22v zO|68ZyFq-ZA>;Oy0_i2BB5p=;qaXU|vMJ6G^^?0suU zaP>MAI1bRX%&Ij03)+Fx8@aOemhx-sP+eFsWtaX9?D1CtLu*d>UY#Yo8CH+WlY0Uy z9x@uSt^4N+?u%j~B2WvPuW%ZjK(U+vbPrMc7rOIUpR?s^=aP#sbVjj$u=iHd6uTr`TVbyBh zI;X5wGW;0W8KoL$p@@+_cL7N_U3kTa#wAT~)!G{p5Ch*H8-f3&zB~ogmo#Ho^`!># zox#Q_!eIm1)c3F{*9!8=G-X3Q{m&81GakXb7u$_SxYmz@A9l_3MohTU?Xbb4s$8t->sMXKq?~5Ftei^!}-d@&gx#vzB zu{D$-eSspA?+FnB*Hw8~tkMSYz_T!6<8~peI172fh_pNE3Vu|?F|nrykIcJ=l}5+N zSBPyfg_Vr7ZrE^SZ8o-Ln8Fro8a0HWWKhv7@I4>|5t;HIi!}79@-}PhW4?q7;l(d6 z-7;wOm6-mb+Mx~r;@nKkW67o3;PHPaeOt}%d7 z(Eg0kAsfdG{p9qB@A)j2l%gLQmR!hUJ9g3wOI&`F(BC06t#NHL4kzo3kV_tSWYpgH z(=GTfQmcByksh5jS9`pUH!0EI=YDnIRza z`h!gw$^NBcUiOC8gyYBv`R%qAfI}^ef2k}JU8Ii`4gAlqzwFbGiy)mP@T4q);-4zj zT^_>AI)Z_Mrd}G5&9fVrht#RR=uWzdKU8$ORwRJFXp^OgM)EE~rE}0wG#=wwiK{Gx ziXZlnLAwp(viX!4XGx-SfNz4PY$^3IT2DnexbkOlWHO3xoU8UKn70l}kdYt@^( zd$hk`f>jgT_y?QvFYycjp&kv}@N^r=s z(^ZlrBw&l^>F2j!-NwiSpeygaLY}+B_ukY_RNopxvyo^^H$Gsp&L)arVU;E{DJ=BNlSpCvsQ+&lPN)Gi3q&#{y+M zv_Q|*@P1bp>FJMb$(>YR{Bk6*@BAaN@7$Av#ERP@9WHilZ41pYZ&`j?-^%=W5?6K0 zVaHEA?H4j`X@B_J0dU7SvR5d`*E7dbo_sv+6e@)|?1buBN5IQWo!m2bIg;1KrW91J zA!d?hbe0d1{MHeY&pS{Gf@-(m;_I!0O(BKzkVjK&q;d~}Wh_3tAjTN!@r)>oM8%rWj13XE*81`fb z{&w%_7u7>h{Ht$Z>WzQCQTh3IBh^ecaQzJXqBt^mtTHfris1 zgGOcG283~f>TrS#shrZb?0ntYs9j_{h$nY$6~bbTuEIxpdaq#Js(bJ301eDqrd~X{ z8s)Q9N2y_artXe%Idzb4!_X|eV}QXhN9fMBdk*o$3OmI^=R7oR$JnwcrtR2_;B$N6 z{!WRuV7dV>Ax`I|MjOn=Ft24_DvmlDVfgf$$X8?10M@TKQ#kRv>|2cp+M}XU{LxU;FfuxZ6Hkta|ds z?0^w%C?yk=C#e4|9h@kJpExL*7=Z+)73u>P|cJh8NbY;25ArkvY2d<&hs!ri}Na>idH#pJyge0=5x zTnBZrbqgmIIRPxW3zQ{y{JzWe1y$f56Z@aa;ZHd>G9KLj(P(n;>+`FEWjXpU4wjD4 z8AMZ@4^zCESQ#HflRS@Tq~m^`Y;><=GFH|G#xg%JkF10-zZJUzFJHbItIST!O&4H- z;Hv8WRHDp|p|rEh-`V@K@9l<^!CdhpLqH|+DkD@)ES^1(^&qA+SHcoLP;MsDg2vQu z9}$CwbBye}x9}dGog~GWP&EywrnErD06VUqU2~vCehGR0?LL2jqB2lGgRn%Y*$9%D ze6lc&dmX*w5A0s#Ifnz|ijcNrq1uoay_2AY-v@xwMQ)j|zN2xAl#bRl?ATGzW%sU$ z8c`>+hAqu|Ka~C_d%K*7C|CVcwPI)nsUo6RkX4PB=BuYPRQHSPU^i#4BN#@I``gaz zhDDg%xZ;n1Qk@uwkSy9jKD)If4ES~7+K?M^JU}~I29bJBYF4&{`g3ThrA@tGS^GPw z^l|Z~h^2#19(fRUB#)-=*~7*==eL$;Q}(_uJcsq6XEzJZrjokGdlMiK@;W9b2L4Wx zwO0M60O*t4^PDxA@S`$3r*Cnl|6$;}As{&T{FwjR^IN&D0sZlBl6LQWfOXd-J+-;3PvuWbbLeh(Ah^$-YF6K=txn(ViePZ9R z<@7%hpDzqR*cP%$GMjGRq+9!^DZ^}Mwt1Av$i@}RVDqjLlf&j@|DYaXta<-w%SIH6 z(8x;i$^jUMr2rt(LIe8!&cQ`F-e_y!}gA8`?pskRN0n z7;52U5`d;|3@)R*mOQ6Id)TBIc)=(}y-Je{CEi7#``Vc&h?y4la05$=mC1F_w6z;@+q_5>H-t(!K zxypVso8oV)=|_fIMpMjf_TDHKAZvO30H}b6B~);yS8kVo!SkNN&fZ0^GNumu>-qKw z5^iC&a&fJo`cB%4`(YFJqVVZGwCQP!Z&k&Ov`N4FLVr|+h${{9?Ho35Du`olrJ2Zk zXnji!BIXVd2D+j|VQ6-*-$Vs?v8qJ|9COG}Pqgm*Gtt~R5AcqJv&{!H%?d|(qVHyU zH;+Yx3Zdm4ge6kTuIK@(*TY{giLBjr%eh@hPVxiK&xVl-G#ZLD!V%c%Y6x5ZqTqsE z6Aw&J|7wXF*~DsD|MXJFG5tznNLe!SJ>@K>Fn7HEISbZ^tvvUn|E^==roV19%Wra@ z=}>W{$Zrj^G!!fRp;BZeBtDcTv-5tgST2m}PZENQ)HP?4X~FQmIJZKcHtmz(3g<}VFd#t>&G_G!&)}0!vgLmv zb2ZDu-6@odk?MO)+VX2n6}N&IU4dq^A1Wqt)je&8A=hUocZ-#L&)y|1;A9i5Lwf#> zwYaiPGz%7=S-OZ<=pKJcGqLApX9lf1((mB5wM;k_n@0N)vx|h|w4Nc99a7X7s zN`Lw$F==JndF&>)mgC4mbD|{BoP{D9Ujat(ani*ky$Xxgou4D=x#x^(2yBNzhhHiy zKyh=~Jb81l^s)h4p9Ce^8g#eB4JoLAsXR^5@1EE{awTGX>VtKPi*bPY-@*o9e6)+l z-HwlM-PB>C{|+wQcOfrd%Dybp4)SqD2&lQ}rnT!+7F>(YHzD~b^#Sh1 zy+aZ1ioJ|7{j_gf$IBgY5ngakf?|D0>`dkZjsLhQN|uYKj*>}=-^-lcw(+J1hfyc@ 
zT(9p+kr~a~%Y8|Rvm_t$7lttpLe5g^)wJ)M*ibKT1)ArPOK&{Y%Ic3!#wa8Vu;x)u z-^Gzt(E6qtYnVGcWEHDrU?YnBE7&^pVT7h^=f{FlRXb|5B2TtHN=#IG^8BLbv*$p5 zTGXJp^jja|hMX}qkZ@YQbUtA{#r$(H7GudeLUL=*ZT+Ni8fP4yw=`FsOw`lc_3Lp4 z%&M@aoxWF|wHoV^Kq$-UOCvwUoJn4$G;yiRxs}7oRlzuPz97kGbP%{U4=8xaua%83 z|5INq7cU;`<@Qg~>C;spEyY9 zY}9y_(YiR;7$3Q!e|nQIV7zPmDuD}B@bCcZ75UZ}|CgH{ zxpub=KG2mMMe)<~k7@&WdB=C{P2<-M#PWO>N)UI7v4!_Pg-Q@Xo`+gRX;Ps6JJb5c zI^}ys8UTO)*5aab`Vw!4D@6^jCn3)jI8C#b#<+#NBvD**52od(C!&v2$nAe(O)n8g zv8KwSSW`#O!39d*)NUl|BJ*J8&^B}xL211TS~35$UMvMFIB{1Xl3-NTGh; zErRX#Ui%%NKOD1A4LBzvbLfo=AN&pYaS8o~GShH!GzIY)`Bz67jJ}Wzl7RC0pS)?# zt4~UUNE_0C16RgA;I@>KQld)GBBEtQua7U{#!8T`MDLoWeDlmZUnqLre$ShNQrIEh zcTn&?;Y4)pb}kykn5}%&=CEaS`F)R?Ie059d?NB+*`?wbDaEA`;Mta>yQ_{G6&OGB zI*mmTh<8>``^xQF0%jk|n~Kv{-Erf|#og0BXj6M;e=hVM+VF3nH+jF@H&I0iKiM74 z;w>5ijna;p3(=`77Xf<&WMrb-59O|({jKl2`wwmLt>&kmf6YXc9wkMGbJ~IN1}Gk2 zD@+vv7wqq{L?9ZzJWOrTG{SUGu-lKIX$ZkqO_DiCNqaxhx zcM$<|{Qk^u97I$o5}Pmop^c)NM(;@7&gC&<`fk;GX+<`d5VuMBtX(Rj!RqsIVl&c1 z{C%86w`Gs`_=yiRv@64@(2`&Pk?(-Fjb#jLGDroEKMeBe0 z-u%A`-vF6{28>`1Bk^2(`ir-SZzSu#hpj-NpJKOyOXv%ja43F%RxcM0de@0!jj$#A z4}kV@`^xcMulnG&qRfW}5{%gal$E*y;!7!`ZM=~w>~70C8g!%JUlw;{>CKaq5+ME# zBX6#g;qW*E-Pvu$L4?=u_~3sDZ!<9WnE?>dGhXxbW?>xP`H5nsu%*u_F5%8}Bj1=kE*w@pr7aHl;oXXwZ2jQFA%p(WDoW6Y3b4^T z7%qZViov(A6(fegZ+R3#C>AY6HRBf^$QV^CWeZe4q1x+Ox4uT1rek37oA}!(8F2BI z*)2YHGqK6V&{f5{D@XNgk`(k6ZeVsC(S6`OI+*!iZ9$LnW6TJc(_d8Og270yNE_uc z3~?M8EFmAI->fC}-J`5qBF2=Btm3)??A^16JBBh^&=>qLVsl;uB6{!O?EKkwV%WLE zToYXS0NTmlSmghy)9fQ@7_5Gm1la0!l4RY|0tk?CIvoK1@{RN|V}Jt6?^kpWL7D1N zVdlyC0UO92O2CQ47=G9lMwbIKdYkgpp+l??cu$E6n*5gy-b;X(j_x6Mx94Yp2YBaOY9*h_NpHz4C-tEYOomFiFPb)r0ZHZ5I<-F3ogNh{MVH;9HHXW zE(Z!jJoINXmHqG<0}xL-MF65q#?=16IP(>RsD1)ni9KYepT|l6o!5F+H_m4bNn17d ze~rnk`u~@hd>v>^ZZoxiM)(hm|K)S@pF8xq9n>GiqG)$2p{M1OHB;ra_AbVYer{;< zC93R0j|FYX1D^Lcff>nP`;8)xqDjMY95h9n`uA>BxZ(NiZ$Zvpfh3uKUE6rmKp3)u zjBhU&WxfYEVD{FboOH;lm1)>r_6`S>?Ld?PXnhxoA7msq<8 zK=RfD5!c3y?RHL&q(`hy(WEL0vA!3)=`comU}cJA6{&iIh+JO$05HB>PZCqOlOTm) zvHnG(V@FDQJ@;W#Jh{?E-{9cNPs|q z*on1eGe5rDK^d&O3nq+Kzy(GI{SH&B`wVybXoY z>Dxia4Lw;V%=mR#VyQxD04HF+!QC8~5IYWvb&#wDVnv8+xxfdzJB46JZ+#NMR>gWC zj6nR)%H7rtQ05>lS8_s{m4?3Z-}eH9*S0bO=jf zv9r4Udp!Cez+E6!8T|Y^o^)*uhz7Sdbycl9j#VfEPRCXR>0MM_^}siKVXXx4a-X#m zGzvI}g~M~$E#}r}=JKDobqSDr#)4@EIf?S|x=Ganac+2(M_<9fptN7G+#7l$FQQXt zZUgM=95m`QP$!H7C}DN0w|`M(xBpP({(n(rZ-6RG-uoPU2<2Ps6ewpYI;cHMq0NiG z5&s|HvlTkw1fv;c9lM@zn3RogD7|{H9S`O>A1qJ}@aG(FVy~XeJ^~oz%6V|X0V0y- zEOH-QX^c2SK+6I7e+2-Gmvr@}86FiuJ}&MmS5oScF%v;2!xx}cxz{P>h=+i#c};<5 zVM@#N|8MZ@UG^V%mdaNKoC)lM@_VEhB4EDkCi>Kf{xh@{c1IWfjO{w)nZ7Gu{5dGGjpV`{)j64KY@Dcd?pcO`85TM@MIzbLf8$Mp277nB?l^pF9ETW+=j{%Hd5bbmWJh~QeMK}QzFi`&3H2S3=P>903+{|svKzNFNJfZ}WZSDq z-g`pXh|9nr^f^MA2OG4FG-f`W@ON1CzAXImNJ3R`5qe)`UjjfDih$L^Xu#w3y&!YQp6MrW1EcQGjsSW* zNDfdwuvR(5@rW!y`52KS3McmZ5N)Oaf_dvHpnNb)a! 
zUYKbO3}0!s)j+<4pIHJPRBko0ahZ6WwqK3X{a?GTs`9UdXlT%J5iQE&CxYKqoQ%jKRhkb&?eJ`U;6=vU)W~uF{xV`r zNnz&sBNqMZF%bihcHmkDQfkAQf7#FjSfeATW!I<=8ouM1^zRXYmp%o13>VW~<-88O zNf*dLlFvO)4&?bLtb)a=32(QHTdxLJVRFln{Qu91^kOZn7x+N~{0&a+%EMH$rc`h^ z5GYDzrA^E6+I%+oH%c##Cd(5f#~baLG_%2NV=9sqaovAxm?)jPcLo7bZ|s8rgTB5h zX0Z-STjCAb{Dlt&mgd2GzZVp#dg=<2fFJfco?v*g41+KpgoE zkixfzfl)4CUhF^e1V_&pt|f?nuDNB(m-0-Bu#&~uUBPq~;DH@SVNZjR?WOqa2b~>< zL%$moZKRw%rdY^(4f!xQV01Wh8b-u0&}Kx%#|~h;bdS!URf%30RDTmJ)pcL>bj2{P zKM7c6yqMzFX=2RZB`X40@5j2M?lMBBw@_IT#TOdy$Kus3`m#fIPqp1`0Nab!vZ>xTy3=bZi!N*vHaDAeO`t>ZtrqZb2j?#|tGMMv!h zOi+|*Enr?^oRSd#*LVP#G9F<5vr_oTR95qix9I*qIP~tnR1OW?DL~~o`8C!Jh$Y({ z|6ApFGI!>19M)E|_Iz+G;PXNdfX|B@aGT-*d|p!9V(I)}xP3i(Ei~eAcuOR%xE@K^ zP7QMy=V4^;tP=X)h3;NR36L}0ufi0UYP>sCKB$WU<%4~^1kkyEPZa;};x;`n<^E}6 z;h)6li2KoS0I^Zqa<$(50y`x)q||hX+e~Eh;4_4Ux5&fOv zfeH)t$wev*?nQFg=5aRxu&sF9sSD1;jGA;`f6|F$B-4MZ)hK7b1T}PaU{un-%y>yV z(~|P6hBbAi5;w#pgK>l~o^4>nj_6!e+>VuCoEz^G1bnaSN6%Zhe{$C_R^$kE6_u(e z`Op6W-%=@-izX!m1<6CXI$ZxBxf)~(%zA(ewjPCRDAS9!P97@OvK^lqUyQkj1FmNA zC}xNiP;sm%LpaZ_Bj?$`VEY|p}l1gI95TpMSdqcs4LrfcF*l3^Q49)?X}K1pTn;l92Ax{DRc zs1f7wFMoq_>k~iSTZd0m5L*pMgV{Wm~T_+($H)^KpmJ{em)q^L4vpYHA8u z^z;Oy{jI}c0L6yq=MLNhv7J}9zG0w#_&bM_0g1rXjdq~)4u1pp;aJz>7(k+)+x__V zua_wnrl;27GY;7J|9QqihcVZYYNCAySR$TU1^-M^DaA$p^_yRNGK*;b`kv%i=NtQ5 zlatQ&fJwo5;DwX|LrM8{gs81hf}F2HVDxE*zH^LXe4}Y6bWWJ3wofa3QaH)1byDo- z;~BhdOvKoH^KHmQ(9|j5|1%r(<5(;R1iH_34fuB%tvK*+i*POwC{((c5d_jqvZV#4 z$Mof>L79UnZ(4shVXL&OI%g~C_^%5lg4YHL zd_{|`^PL{LzcP7Yddmg`8cmiB-D<7yXTJ=b6bPiJW1wAh)i(V90Z>Z=1QY-O00;nw zPU1j%lpIzyM*#q`YXJZi0001YZ*pWWZDnL>VJ~TIVP|DAE^uyVto>y`98t41j6%@h z5?lj;;O-iN2X}XOcMSx$;4Z=4-Q6VwXK?qyU2gN7_dVx)-@QL?|CkwOnBCpGcU7%g zwW=pfURE3#0UrSZ0s>i5LPQY)0?rfy0`e6e7I;Uz(AEnAf)qkhL`c~^<7CytU3qtD z=)w+z^cVj(3~Zqfzd~`z4J!iJYbis&X+)`0nST<>A;U(e3aynC()tF87=)7`CQ6ly z5sINCss#g`s}GNcz0`~CT08kPl$tu8CVdOhHRa+ll5sWel;vbPlIoQGx(Eg~}7TQww`bcZJL-&;QXWCpj)%OfOl)7>POP!vD&z4n??cwQ-& zql`VxYJZ6{CQeoF4V{t4+^BpRKJ?rLS#dPZn(p9b2ELo)!(_qEBgeHnL68q}ziz=L zv480Q+9V_|fgvX{yM}mKE80U8DP;@OB?L^jEQ7CFY4HaAzD}kW?^%)6*We%@xo-tVem3KG|&j;dbS{m_^A z`azgSVWO+V-*aGew$m zTTt2~hMAES5+%k@sqJHSqfcYIo6vvHjEKliq4LdwNt5)$){YUS;>@Kf{mtd==B;aB z31~;4&2>N8cKiHRCB@MRqI{}ln}gh`@uOy|1Kcew6zC~8rq>Yy=Nj0EaT5{Webm?b zBIMLeuP*S66yk3a-A^GIV?6mT)~zXht!SDJJg5!jIl${=j^w=0$0e7<0snJBo8@;; zD2H3N+P8YgjJ2hE8BfeADSYg@p7*~z^B|FfILwd9{vv#PvQoCR>!Nzn5@d;PA8%Mp zo{$ROBzPff5l@PD`Gez26j!WRRxo`QFo79!y%}CcTGbGvYkSAKU~D|Le0fTH$Pj7hqZ8N>N%^ouC!xWHl^%T(|p-GM^tF8>LLk0YNY1~Pw#$y%7B zud_ktShVYpz`2F)`K_2jMozh8?2;nd)RhZt9frU8eEwUM1ZsAj*<;g}NK4&isCEiM z9?!G{;jbejBQBu9p`rBOrpV^=UviMdDgS$QkTr$>*za_1W8gfrsO7!xYjYL!&J3so z%kw`HZD)&%JMMnHdTV0fWYJ3ZHhc#CbW_Jj?8$gRz^kb{_q--*_lsr{dYq)Ak%H;Fi|Gr{y|c>oqI zaMD{-{<&1ESQ3ZYWdjPZYdm$r%iwp}`><{+_s6I0cP^Z17wGqZo2yj&0q1P`HmxIJ z5(xgrzymd;DVf6Es<_Cjg*!A{I3sn6vw29 zynbR;*YWMh*7&RbuX$ef7D5Z&%T4=dH<6!nF)sx0#a+`yN}FmdKri?zi*kEn>E%m^jT(*7ms2VHiD!g%qm%6) zQXt&0tY!8%ZvQc)K-H;T@w##bBkP@Pj5bIg8x55&9{H*qEbXpgtlRea^YHy`EKsuN zV594ba zug9B=I;2xu@yY#Mikr(&(0iSDq`IszPpQj>axMr0-G0S`vBoDFt6;uyTN)l2nc$>C z9MmCyFqv&N8XejAhw8U!IZ6Xj8iZ81XQw+fHuRDy_&@XHcziMX!>MHjU%$b)2iqLC za~dok#>L`iE19Tix5r;~$Wz*6C%HGDJkqx1Bbdk=*#j!{Yx_07Y`3xSqOgDQDM#^| z-J5Z8e|f(V6cC}72{JX)-B#3OVTS_SFD(?*>kUt~@6|sCaH}3?9zHIvN>pl((Zyd5 zRIcPe}N4_yI?N|xt&n~A$*^MB0nm4^# z(&IxIZH*aQPTGcfifb23+F!dU+qh`w5EkGX0X@u~@K-a1dr^Yx!QgH(o<=FtfC9U= zp$k4eGvW%tgNdOSIiqwFg7$9{Lq?FzQ<{%WOg8i&=H;cBRfF~AC__R-$rGci6&Xf( zCs``qF2YU9#Wb^-qMDV3eb5Jx$KWe_%}LCa1Cj8L{abHq);%c1yk;{UyO=>bn6~Ra zz2^4gs`;TLzXeZy*%G@az`bv{$}TBXjW>A_{(I(ly_Q~AB#HlDK5s8^o6PW|vE2=M 
z+bzNUs!7)hh`DYKA6iFN?8MQ^p1tgl=Sgv>rm;syDr^g$1^mN#vb^DzD8wzqO4&?c zLEx^B+O5GKNmpy3PMcBia=Q!O<6gA%ey&j~%j5KexNoG(Lk7}VLS5zF9zA4sKX~Kl zX51U3T&5YMt0N^4-%!pDUpxVIhufpk%|L}nftF%(|BWK44VABMyKc+&&z3`m(rsY8 zU~TP0ytl6@YRll4Iv`HoY6}dQ75PyvP*NRxMlZEL$TBX$k{`G0bLt(94hb4XwHPy| z>*nljGyWZ0^)7|OMrVCMna>~cU|KJ&$ZTgR;^du?`)4?VIP2XGNefrCrTDl$-ZS@L zjc>A4SY(4*du~_F_L7gD@67D_bh{zZl@LcIXyo^W1Hxz$np9zVg%ve8QBuG0N8%`P)5N&*RkHm5 z1aulg@G||#3wJLi8V}OCIJg_byh3rT@M1bvJM7fx8N?xhxTjx9qOl^k+qN|vdMeBj z9obnY05(Ft^$eldTrlm@@d8u6oXbA04M$Iy*=o75RAx|kjL`w$Ln+S}9wK=v|98?Q zg$7dZC2Ct*{iMoB?3Av?2~}n6lrPx>eymWH+%6L z()Ma)@a@)APo{g$(%mxstpj_9bqAYVLV^Sm*J*q{gSFv4wX{+f^VqkA ztFqC^6qCd*#`Jp_bZ9|)%cy^Y@hC$EdtrG9`TzKp2c`#Qr*w8JlS%1`(z&>myvkao zuCHSl<_*y1>FwL5(Hc(}eP}^;*X-J|{ca8djfea#k_Ot6_`!D9Jl@e_kPh-zNSy{G z@hY_F>&sUyw=Ksx)^nrw6n(oiwX9Y4X9uX-0(B(*<5q{pEh$>_y$cxh*Cl6-QSOwd zC!$vwwV!wPQZn{3=!YUT7|-l>Vb-zSpiQPYsH}QLDo6T@itf%253^54ak~7x3Emz~ zv)y--kzF+!v)V%D=_7bXE88=D8b|m%sRDk z;bqhm5ZF&nGQ9UwCnhI*hK4|=o^F71ZlpCq+Q7LC`F{7mW|5OqY4=;ZBT9?i-yNdT zg)AUaf#kV)qBANujxE=J-UaCqeyDgFS-};XhZ%~dTWVn?srHpF4o2)Gp& zf21PPN{@7ro&oqu9d{UT#MOvReCSk@ulCZ>B9~l+<>XHd@DCMughT0X-3(=O^W}{DS~MGrA}Sr>2rVd2W1Hd8O&=ZcE=FJ3*h$Rif!^76 zYVGBll)qlNMim@Kr5Qox*Pjf`Pw`8p@jgc*99qs`d1G}k2Wzz2@mWZPBZXV+RYbq+KT}StDG$YmGq>SK_%lHF)izD z6Ibi=GF15*P#B#cVcF=JY%E$561(qd>{QO{?0#1~`U#A%R09bq$X8^qbg#aBk5(yEsiUWP)%qK`Z_vgi)yJAF*rh`NN- z*YuQUY=`6oKdM>luss}l!y3>m11UgBerrS{=oxk7)-pQG4&+-~{SWOq@pkNfn$EbA zb7{yrXKiwcme37HKTgH&?9baowYw5bb#T-{R88qR^!>X|b{y0ES0h~XWNJbhOnVtG zHXv;87h#sJCmC~Y*j2{K9v{%J6&4tvisPw(ePU@dCi~mw*R_Ck&|dvz?Dk zPIGazY`nXq7JI$^sUtscLT%J~>H0{MRW*~97EeExs}XOj7wrb0txz|qpMGPzmkeXh z9Pqn~9S8(_LxE}z)EN5EQbkq!S+2`sF2Mkk+ko>mNN= zi*+3(B%2k6eiPkUnT`lQhVDschqrghO?L)Ms*>veczj#!B^O?$UF-HSmv>(SW!@T1 zgCpihap1{-@;0h?)Of(3G}X2*Va+Z)PT z(A48fOgVsoaD0%#dE}XUsmP_jH?I$OM$wC?t!H_QQ8@l99^HvA#7H`W20!t#&t=*%9XkyE_HH-u`&D>u6OVrtkM)UqF#?S?S z_P@rK_muso9P&T#8>~{n@`J!s{6Y?&&AlOf@!t&(@oFd z3ytU}OB@v++N{S zMTjH3toZMtZhPH2;m)Bmu{{%)BvVhj%SCE&V|5vzJ5NRhm000MfZH}z{VKHeENm;-#d*Z7`p*w6<#hyy^|$t!qhH* zqBa}{IVvAr>1MAib>B#k3Gm(~OQwU5qq1p&ro-~K5?MMwekw?~FcRz>yE_bt_>}Nc z;m6u(Em&CZDlUQ`k5LdQ`rp?h{{*WrveIqvqvhF2pxa)FBC*QAZ-p0PhKD=KBE=4G z(1USy!QR8oz3$S-Z?=h~46Aj~+Uo)PMM2?uqs_TW`i<~EHPBlesF`U0J3c#Fo&OiV z(d0IM=TRwFLmkzy30tCY6eMb+}66e&~i;h~Tbz0s_yM3j=W0|aN&+Q^6 zkEg_mMQC~b|7DfH2#S<;f|cwZS;xq4-x68Vi=EG;Xmn(Hcv+DhD--U{M}y^D9Lu2N zbm)Jbv}TT!lZ2Hho?a5q(Mtm7^}n{3l2n>G%DkR$Eci%t;6KCk<8AG5r2l=+?=Kwd zl>VRl7RgWWNR_5k@<9Fm-xL1+V#l;F^8Yb7daPn$clrOoci@!-_5b%^|9g_(cc?h} zDdm5b%KO1mn;?vb|3Ah_V1Y4>I9Lp0-Q|al#VEL||6iX<94h>O9k2=wK!y!RQPXmU z$Q0exK3wa@F%W;H-^NgLWRY@7pYth2JJ5=+){Hv9{+l-JKd*dyOFd`vzxF9{$m6<& z6h$!42sTLIfiV@$|I1N$YM?qw8LiE$R|x8ei-u!5PiuQA?GirOC z^w8(`llniKqB@NU9U1W$d#byS9wE8wP?CrId?NY6B&(qtT@^Tje8HFoy!pRQCbYRA za)|{E2WMzBk+#MA!LjqSRJCGM@AVR=x3@REfj@raS8}pi&ExP?*Xs@Ae7T0Os%p~M zO(LyYq2Jpp-$g%W(Am?~l;D2H-P+il`dqn2yx{9ajPvn)#orgHi*bpi{#x?&q&ubz zI^Iiq-R>P#g4_hH?9JTQtz0XJxe&Z>~&*9{eZl{~TgMGXh!) 
ze{bzUPky9?v8#x{1cupPP8TMt5RL{dZ2UL)ks02EN2&;;-O)rk&HB*g2CID#12uIR zx6>Yz@AEwwoS6(I<`mx*Ql5AWB_ChLL^@}k`81iMqvQF-#b++A6ci$!^QZMUVv_9K ziHR1|34GNm-KeUns<9h9JUk?Pw&K4(HR4D5y}%tqaTHR;8g+#r*Zrw%v+)#Wz-`w= zS$NSH%g-Ag#@bn^6_t$QM}&rQ@zkevb|~R>SYZ&2dC{2y#GH5ADNZ9;;9CTU;BUQy z4@;dRm*+DQ$c-dt4BXq2w`-H-*36mJQUu;E2zgm$3?^F70q0Z<$xlL`%`%id;~Rr3~NxV^#Um`9*p=b4y_RoqG?*i$Y5i__l4=*XN?5 zV#R_V9;j8)gn}NyZfdq0$uA)v%fV9{kg?B&r$oE_<%+TK@qLG^`{rmnJ*Z1trKAhQ zYntb%5QEXag@odKzo7-e|wEVF01Y)|Gh{n^~-p=Na1+o0>ZE`U1!0w?_4J3VgWQ`b3MEfZ?WtQR!Zxq8B3 zP>MJEyb!JLo0M%3G%Kq0GeuLE69Hd=V>PQfb~WiBDM{KUfvisaxQVH;zgT=@zhwxA zZhh4e*M&B?@6@Thg<*!qV_GLJLqEg`zA?AFBW8Vce70`pW{_>&<*({CQW+c?M&5pk z_8wIH+vzs8@3jLzma7-IRQcq3=i$q+GGL`4yWX;H`xKNA`1M|R9sX#c!la3T;s%Yw!VdSx&nct<&cpAB zQr0=zbgllXUC;#`EFQBlGJp0YrCAq>C31PG19CTt61>WIq>H}tzL2FOkZUkLsDI+R zk>xAG5H!~Qr_qhU#WC34ci2*cGBl);v-Gd)!hC2JTw%4A^2iU}0?&~R*jv!LQ}3L8 zADNW<+ke19KGkHFl-W~^B%KeKHI1!wtZX}E`nplr99%LpTrTjwMXIR1N9$wtzJSROX8y!QXL@v`#pKEFLRpnrv1l z4uZD}-Z?qzhyJTKd(SaCBiv56d{xq^qU&ohm+E(uLB;i!PCBy_?CS)Z%fg~6`ug)% z5M^JZcf5KM*3idme5{>n9D816$E{$i6J?%wncMN~o1sHggOeEb&RliW)p56nV)kzq z^!EEu1+Sas)F7{0oc3!i=V*?%fp{@fszO^Rn>kuwzcqVT$5G_30k=0}~xO)1-Fc0U-aHjZ-R(7jR#;_@+Q@W3wuk8}}|v!^O&FM(Sx-0o4DFZ!iYg_`kspwQFhtqtyP>I@w>yi;2wWj}-Y@mj3a zewY+!%t*)$%km9j>sq`JSn#eCt$d&1iCYtzv6!!Qrt@dtsq-^S`HHnzIkBj679+ET^nIIuQJq>+y%l<}k|i6-m9%zW1iKtpdZ@QO z2|ZpSOEB>7h}PKcue>GFV9dT>{nWo8;MDr7{i(~N{AaOBh}$_T12sOzlb>%iM#gP3 zpm9<59vZJUdnHyyhs?RZ`NX(P!QL?vxP@#!;DpNz_M z$v8Xr(e264Z0WvTh0}&Lf&3mo;CR#1wwQ2FE+^{t$B`n0pi{P<^TbZJ+KS99;^g3|X|7FuFnF3DH zYK0T~Cqc*VUOjfb>8zUrW);V6u1DYY#ff*81Um+2@em)5uLYA=1~V%Ixg1|l)0wyf zWOSs!gO582`_l}l5PvDzQgl~Iw$k6u7?kzr8}!rmo#NefFE`N}`-Nhb3j9vF zsHz3(^srqq_I~WipLv;G65vL1xIbje~6ZJ@@5UQK5=ILA22N z=nr}OU6o6UbPE#kY|MsfkQe<}bAp3X7Ka{_OJ`p9xM-5*k($WVSB9roQ=nuW916XPAAQvpdg2r$D8l`Lmd8{K*-!1PwkUp z?AWQT0OE2WEVALn)<6)#cwtcy4sJpNbSOPu_u=6o@f~>b?PdLKx=f7`7|Kj%zRSuq6^gH|4Fxwjv3)w2iN+FI5W z$ciO(vB8VeZN&jD3ZK26p-QKXFz;#c1FZYdg<32&cC8LG9bx9etl*xZY$*#1t1A_<$@kcXcy?mouq2gd>bl!51lJ zsJcWG_|5;T1!&~_ZB5B$%jHnx;b$D#E%N$vbmNcG0s0I67%}x*Bz>|FLrA**Wi7nU z`UtkkOJ@VqhmXyzw3dNR=SbCvxzEq@`rp?_&BpmjM$Am8e`NGHJ%wO$@`J$=Lx-$O zpE|>T{YmPU6momAX>_30)spLYa{hs72&wr!$uQze=bSNeeU@& z$o4=iJBzy9sa0aA&8y#!#R6essoojvp7mE$c>1b zJ9t68b13J$=`}YY9fSgDVq+V@3?TInm4G>INNz28v~tTeE)6!9q@S>`M@25SD-H^U zMMR#+LEP5WI^#hm5k9=uLANBrT>V>Nh1IJw9(Mr4yy774&)J}eMcmmqub(*`7m-@cB*o%) z@RuW0uHz}}ByF+e%};_PmxFTjksC*A?t`g!O9Do>gu+EG?1M%G9lB>${qnOq(=mJlHFfLT zX*h8)%&*_04E1n4Fgaj3-4=3vrTHFqy1tn=r2Yh+TDp=#IK*LyOHW6H`@AW*{*S!W zNnnbt(!vM3A-rM!*y2H@qoR3%_{UghLLi+^XIf{M-zR@jaOD@%MN-WmcU}6@mAw>X z-+aucB-?M;7Gi`B{S%l&kl1# zUEW`8dtVITAQ5s^D+km;!J*JGZD=-F&V4r-zt|qaBqp9zlH=cHp>hM+ zG+hAU8QX*REWtXs!2kfsd`~BJvSYk1NBzwX+u?*MFcM4IqyKixJocy;NHW$}+)y1Z z^c+s^!@iqL=(ak4l%{OUDHrYVbg=+6(hHtF$9e(Jmy1M+gEdVOt?uogJoyL8!2-D^ z$&hS<_Z&vJ!@)U#w83_E=9usnKcw7(3Vqz}HD0H%g13#4n0Bvuob$K39R~O4FZ&Sv zZP;|dG=?{IVhR&GgySUJ-NSJC+|SwLELQeoy^ZCi1{EtD|eGl#!Aq&ySKQRVHwzQ*Ysa59an3mblxdMmJLu*`fzlb3vk~2_xIpL=RNAhLMy8 z`c3cOcSXb-wofFld(6;SQ7dXi2bxp53?8clU0bDd_7mvlui#Rcy`u@h)VyVs+)J zKv)jLKT>pICZaCsx*Qp_OW`|5(wvxpT6^J~gXl&omR$c?!>iy$Jq21T?T)d&btsRES3f;AL+*vb6u3r`y{?58EOS%I-d0K z{ChA{=V`Fgu(8ZWOqz^=J-FDMW|7Dvnr_!l4gCWhaqMU_meyg7V2VJ#BRsr-bBa$z zAfB$gS^-mt_@8DvGwZib2Y$WsZJ-2}5J+zm#v1BLn=;VG1EIz5-g`Rl&k&+I!^zj1 zBHpks9altE@(amXadZzi@_cth?Ph>kw9~k5}hW5MfsPWo5!mdn)yd z+e`T0bKJoyy&pwY2w6@bNAid=53Lo`<>gmWM}#=g(cspH+p>E1C->OYL?!*U@p9|u zQdI{{+f2-PMazYf0gq01GXf!UA}Qo-b`}}tYw-Ho8Off^A}|&WL?d$zgO?_5qed4s zUCUGX+@-i10_B)9BDjAA?9c*yf2rKOJ$cjr9ZfS8!^HjsxxbnD;W%C456#;DzR%7v 
z`1^u%=pLOIwoiKC+X_eQ4=+VJ|BO#X^&j?FkpX~I4`bIb_o}(wIpqT+k1ks;F-`Ja zx;m|{G^zc8rm{*EOUt9bniL^JN_GfoF1cSS=?A?FI#p;`cMA;K+b%IxeR0L*zv*Q8 zwW89e;VryY=}qk%H9W^1N2Xu!YS8u)8)!Hqjk}gRCXL1&;EAL`H6-oy&0>v3>Dxm8 zR?pWNUKo0N&DimQb3cwMnXOcbk1vwF8coo+e)2)}6mc2l^!;7!jL1(X8Owmz^YCi~ z{g;^9iWREexK{{`3Qe-Rq!CaH^H=>m5r)T6EZ?)ll4Ufns+5(r8rF%aIx`YCFyn>D zKhM~ZT@)!h9`VybikSr^T%)d4Ax{jjEKp8)ne;M&j8G3ZK-B?Y(Bt4wNcikqzGS?_ zk5NIsc>pu-O=fE4zY+z7gwz39=i?pNuIYJ-*;QSQw0hbliWis*?*ya36P?ju z8lkF^wLItLCBIY?%9nDhxkns$1@{5gfT#`+RD0=%2XRFewRymfD$Z z03yh-?PMB=Qeou5#Uo9a3yv)j`aj^e7f|!&UiFJa@huD?to{6l-L zL*IgBXTDBb3?MZU21JJ_eYUKAx#y7n;{0Ym*d%QbE&MT&D#Tw)<1G2~ss92+x=7lN zR?Wn~W~?Xa^RXFJR{C>jh?6as@wnz*t!##n`MrqwYLvh?i4x>x1>HSHcY&dD(WCI) za!|S<>x&5S!q1!U@3!3N>1Nyn6GuWj_y}|WmGN!10Drqs>B}+8< zki4O*t_xOL-Vmjt8IOFQlqWHUIyQPuLqIuzW9%}%as4&arrdkqtC=<}nQ#iZR6pzg(`8F zB*tTO6>aL=IcYE_Yfo8e^7un&LQcCqc%XBoNwo9IR<>ixQ;0%PNxc z7d%-V9PGv(Ko9}=V4y59-N9q^d7n|+VClnDk?7F^Z=A4rbj_AtHaCo^&MK3&y@QkE zRI2NOkfM(91pb20C2?CuRsC-)CH}c$A%0yV!ExKAGk&-f7TzjZjjbl$!#j28KsaoB z6>0!rYNC+0EGbZ;Nq;>K*C)$stkxeuPpcsuL@Ao^-G1!ROfTZ4tp2p`Il}mKi`lhn z^~z&}(O@+Po#V^zXDXZitq&Gv!RCm|hnfh}3~d>-njp2eVB<+PGEad8UKR!fu_NQ- zG}8jBFedZ}1R3;Aii7^>=0wbaf6w7pOe9Ka&UP~@!wTD z?N6ptQl)2?m(wXKDQR?gvIC6z_1S9MUms&*hV;eJ6dL_QnmK?=OA@1;r*L82e~z|LkO!>_}T55M%m* zDQAHFEo$-$9~vaw`XsF&;WczfuV%bu9bNf@IjR4q0hD9~N=ebb$P@LVF3cj0%XOG> z$7q)}LaU1us6d5Pb4>P>=x(8Oy7pfqT3q+TU*bY!9f=5kXb%tuW8y$ErCdblmdR_> zOwc&ZHpevimeV8@@)ycpuIpO(CNam2+x&Cs^c z>|_(JbZdS7w_qaV$!>#ml`LgMa>_dVWdsrXeF3vQa)T}=kgB&bQxdi9bmyEg9d>o|G!(vLm>+5@)U-95Qr%-xht92(Q^J0l8 z2$pg1`f9m`jLcA*nVI0?62RxyWIG73K@yVuLZRsoMqRJ##2=0q{s?^|x+8*Vp+&S# zUp-_RDi>~3Dw@Q0I8*cbiHd5x*7w`>k?$vDbZTMEN&9gp8MH}{FCck)m{2u#ryS`r{=6! z8I|=P&y6;;u*mL_RetU>B^62=vz|6nt9DXPqXvzT;@hy^ZsRldEoq%aDLiI-vsc1tCKn)FwzTk%FLCZq-w9N( z;J=TQ-hF;~Jj~DNxcghJKOBm@yVV40w=4q2HJ1sRa0xD5im38*SpIzKPjR>J#>6a;tUdH76`3mF|&iUu&{Vf z{jz-@!!ii-L^2BaYGYzhdwP2KuKx=E$7D_X`+9qEnYxF4opBNxbU5|9A=*wRaxGVy znd?j^wt(WF(JD_~Xb%u2@%TO2z@1MFF30l)G6blUeKh$1Fu32ylf%7wR&w{ksT8RO zSR=P}AI^2ZHz<|Td&MfSaYGSlnsBD0>h*)*25F>^*wNXZ8 z+Y_bB(fHQ`!`5a9Z5_xx5m&Dy52eq@=ySnHF2(U)S@FoK@ph{i%!-*QDC8q*`-EY3 zp6UWOuHnwWYHE|6&ME$_x}XZZ2aC$;7G7&hQU6!S1R|A529pQbxSX-BrSDgzTa?p~ z-{Ss#C|`;91aEiG{#p(WxEShM>*|x6{3>!QWiRj;o3Q?oAu_aO`~f?5H8Ub#W>7+r(brfe z?p}BSEx|4^2>yH%Jmi(~?dt+(zWTJOZa%v+Rmy%Mjvtv#D4*ZDG;dQ&33(JZ?ko;j z*T`duJ5d$Y+6T=Plp;M^^VH%k1!9|wi`(c3baAohjOJ6sIT#m@M)-K6s}BFXP{XXv zp`R2&Cln-H@*Uq2t_kbA2}^rS-RN{^so^CWYca~Cp_Wu18oGltBvt(?=#0OSN0xTRE>IKp-i_j&j1<@!OuCh#nftDlmHtt=`JG9yunF!gL7n>{eCU$0pD z<^smid^R`1r=z@i%4?snbftE6@;j>@5w)PS>Z+a_LZwXgPg-`7l1^F`IaCR4<2R|0 zlN}%hC$M^LqIjtOmC5h6_6R0w^WCaB%S5~(Y|>U)byBPWXxD^A_H1|U+(nFxD4na1 zO1d6BHf@gSM)RJKE&I||)$9h2(0`0Z&tR-(i_wLHk*IG@6AEAuvHzB9uu{rqgl1=x zba>v-*z0P^O{6eGoqH3n#4R)6_ADjq`QrI!WDo-MsW`ymz{A7q-<~W&#lw!VP+gm*cu#Hhe76Nh zQ6^2bOBO`1AXC~bttf6zR3)U_rz)sqzOj^sr2Yo}k6|hOTlesfl~hH7B0x2waV_c= z-$`@Q*rEN@?NsrufD(bq#Cn8H=7!6#K2e4sAS?rg&@~o)skJ**PHG7tKqO3@Px9Nl zJ*~+;#v7@-wwe7LiPmYm(0*1m6?dk(DxZmF?z5=-oH*S^naWBF7thjLdzzPj0<&WB zD|J6B>dRb-@sAeh7Anm_YoN4{1+qgCDC%ux-s6_^oHSWL@R7uQG3f|lKEO>lSx z*~lixP8{|MtpT*BgV5d8W&KYE7SE`SucDK)?6v0c^K^#VbhsvQa6BeE_`8WOr&3f} zEEYC(Rz$ZwwSIs?poW?$&gWG*hZP)cz$F!~naBwDgU*v>rjq#_n@qaWr z)M-a5KI&IsF0p%7W(N#vRdW5Dh=n{|b?Lr>yudq6cWWBM zYfZH~^anL-dw@{es}Ul4p)1^J)wmn%^|x9`J~qp0Y8d*A8RSbCa$EGb)M}=kgR{FK zB<{DK|6C%F6(V}e;!QE9-x=Pdg0)#!;!v_%$%6l$ClEXi)T-o(Av0(+J>H{&dzBv{p#lD75rfl|Fn<4WL-YD7e}jcSk?$<1<783Ldtb ztNr1I4`+qU$xJA)im1No_J^DJnUe5UHshS>&{yewdq?O2;9=I`yRnqk$q;C6Kg)ov>T5pt^%Zj?j0=XW4y#XNL;gQ`^ 
z+PV#DUE8{cV8=ctz&W`tuM)8@wx7_>-+1@KpKz&utHwJvgH9gHaQ+w{b+TCGzaK2oD-BSF~Ipm*o@R`L`*N;(On6?rs^L99to z%a>bg?WBE|2hW57cN_Iu=IxAt;YvVt0H(iI@0xw~VE&QyCJk?+S#*A-RaJ8mn`^4S zzZz!o5~(?bMKtD=tdt0Vox?V&#WQt0KPWQzs3sD%>W4wxvCiw%Yzaq^wW#nx86gxu z*75bsc_)*|sqmL|VjQ>f0FTM#9;_qVIa8Sr9yG0pAu)vxXT2uR-evEWw|62{tkM@F zx9zu4Q`uF6IJq~0cl@(jVy}U_8xRY8lM05t$K-H?)+tARK4f9}2cQ4pPK;ppbMIqJ zH7sl2BZ1IVz4sXF5(^yX-5~2(=IP{alHAjbizZv!x<>!XOp=GuYTdXVsQSw1_K6sK zq}^g7$|Ol~K$%rF1)kHt&K2$~&O&@Wt~)WeAc#omWuOv~-a69x%$%~HvubB6 z*qh>ZckQ|-t7DuX>T2G_d*OvICSL1xM5YS;UiA>eeR{Na0%$1oZC_U$2<&%*js3^w zeE{}wx)|C?lfDKMNy_K=i1r@iEkVP~8rpZYYbdDsvA3LEP)|>4*~?#n*$2`3?%!;ahpD~XcP-CX z&{7G{;-<3iU|obfMQZ+H%eFAZ--uYIYP%Df`DL9pW;q9wiehz#D*pC{4H9Eh(~HaI z*jBTmL7s3I1d3D}x>hb^<}G3c`4U?jZW;n>0S?s)5&SaNY}?s!f%wgdnp z0tzy@--(GTI}moh04*~G6dpuXla4$L4pzumS@M0$q8{tqaOgR-Dcu%39*yti6))5 z?r$i%kg`kT^yzv}d+5-5lKJ~?dU4RsE66Y4X#XQoS`$9zFL6I~gJ`Eu!*^r+vL6Pu zGa3O-g0AP!uaUq9y3vc?=8AW4i7?8=WyK$6ar&4)!ezh%h}7UI^#!Rp5$C#3h8C(>Tq|Qit50XDa3%+osiA6ZC{IA5q5K&tZA*PpB!oD(s6a7 ze1pe44S1~MJ`ws>bF!Z@fjH{vnwO!l<&7mBN;(ksVGRhm{#TlteTXX_4on zJ+>>-k+}7Y*T6p{{m$j|J5r?@ef+-71Xq61qTS9`B zi~buW%IR&@<|%OgzD>18xpGHXit}nbxY0cFV@5#TU`9>Q(hsNOjFPi?uQ(rq`}6Kz zlY_>yQ9iZ12%x?IKe$pu>}-_UHNVjgo*c>}Le(Gv2`m^`Hx7NtA!1DRtK%V}9jYCM zVA~&w9c+ZK=ka|d250{#ZA)lhZ+Jsq)77PW=<>sSbHUg^_HR;SuzIuO02Ip(N7tfz z46jYASI?!b3>U|jtr77koTob*~rSYLVP69JYo-g5`qZT;{y z?QO{kr`i569K<$T? zH~f2dujIS`)AwbI0CFuj?;=qGGHOwk^-&zq|ei%ly|v36*VPvlf6AXvsJO&wjf71JrcQ^&S1$ zuR*IgF|kPwVYJ+-1+A+`0xIy=6(g$+_%rMqD7hcBl7};D|F4Ml{kcE}ug;jI=D)Q7 z!1;-s?au%Ce<^{dzsUUWZvo$v#`|9_um2h8|HnK3Pm%QhUv_$L`HuK zJdr7?Kd$|b%fE|X8NPl^)XEfqa%BDc)O_H45-vMbx%JoTGg;36TSHw^ro0A)R^xK& z3A;b3NBZ?^QF{oPqN%sHck!_vL04(#E`LJ$@|+c3hCs5ctgUU>?eX@h%(qn@<*S+F zuZl^|lcZ+{;L_o!lKRQHA90gU|0OtW{*;o4u)Fer$YzTR^rin+blzFOWb3E^8j+xUq{wcfCP%^s?Uba1Co+QyhA;_VZkj@UA0{_*3wz zqKkAu)gM5VFVBeEUnlgpyVMd#)onkC`|+P$KQ49Zk||zfayy}5QhqpEaMHw?t`H1M zUr5k=1J;hC71crTisjsMv*%6PO^xmCCu$E02U~EMEkB&Y{csJ&G5EGHYN_48CG6%a z85vgJJh-vO!RZ6YZ)IXcsR+^>?bElmMzYj=^ zG6bAZ{QVzYTAnKBoe!^hTU{TC5@{@M#nGvCf*z9`kaj#U`f077a?>tH5hL+=Qz;P5 zobGfgRvUR;pTCg5v4vUq;i*{~0cvhU%J{I)Q|h|v=^6`wOV%E%nTK;QXp{a zHTmgv>UaLHOgRF2eishZQ4)Zhf8P0eVf#zJkGanZvt`cAcJ1TpR|-a6-t=gN7@WuU zF;xxN?QGv$IYL3-(*8%QS*lPI@M}l^BW)?Uc+8C5-#Q?-n%S+PJ8S7ELLuQcy?bD& zj8=e>DrZEeZV#}nqzTvBg{33nT|gmuNR+Ia-1L5g@w___t0G=4)|(>nXEmFub6Sua zPhll_<7r5}IC8hO^VH&wS*)ghrG0&DqF`&O_`KTYVz^W1moFp_q5mAU`f7whQohvP z-qkghJtQD*jFUMPE^$abe@X%nUt@RTZZ}7B;WIve>iu$v>U6WaLz%0pou{=mLmHhe zoi6^FWO?}VO$oew$s}7&URo3p`O$|8a?^P0uo<%TonauXMl}x(7XQSpepu-@Wb^G1 zS_Dh~Mmy$b9+%Ut_n&OFt}MZ9pXR@LjiMhDga;6kjzie5U;5OeGbQQ-7{J z|H)?83D~YM{cZqvs2Cc3YDsN=Tyq#_xN#<0abM?lGy;V0n@wFUA;9@xO|F~DSET9v zd7>KAf4mfBNYbO0fEp58MeYC~6ckKFZ5^$_0e(xC$5=Ziwn$s=5%qk|zNb{iMI{LHD+-qm}U zRvWVz&|~1CZw`-5tHH`hLCED;0~6WX~`zC^@?6G-#a7h1weHcnC;#q^#dam?)y^sA0l^GdMwMmy1C&#R5NuMq^AXxhW z*Z6$eE5gHeT;%m}kBqYY+8ZxYQN)&=Q>W*8m;wDgt(ZM`DA(l32BmXwto!gJ~k#A;bQjiv|(d@Y$ylh@6@Mj2QxO_fXY^nMU0Oog zm?J6OlAo_BGDYO_8Sj_un&-?~C=gU9j+Jx`=U18@jK&e4oksYck7sZ1A51(LZl%Xb zJWg*so^PVmEM}>ShhG3M8NN`|HL@s(%-A!HwU)}de}AkM@vtA~aesfGaI6jl;g!_u zBf>2FXZN2%8d`1*ErCU_f|L1b`{!$d`^j4e+MrKwSRcc*_)$hRXDf9g2CmFlHc+Rg zZ?oEz=ha1itjOS`z#Kf7zWK9O__`X{9&rb*dS z=_W;LnY*&);r|gC9seS=Tl&XQxGgM>;3EPRDZ?+rf={qw=xd*FYSG#@R`Lh5C=>B| zUC?IgSB~~oDfjP&=rU5dS;rksAqBt-Woj&A+*27h+*5ZRol|c|^5^rK5&uws<By|QZeKz0e&LP*4$75nbGJ?Dp(hhNQ z4hu^|wc1Oui6euYQ~Bu0m`coCCYa`wC`nw5ygp-e5FJ zUn9nZlW17uOvPg=850A_tA++Fk;A~KNY54vba%6ntrU>({!HF~!Cb1^H~2TJzS9wV zI2&7?)cV%xW&aA^uIAzGWnwjeAeL(Co+<`x? 
z0G}Ig;qJB@=EZD*DAY zQ-v=_&Zw`;{(n5{MT;{8ixX?}e>RXQ{9H}*E4xjC+LU7Ar@@#}g3KW%z)Ut&qu(Iu zgvLV}Y{j#aW>hrf@el83A*{LYn3q9=wqDZ=Fp=6j)+KnmHmB2V#h;S#ge7(r+b8Vs zcH2GsspizF%1&Ch!?PAgkl<=-OP|BDjZ~Uua-i$)&hh0kcC2itE{?OvDfQ&0E>Whe_uAo+Qed2WikPYpLqVdlzG3EDN6GY z(V4}9G}5|l#7ZgA=n{Lmj;M_#UVgWJ7umTLc0fe#|8a4C-hJXH+b2V`ybV?|`}`I~ zcYWs4Ru*BhQ*$nUK9*W1@k1jFYb-fOr;ULA^=z)Ji=5ho;(LhTuULah8QB+bxDVH} zFuiI%gQEpE(h@6C_-8k{9)`-IG%0pF>N4I^G=&JGvxlY@0Rq3~HbNt@Aj#Row7tW_ zu&*Q}KVwEmGx1Yinhi-Fug|Eti^>bm+T74Q?~cM^#_m4zSvN-(kmt@<4#1j1c26w; z-NvucTDC1q!3rXeB`5tog2JCmekjK~&D9Pw@qEkY5Et@hTh^*Hn4lgbWLctx4gyvykLULV|cYoSl?*+IF?ScYIB4v1sExZ5Sy@~u|EwnpyoD^!a9`PPe&5kTjl=SRbnr}x< zi$7a!#qBV)%V{{a$vl1Rv}U^r4i~{Jkvm2}k1azP{sF6xRD5hdL?kaC2X#zvcHBRr zl)~r3K`9g2o#p2jFpIUMetAh4D97&FJ!5GZtueReNXVldc%DP(vOnP)6be7u^ZA#- z$pMQs?NUVe?8YG5;E*!cj{o}C0t(`oX{E2Z!XdXQZ1mW6HtNXPuV;7Kye9wZxV&!Z z{;8D{rQG?w#8Ze2KQ?wi46T5Hep0A0g_S;s zwey$gxooBe#1CW=-E(ZL%24_=RsD^cnM2|ikqSpy1 zv{1qNw-zSLt7$>KQ01V)Hrx9y(W+H#9S4R~zl~J^UqQ{aQrjL}s?=%I-*RF!JXnX_ z?DT>&I?wq=D~>-lbRCMb-BL75u77E7oa6gg6u#nkFDfT0L(TM0E$EWRK3WfMOggn> zfopDspkRk=-*u_I0D3!{xPsGhuCEVjdnjsfkDm7wC4ekN*M#-40h9EL)LfOn$QE-C?Bcrg%gX_}#CX!4+ zQQd9_3ehr1hrx`7TJ!4@%P!LMn2A>TcOLq~1Lsx2_XZ>rh-Wk|G~C@^m$5HtNV#4}sn#eO3ox=M>qq=@Do2XOG$P|i&2Dp)!Z3DPYx zW4$&E<$SMRn#`OybgviNxi5O(p>$ZZ@Yyjf`o<*V*aRp?mV8 z4=ah?XpBC09cm3H*KYM*DztHX+-FT;v%wboB~b45DhRZrEw8$hQ%ZY4!VuI==XqWI zu<$;*W^{EKKK0Zx%Xrmjt?S71zFIYCKYw;|W!zu$IvX660guR&k>!fj!kWg+WmnUj zM&xx-j)rI`A7w~OHN0AqV=OPx zwz@sVb*A<++``fNLWNnC1;V~#vv0q9i^qe7mU<^Z7n5ECREy2?<{rT}-YP&)@-F;v z9P_zQB6CwLktFK;pL{|gtZ*U;a%2j^Eb)xDAd!(2wt<*@>T3YpU=AC*`U|1_amcrO zebqVp6O`qMSd||)<{l%;gDu6;AN+sx%bC+b$;U)3M)2f}l=DvPy4TMu<%#F|Jh`TD z7%|M7F%GTz5giR?Q#hr~#r$UJ&D z2DQGi?nQgC?$c&JHbHc=X7$kp(UEcQs9SO2YLCkE$FH5^7CRi!^;&v*3Ua7*(``c( z?!$q-NVFdU0Vmd>ZZ;e9hpenD^`~Vq0JS(9eAEijo6P6~pwCiA1~Gr@*1q24=zmrp zJz3(#NVY8bXAhW80e5(}mm3sdjl-ZD5ziE;p94|2ots%bsRnz-Y+N{(Sg9wOK7(mfE zi@ZU;JKqk=W}@(n)|KbreB=LQtqt0F&c_NvD8HirR?|z+t*awX!&C_|CwjDD0rMR_ z55n>KQ8~y!IPu=&8<8^1tVPU=nJkRqU2o^rA_0{5ShZHFKN?~diRRG-*QEl?a+6gE zUY+h>A!psj{;_U+U%izZc@iakztmN7j@AN$JKZyS-c-UFgZRLj-1%s!34HV=e~dtf zO}g9b_|lau3B}3b;YY{->oGF2IHd^q@j*u2=$~%)2BJq~)+_aRSfg?mQ@Ah6FvP^f z656;C)zf}M--~J!ua=5Ht6y@07)|a&W6ij zEW|+;_m*di{t=l010dg#MXao7g$o2|^V@A8(SaTu=oaL_Cy?=u8P5C|;6I`Dy(ALS#eQB{9{aaj{@Gb3SfX=s zgSG|n;<;9g+!R1Z=S7%hGKR*sMt%o%Nt92WkovaSH; zsQ#z+#QI<+$0$Cpbttcu1Uw#kFrF7p>>v2Oo9-x=BWGGgC2WJ&USsF8e|UH6ZZT&a zFM7n^dU(M`H>1Y1g&d9+*?FuvS4^D1#T{wnm$TgzN6c~D1g*w!%LW(yC_Te1_Yh@s>I@=_>#)fp}~)7C`oB%bVep|C6Pw@ zgu`=EnREbSeqxW(8_4!~b^5SaHZv0D_$xUnGf_e3?zj6mRX6^{ml_PInYwC&RmUa* zdfNFiHB>rT?~i3HdUJ^5owv(c>KXSKp0_8Pz5hy7NFGQy?5KrClK9kcf49Ww4e6`W{XYAxTr}u_iQ3Qn-CLAX+p7tGAXITto}AH-T-A6l#85qA&V<7 zkY)u=q9%1Lq*<@kVgg9m#x=h*Bm|B!(hDQK>7<^={^(#Avn8JK^@)9NTyAiAjvCv~ z>~PB?g;h|4CDF;rREB0-&o}-nRzhA%yPkx!s6wn(v;uFxlA3uyZJPcB#QI>))Z)pW z;2nM}zrfl}*0dG)#R@KX)eZHT{by^@kU(_M$HT?PH3FGJMifbv((FtK<=J_Ct)vgb z*i*c;XH-_h6HTOh#y{kt9GYOh~Hwg@OIP<`G^k`qm6BqIR0 z`+?_Xyf_=E08)M7GhQIEB2;D6qcdY(os=QsNm+5_bUDyx`+5%qe<6)Vm>NcD;qR9S z04(T-$o6f$iz$sZ5Zyag@4Pk%Ndlo0H~Yc!xV2)n-BDdEN*rq*xPldOrIKacH+=U6 zgkuo=pj#fm2G3kGN-i3PLvU^A{BtYy!6G8_S7^{ZTiA{Mq%fsr(wx#T(A4q@kBkv& z#2ByTOS>9JJ90%eR2DF|Z+BKJykFq)#&pFPF*^+8GWddEK3d#AZn)ipIs&U1oTzPE zc1_#HHYU&TZgClOI;|I2Y>)mz!U;S{6kQ6hlGng@64!Fp9y-@;clozt4HlpA@d;nJ`4a7s4`xgoX30mm`uAlHQV>_!`eGQ+FiYTLoobvQc;Fn)0R~tw8np!*i1qMTKXg zXR5kZQ9s^@)LyG?(=xSqAOugdEH7Wy_}lXCGwI^Npgt@Ybfuw&*$aFQ9t@l1dZ9a8 zof`WyL|ojBGTs-k4 zod@A?1QDH>186tr5RJBRD}NOik4u#iAJWJ&eg$PuDcK9 zPTiil@UBg0?QdYTBugbTj$PUdiBzhz=5^;ztvZfPaPX6Idp=Yovi`&2H6l90&`umX 
z+KoVOB89P1E7Aw@Ypv_d_ZiE07#SWn@8s_jmrDtXBfGlEff6V9ebeLesbBMU!(xOb zOp~=wj8}@_wE$o0^yBsDA)rOQ-)rl8&qyGST6>CP|Ac08PYh6yx9se7giL6;ZDlsq zrV~QS$UI&2%c*B;`PasMzrH%Nxve{M%p&+98`^jBAUUktpfNsPr=^VC#+uu~!LTQ5dHq^2>CD|lB(iVbgNHwt8f`#i+y)HTj5@uc%{5Ric*vyq5xMMMN~|$(W=7irqNMKsbcd$^--WUeqe@I&*rj z7A^HnqG|TvJ&;+X`n+{d!_t8+zs6-Xw1+RwvV;}y0>#qah3v%ER@@zk08-MYnyPHLKupJ?QFs^8H^%A%HI!Zw(UAkG)<4_*!4=WDK5!IpdC#%n=^e6u6C7u?jwiGyLceIxuyY*FG-_ zU6hx^swYvGF0Ah8o6_Y=R%V41=Dztt6=(NY1hQ2=`0)l-4d-XE1_m_bVVUlL8i1~N zJ(dnu4pr4~N<U+MkC_qO%DTqp70&b3b0>mz~bl$;pgS(x2! zuViT`&>J|)!+vWS3`A?@>G?eg5>h?4BS%}5-?7AYh7`9*dM``y8{VdJ;O=a% z4K)B#xXH{ee}+t6W-cinVAeJOK}CBNhQ^_%FjNwqPSjgTw1LtY{>sV)Wm#txJOP}^ z5;k07iPY|m?4 z-Nzk!>n!*Sz3@+Oim4^{`$V}Lk^$oK2!JysySq^V++xHQ5y@wxrLp_h5PaiuUu2jd zt&9C^zufZtzjFf=IG#X>pmoDa>3zj8a9K?_e|J2;z82ZL92i!A!Ox2JCa$WI6!w49 z`y!}yC5WY)(gEK`f**YE@y3=~ni@K+s|sWKbbB~QwwFzMH&<{~!F4e+)$IGyS{Csg zMHvWjfS=F;miYJNNy69-^VL@8UdDZfVd9LN%WIJ)hD>7}%Gk0M)^5y*p@tq>673;S z-OAJFdZ~RahqLD@U@!{+{jDH@%S7hUp&=15|Jm=IcrC5AV=HSR*!TRPHwUaGGj-Ee<|}?aM+!1l4GnR z?|};V)QY|if#$rBc`$#&LFaB!I6eH#3qJ1yGS+IcE0+vz&}9*;MJbW?5&uQw7a!p_ z4qPi$3Ub#q3}Ch7E*uU|FV|ZH_TMixJLL#c^4-bBD>?G>+D?F+1piB1dM7R*Yp@$h-Nj-zj${x=0_Nt6b1C`Yz+0ncM)d$-RO zm{{0?CaW`7fXP33=w|QHbpyF(`&?kf&D2%{zOa%Ze5M7|NlLcxo1IDO%wiZSW_znZ z%9URDjFiXjyGRA*ln+@gb``;wuf%e*84cIlYL(sPtX#ZIQkL{DggNs+hiXonPpyY+ z_63?5L*c7g5}DcM^n4!wltp~|kLK$CJj|9hREA;wM-=(r58D3Z@9z118>Vk|O54~= zuuc8XpDRicgv?I>DGd*=$o8-v`jxV^wOC~5!&4Vu==64>;af7Cj%M_thh%7GTYrHG zX)R1q(N{8Rtha|xKy6|3=kKEGyZ9->S~~89|NCPtJJ$4${U;YBa=EQPmUi}myjzwE zd1QB50n_1mC|<0psqI4SnVkuOk?)N#AY_&H$rH<9+aBPFA^dH1?E1cv*a3apAu?m0 z2h&9tBSTvBnnktN5h`QE7yTl$B%EskG8aG|?W_22q=ppCK28#+8& zCtIUh3J5%H(E@eD<(&tft*|ilR(E2+_P2K8(DA&;$Pnz>8$Wb--ZFWD0SwPs6JlgeTRvM3jIVQ{gT zUVm!c{*CT2z}ufZE|j5YWNUX#&pM9Rm=Y}q-%oYkBRC)`0vI8DWQswHg~Vy^D>;CJ zfax++62~NK;k&rcd-wdctjYKtP2O{5xKBG0A<3J-%*Fn0{{YwZKpc41iD@x{81-Rb z{Y^fw8HQR0Rsgx6!WXkFxdU1VVvl}O4#)fQ)tN#G}du z@ZZp@E?*(uL!#;nwp233KF_e{{Hy|fD{vA98%VJE@f6cHa zF0AQ<{H{28yKAX}@K23VHj!e=Jg!mDMwF*@M)jxR;eqSdTI{+xW~4`*7EdfYYr6hp zJL}$m*K~aNm!v?iv9Xrk%SAkw-{yyaGkhu#ix#aKtT|F(Erk6#khlR;G7}iPP^Bk+ z5gqgUPK=TiLdj^4>^8`vmbfYDpKY><SmClofHe#}zaN)jnxL*xWZtmwlpPZ}w z6kABdEmelc?Y(&(e(hkL-#sA$CEwmbne^C{S`0gz;n@y9V8))EWItecUI)yL3Anwg zL8Dy5Kma>SpQuW|`q%rZ<4RY``&DV|>luWxj9DwdjyxEN#`+_BeS(*eUf_39`9f1Y z*#t2FTc!ZaA79zvGlFiU7E8X`O2PayttvtQf66T&*Iu<1p0v-4aQWb}ud$ErCjvR?hyd3T+Y za*PX1ggJP*Zq63 zyTj|o-*FvBR--A!Kpq?7ti6u}FiUP1FZrw3)EeAO-qr9Xr-hu>A0-QaE9pQ1M>G^1 z9jQT+nYIH!YDT0>tm0eW!!H%Uf((gqw;3+tpd;GePPtdSuyQApIAotXY23>7py9UD{3KVQ5W=gamOmfff{g%3a^Hb&H}XSIIPLQr;o*xu%;<3X3fZ_7l1qm zJ{~0O#qT!9szjpnIR7bTlRc*r4INkmwe?7pM+_)= zwAlhBt#Qm>ZPm1>F{&XWt}DMTqmTv*X&fI1uxR(4n%Z=X)m|-KH=_AX#Xhaif-Sc0 z!X^c+jt6gN3y4H(rGpo^$(N0K-8GUjXTm8P19=JW8ZA6Yw5~wbTw)Z08Hz}1OvZ}}^1}q;SAWYzgKln3Q7dr}(rx#8` z8huhP@nRmYB?g5blZy0WC)g zF}u)SL8I)R*iE8e{nF>-czkNB;U#jjb;ehSDG|OuPfw|#U(bCn;NVsGHvZm(OTw5m z^!I4E;c`oz87=<(Jgk}ei(-sLrchhvhmUCT;9{)u!!$f2vZzHP!X_qy%z72p$Kqff z^*JL12S@vIIoVx^BJk#z zBah*+!Xo^wDZWGeQl^eI*tLrT^`W8+=&gZ6qSC{eu1Ml*9F)*MJOkClke3PSV-n}P z%*AF4)^{eoX>F}M9a{(uIzPWkD48LX$;vcAY19z$)FT`4U?vnnt&d5n_6JJEbn7S( z?+N1j0*CmPJegUpn@llQ>qf+(P*@TeGA!i&ds8XefvC+e1|ctZ4wCQGY0f!ffx?Y{ z&4q4ckF;~|N0dDBqS08|O_d8Mz!41>2kNbyCA6u4hxecTs*g}d%ugF36I@;%B+!r1 zS;hpaYGercV1EqK|I%osE#xPJRrZ;{e{0n`iJ`oS5pihbAuaAG3I{VA)d3vflA zYg=yoz|*MG=Q}pWQ=>?|kGK@k*cv>t+UFISO5Wh>W622o7$}HVp7;Z|cj?=5mR2qQ zf<1wTCb;&|d)_wUzjQ-2?r$_RXfVTS3 zf0AUofp)U%&P@fifPxg0Wv`+&p#T=U5%%G#7s?@#S(J;fiZ5|=KYdL(imfFtVWS~$ z^G0)1DB;uMiCx_(ZenLrsmd3z?czf_o|q9EHPbzfnfiB8lM*d@w)#h|1KmHBwBM?9 
znOvQ58&ivE?{QzXVkpOeg#;xDYbvZ*WO(=MdpAz;NYo%h;NY0BMsE6IH*SI@_~F=(D#?E?fhoa_@PG zGkAW^;CP+|Ct=PpWTQN9Uj`Vrei4OXN)VA?5&GBV;TEVM z75Mr~%vfph2iG~ILl0K;7ypAv;}OIAU^%0ZiO>(mi#{raUaf_?MdE}XO+)eIji2RK z=xoBU(zS^!kKn!Jc04<(-wBeC)(AUij%VVcLMxF(gn$5J+w}F=p%`3&sz4cuc{3H9 z+ihSgNC+Te8D1UB-~kNIt@nA>j@R-x8VEW)?3mOVMqxB)wqRgjUasd6+%92wc$_bA?ilTWaad z34&`VOpc7a^&|IKO`>klruj1djzKu3hs%6WRLpEOhy00ynMz|{gbj2kDc@syr>9+ymo;|u+XxPQR48( zWluQ`W+saPT22}@%E6J`KIN;H&r0)O~-Fg7>#arygeZG1+N9Va;{O0T>|O5{pm7$y^R%*%hpXWY~D0g4X8 zD$p}-FJNjpp(=5q`dOBEA&n;0}39=PR4~>sx%%~ajlGx zKt^UP6gg2fMgefQ=SyouC^|_Q$L5MUpULKd(WxK>(wxoO>Jo`f>n~^}y$O0y0l(ygr;dr9I@w7qQx+rVsWL5dNWw zt%xlp$gPgg#LS*6A7`3ga+|dMcN6~?+-`A`Ti16H=-mypUch7HH71^jidxzmZ67gX zp`>b6Mrz+6(Q)JdjcakTA)T@-9~yMWGQ|@8054|h&bGHF7`nCjR1I$h?vWCy4IMDne3c&_?2HeWL$gQ<{ZF)x%KZXVo#R1m4Mn%h&T@ZF7`KR zH&hXTdlExq=w}k#L_PHglY&XkN$_GD=^I+}X-6>Ox#MBr5kjn?DW9=eBm; z@^4Q;eJiBFhZJH@TVKW@_(~E;E$|eZB6)`*Hn82B1f9haIbbMRGzhAFnjmvyo z>h!}a=5Ll%n=&eyk%-VX^XIdTq-A$G+}~wrFnnW7oPaJu<=+jG#+gaWE^Y(4S|)dn zIoHSgt`8C8x6e6QOPd;1gFv>&)-#tAFclRt7dBvrlFYuF_?V`& z=Ozw``_ zD#*yli?ud5zI+#$uMIa)d3OL?8?;{}nnuKJBFEiN9tz3EZF&1d5}lJ%t$ydxGAd+@ zcq(kL@lBABWqORuJ}0~NVj>#ObcEC_OgW7y9UJxA>($T}dL5;rV$_6c?g&~Y3o`4E zQGbjVa2b?rniz1WG*bLy?__<|%3fzXdrmx2qmM0j3jnRORS+a~F&a*B04hq>8S^}U z?V(z=T;b9`JvROhSSpUi8@!m9EA9uj1^B}&S^kcXVo=}sIV#l*dQb`;@qV!OuADc! zsq(LB+?`^6l{5HpO(>ntLag*EfWldC6A^GH?M7V7rk`tjKGSZ>R3QEQ%P5rDQn^e- zwgPn!1xZvIP^@EM!4D(!1=AFmYEhDstIYf@Z6Q`UQP5jAg)RXAyuql1&5+iOlf!{- zi9(>SdbJVFvsH{xkbWIruC_)D3n4lTL`W@yJbHme`7HYn!BEEzSeu(pgh;6&D#Us6 zDZA^z>)b(fUV*eHB($(ds*Wb2JNZ08_FgrjXX zjZ`{lQ4lTb$_29%Vbl-Pqb+5`+r#_K2~Xj|=+GXu>)tKwXrq9V)TmZ4%YhP4N_@Qa z3|}`agHC@IMQK;bF3?6^KmEWLE61%1qrhV>^2;pI-4=&@-;>h_nGOev8u8tHjV}pr zfv#y^sD_yxZ{M+jkxzv*YY+*(Ur4`GX60JpDPM8As`zZGxNw%2hOH41WfMs_yV4Dt zYYtyD{ynKOSnEj0;$J&+hlD2zd1DZItTnWfG8n-KoW-zH%H{`_!FmS!E^c%w|IvP0 z`sEW)&8dQ$?k3HZ#;K#;?3P{q_uwjb7f||+98*U8`}EdE!tvBpu+G{jr*2+29u@tu zWgFSA%=PI66Fb^Zjf;j5({U(WFUba^pIy!LS)~p|h)NFa6sKeTqIx5{TJC+=+>DtZ z?Ky!}mPkL4^JmT}_czWnE8yR%jd|H0yCRayUkjFRegHqtkZ~U4$AAVLo_c6zQ~!~P zREy(Hsr^*-&@Vs`GhE9jg!W5RlJ?iax;yB^Y`#l_bX+gN@}_ozXbSilLHK!+s{oZ17d93+UMH@ z0IqEU-5j5Q0`(7bb8{xosy}~(aIr2j&X;iXH0tTB#{R6LEU#1h%2qg22(bJ&*Y9LwZ`1z_Q&<$JN}zSs};4u zE(LE?X5$IFXT2B6O~0G3$hvd3MmKL`6jM-vPP6*i?8H_{nY4SObUI?pG9drPanGSN z-?pC@{3>YBa{>mN?iUZMA_nPHRif`pdtg7H#c9A86k>8JaGSKS?DXxi)#7ovGS3Yu zmB+TcEm$hnWL-lAT)XEzUk`K2KkrU7XA8G_>)@@UHmI%( z#2PhX)E~WW_R^qTcPf*aoy)-kEWJDru%f!%8>;021@f@ zW4<60DzN5n%fQ;#LL)6_c;?G5Epo*rIUxzzclzg{ zqLp~%x3r#lG(G5BVm92n?zps#E;zr?$a;h<(BnEscIr1i5(%`GA}Zc3L)DPji6{~2 zbQ=f`%A2igq3GMolQ)8h(|pSs(5ywyPeJ@%lmJ$KoJs0??FDviGXfyo5=}fHn8-ab zvNG8)V~|jj2-&rKcKjBvC!gLGOSKgzfFH+5tRu z<`(S#)dB!o3W;bGteAwkLS9?TXf?0Kp{IeibLoD}xRO$!q;+oHkLliZ54GaA&M(6- zw(KZT;KI>hliwJjy;~hz;-N&ujgvo{Q8zzHZG{XlzI6aT^#MxSd+M|EykGvQCgQG- zx$q~lQ%4?ILUwKLb(Oh+lgGtIIp z*j9J~P8%`9(I0vBLyex)Z*tl#l=?4%(sJ4QW&Jye?mjj=z|E7w0b;&&ws&~}1~LWj zsdj=oB$+-nhWjc$wt?j%kmkl}H+39W|5~Gxp|{b|+v*5^HX4qE-P`-lc$z~!Su%9& zH0$#;d#LAp7?DRQ7*PQd>@Op@yl_3!biYd^#yi7Iv=SgH{LX`H1IXB)#ZyQoltbkE zsr(5$Ah&|4jpsn<@-e?JvbnKBR}ojA^EoaCA=X(=R4GaO1B}@jykO^TwPYw^ZumUo z(V3WF3xL+UAq7!GVkXVZ*sV}xJo25_sF#5fXL{*AZ8a?;Z^sne0hC20(+TsOb0+95 zq5E#F%z%{uo4HDem zJbsrM8xa8n)irqgh^Fh&yF5z z%8|kSl|l5zU_8#JUQ~}1-p_FGoPs2c)(gUz8o3ZMjYU9uI=41-k}}!zvV|)$yCMN+ znrM-tKoAI%H|I2tGOR*MY~WuNR$z`#OoM@Apry+8?7Bmi=SLA)yUHcGF3>^u17ukm z`^^iTq%2d46x|auUF{1E6VxHLZgWEk`Vl>Cdt$kbGt)$?7_?h{l#7SwD68Oc7cGs6 zj1f|C%tXGZLkb2z+*<~E(2y9C_(G-5doA8u{n5^P*auEBV(TxEGHEOvGt?hO80|Ck z)KUOlGXqj;qef= zQSfRJu@?k8JNx<7ReS}VW>vYm;q42xAIaAzo`!mhOfW)wpS3-<|A3H&xRWT_Up%MD 
z@R1t=9TFxNvbw8ob6%0K8CA)A>9~xT8sgp7V8TN&rM!nZn+zjWTkIkhSO^nHOR+Ed zJpL)od^=P>fBQaFBmNl%ep2%C4zDyBMKQ5T21JeUA>zY4i1MCvsfjSpHNVd<(szho z?YC33MoYO?cF23j=CaC2D+m5qTHe11mJH|KGO8NtI?vo!> z@8~9Jww}qr;(>bB71?G7<>k_N9V3*_<&f8{(@e~sS{ERkb=Kh z+30+ppBp5dQT_`el{cndWlh|(+kRnK-lqf$TYe_*WipbZVENPoyX){ZEm{`nh|EM5 zDJB*fMA;ZlA>5%NCje8AvXx)y1W41tuu(H^C&K*hO0pJSa`8JZO6B=<9I~qbe_GCZG;8o2>1gc3 z*6UMb*moeB+Y*7LzR!#Ic`?Lgd7$OKxhaw2skv99Ya*E2RA(^P%r21I^oCG;ViW&9 z-Agx7NYobPxo&2Yq)p97XA1Vi{8Q*C1N)6Akbp4u{pm9qbdf`@n_3bPag9S7S3QB` z_$2xvY?lk+28N@*+F(k8!gXOV#~F^vO&T9o8QD$ZC;i1m<=q#(rh4Ye@4JEjXbScW zi6SG>Esnw_*k>WNSP_fqzlq4>p&&;Jm8eiO=}IVn(HG4NTr!blCo_g<+X$4F0DGSX z$_9xrDw)0Of^>Cj2_fk4Z59&%C7d!9?ZtC_7pQ|yc+;Sd`5Z;i*tj$JUHlkKunZ`@ zY`cH=xNWYsTi;*1`3$TR1NRu9T*rhnfdV6SyM_36197$%&ID3_e=_r6=?Wst+wm)p zYrFaM|A)Q*j%#A=-o9Z30Tl%i6_sWMQHn?h2_PT}1d!gNAe{(OrGz3XAkw50Ak-i= z(jf>zX-e;*NRt{uhfop{-a+@?*WTB4-Oum7@8{3wozG{JZOUZM$;_O`I@Wis@enXu z3QmzmGQ(k4eFH>h^259H-k@lA>X{xL1Z>n0YmAsdrvcKSZ4ntqh50G^w zxx$Q6D$?PXUSCDZYL53SA4xz&KWKwp;vQHmej#(GcX+ z($9V}dqXSiB4mmJWfetNM+S-eO zfz+19^BlD@P8**uk8ZF(GOSQqUHt=)G^q6{4pr&@X47AgErSQrAw_3W4!D))B|mWr zTHMiOzsGb;YjT@&Y^`UT`mjhVrb+Nimj{*EXk-L$&kG5cIljX@A3&0+@qsg@E}U;) z&QVhU3LhBUXObwU+%!s5n%t6$H|UzXbdd99v6nf)ccnN6a-$C?kE7R-nf;aQ#~?rT ztQ z*^{NZo_J9!du8dx2lsKEZ|c?>u69s18mOZ;MOrh9wOJGcz`!;FGo=#w`dqDWw@%Pe zY2BH*$?m81e#kQy$$TjmdShua?#K|YaPCO!B9=glYRDg_+2?V~?2G5*N;;x-D27i8%A8OALJg? z3e~Y^-rbt7I~l^*gx5#!27HnDC z2E`E^q|q(ZBcyWs{jt6B5cMbO7E{57yo>W4cO1T*A9#F&CTm+8(xr%1P@>L@dh6e) z5-snfO?dob&!f%Y!PTn@9|8=av;|)$-5Od?o_-re)i0i)RIhf!he*>jY== zOqeNKKi~e zMlU`a#rpB=q~@cKpHx&(F&kR0b-9BrN~?RiQ>4aTNgjx}8JC+3R0j1_YxDGF<}LRF zsL~*Sfgt=SAqO=tkEcAZRk~XXbqO?rIp%+BMmdI@`mrI)}YRx}!98r0@=^I`P4>$)RUKR$S!jygnzNG8>t!uFAxRGqpPD zKcS+g2Q+mmjQ9uuCaE3xeH6CC-=YDBB*~O42Olwr?z-t5=EW>t^j4HkLJ7&5oyl#( z{59qDeknleX}ZOR+&#>?>1Gi<#f3NqlH89>3!s}@a6`-`tojOL%z0NIA2~==36UED zHPX*Mfmaj)Rh(;lmz^e`&9v)epdaoVZianw?sK!d8{KLE)IDAUr=S4W(B#<)O+}wH zcZ}_^3z_36E&0H;9x^Ml!!6F_ie~!{MSbA{ zySMW6t2PXT&lIS)I1+aHG1mH36?Kmz4hv<>auw;e&Ecy~Y;m?UEEk#iv_r;jywxV3 zX8ztb!(OVT)*)mZ^|5Yp@z(6Sqi!cA&lucmDKPH27nyWbav~wj$A9_eYaq^&`O2l(U8T$4Y&7X6% zTw2ANMkK9M)}`5N8JB;wRU9>fCshvHokwb)p||9>eVCU|dtT8#hx5~&*kbr6iP+-D zWTfCe9r3CBTlqjLAT3+IzFIsS?Y60F%VtMa&!Zbc0e1SaJfXhe%j%LiBGsFvmdrQq zt3Jvb1Tz`$Z(=zG=^O{O;W6uax`d7sj4+jWy()m3ez68$JwjzF<C;DR<{Q`hI)I zMpOYysPeO~@`*pQa?Vg)SH3suBoK;nIae+^6qij^(gcMS*|Z7k6)V4a+_TtI>!PX< zWjMo+R^NRZ;PrreR}oCh&93mizqSR4wnj@XML3ex5u(SqGR@xF7J*i>zn4aU55(*v zdbB3)y|&K9zbA5JDb8x<35&H(tanKv{ot>GYB0`Xec?CC7sR%99t4lO69Ajk^{_aO zif4kYQ9Ja#y11!ebi%9afX2JnQKfK#xmuP<{&bRZxUeuMhiK2Udi4T;73A(?k9INr z?0S*IadC3dTq2BLo~0gL{~drIGbk_n<^!&**42KRr`hXdaKQ}p^|6eh-Q~MRYb;il zE(C?QivZ0OwSdF6FF5zDx>(soRQb!X%aZE=^!R)6XsqXx>_0F?|6II!I*MWK2hUaebu`dEWBt#ytON%b z{`v0TkMtHmH;cb7-@c4gp{e>yX3j1Rs&aTJ7;~B{79RN-UEAXO!FaAq)T8)1!K_ciMQZ|A2e{805@Ab6Z>6faRaR*YDk6 z*E0eXs-n>+|J+l*ANeoQl>YM>#^#UzzM(?@<%c@+kHJV@_|MhU|Ie#|9(qi`&10qy zD?#GlBuNSiYPjNhFD{Q(y0|?y5>4&<`?js*bO5&`?x{1C1}2o-BW3H(Zk`_#iJM(` z2{+=&&J^j0@mg;*TB7h$m+jw=ft_hjvKI}F#Is5<=w~PIh$8lE9NkNGpugi^OEs4U z)yc_7zL)IPOe+lk_;7FY0PSUGWo3Yd-&?JV;L5Xi+L+;WU3F)pT#`5)%?%2ZrYE+c zi)4#T8`wNoGnXb1Q$YCiL{&bnpp9o|W@_q){OKQ7l7Ab?jKRtFGa&#mtUoyS1gm_| zT=wyJg2)wWaxE#Xh@#9cvCD6kK)&02Pv}f3XqYs zZFe~iXAdePhtu7O7>&*+d6$o!?EKrWB1*HN7#D05T(BhWRE4Hn#q1!TyE+8Hr6xNA zC)HLSelu?w<`2t&a)IJMJiL-JNS_56r1bx9WivOUb0m$2xp< zkkylxs=Khi?}Iuc=##;Q28A6&b(bbSYNr!(2ezmP)IAjfsmu2`pkvNUQjj}p^W&lf zklMtq%xZ24P0YE4!P$dK=LHrhID# zpRRTP@E-J0x8u}rSWPf`JLPE$_SfDwRc(E;STi>Wwcl%xRW6#)_;V|O$0F<^b*a|1 z!Ll!xm+>P9!g8bNWDr4^=)KcJHm%72He*fl-uW_nTOVo`FGj_N*gEkbDCqLbp(kVB 
z(UjuTzBI%ulx&+HFQ-rK&UG_i&#rw3)TB*9;AX|1DRUwNhF?CoeEq-(5EBnm9N!{@ z%K5ZWiC4?bv{zMYbI7Sv2eUU3{SWmUutT1&|8=SRxM%McU|2HoBa83=^}CzW%s{5H zc>lmbFmTM$+>te}01mew{pq!1_yTEnYM^p|(3{^6CUqA(du?@W`GPd@G2bKDxku~5 zFZ9zeV|d}F@ias(BNK8w{>1Z3zCqXSgQqL5{OfeF^SzL8BOmyxeumD9*Fb^M(Q!|D zM)rL@)&x;;^TR6prwcHJC8LShHvJbRY1J`N{*r3ehE$Ml~# zb<5QZS{{EvH1S}M!i8I&ae21ap8b;hUtg6m_vHnPo>nOGRC7fP@J(uoB8-l+tW!(F zn^v+O+e@Af?cPkx8ZytZ#KmxgzsG9oht1}3;JQIaKNjFvm)|^rxM+#>;!EeKUF2 z+ztCgA&d018xmKfMI`L*KK0N^$a;~zRJFAD`dP5$-O?J1Pp3LN)v32nGljG^(n#M> zyr@#DboU#LNbKoGrs4b7kp?{_{bx#~FYxsvZ_ta!jgeYLD@k=ZU8{($+8t7^R_?^k z50_3C%zW~EvUjTYD#PGfs-b&v8m zrpIIU&k%`InOk9*5p)A?i!~bE`DS~iy=5q0vLo)te#8P7IYJg3#C1Hs#^@*ybej!`-T%I% zU33+=qgu8dB0Ah>{d3}&k85b5zKQy(-vrmqK1dDu)~{_L1|Ce)n0*;;YC?3khphI- zb)Zc4RyYn8^%p7~VX~&gjrg7#zj<+-=z;5Pet!O*3CftqLh}U0qii+v;0wuT>{RIwqqAA+B&Bj*x-K$Cadvz}RXHDG`_dT>nzg<(N#8D;>_NmD0 z2Lv>9tTU~D$4&?kO5XNqgu6>y?lHbATf>?^Au(Byv-#=GP^O>Z9v&h++6-$UpZO z6_sznwX0|#Ioj*)f8O7>k-m)f-##rkSi7KvU-%B(Qs^@M(HHAKH@9YXx_U!_N>z(^ zI*UvHe7iqaZRxyknd{1rdol<64B5~lr{eE?McjhFBI^Qb=jP>B4K z^Z)!X@UJt0^?d*FKp=A)3N1IWC!QdjES*3UG&SU~)P_R$|)f5thYhjvTFjJvEiy%l(PgSCMI}nBE8% zlHIkx|Eed)($jqVXVbikPM^Ua>s$x`S^BR4E0FEx-j=&9)2+<3b>uSB1^ZuLN@F7D z*Fyu_;^U76TJ}?s<(T$^ITtLL%6g-<5qtr|^WBq1|3@pj%t;^I&7@Wz0etV_*GUq- zu#}$k|34XGAkU2>%*24>yUa4(Eu1#lIc^&5W0DWM700-35iKE-8MZaZ~bdv#g5 z;<`>!YTVFex-f}_(yv?ILpU8HF2jlHEoR0ESU zYHQAV-5IKRnA?S*%pZ76Lbib3IG8Pc`#KB4$|5ev=3qd-XPeAaY>>t*)N8$Au|?+T z+B#US3EzfvS@gXL1@(CYI|;=@RLt&)wq77XYEu>z4O?38BfKU&H%`L*CSqM%1lJOh z4Hu>ZKjIWByA5pN>`$_5KF-QG-hwgUTRW zD*Lc>+c$#8zhB-pFhg7-*iWOz)hDX!K7+=5v^Ln)FESvZ7DfPZbrI?|dbtGQjUKO=(FQA9GJrs9<9OcQ7sxk@0H}x)4MRC6uot`BWAmxQ2{Yri$Q<>c5pci%hLQj-J3yLkC9mLbU$#5 z42)rPXTrcIs!I#ONHsDvPQluK}{KusrC%qjwo{A0_UzMDqwg79` z!YVY(ay}dSY;XF>E2HY(RajTr#r6_EnQyqGLsyUSIzD3!@b0t7i->toac3`*3+mUp5|Pz*Jc`sAN!lGhRdWjfy6A0Iv*AepU*WK zoh9a;@7ZO{d|!WByH*NG>+;#Wg~of(bGctY+&Z{?oB?);8n|3&A)aZp=K*OVcFw4x z-K#M71GlZg^~YXqqq}^d-Gyt;=b-${h9Q9@CDNiJMk4$q#b}6eDx0K*sJpqeYan zFdOiv)lk=o6h)7*ZF{BmyqiT#w*_)?M~gBuJt&-^;_qH$Ir!}X8zh^H(rEVRxgHC< zLo@w|3C?q)^u-4XQCX2ikn3cto{9T+^`x}vEi8w(U7Wpaf(*sRmL)higzI8Wa?!Jm z)A7gt8tHY7_C;W%K9_vlS8L1soLR$IdWMXB!z%nX1s66?CLd>oEvl_nM{R42x0-Ke zSAj}-aMG?EwXryhCryN&I~B9HGAt}1wqFbpVu$0^{>%*?o~-up#@gmOK}t}VPHkoQ;~ z$Bat#Gq9XV4b2yv8l^2JQ}%=xtAq1=}esUyYK3}Zla$5 zuAzhrIKsF_Fx$s_&aACnbIh2i*}OI4Cxe(!Eu2i(5cb#ea$aO%PY|WG45|8DLp%$k z2uJkJG?oZ+cnjAqFDm1N5rV4^UoI#pk{nI;XdL~j;vwJGi^6i7BO@5@`bm&elj`pI!hg`~L9cENQ!s3~Yh4|M^(wdD9?pX~ zPnR(8mEAcvSGMR0Me1!}Qhb`;wJVUcKR4z&(K@nqcA4CrWAHig^&3$81X~Fpmi4^du}{rJ-}ZnD32buMfc2Am8+%bNjSX-DHs7a0xb%px?^3>4LS-LBXG4$s z>%x!0;^1`!W#-4WfEoBw^GwSEOEeM%bj{o;0M3AW$3$+Ma0F6dY_-2OlK0$ z<9X9cG2>NE>V{M}^0FE}&DM}vXq>5>9zHsgQ0&&=WXn?W70IQ8{Rv#KE0})V(><@} z`)ujF*TQ$r(IVnOO%h#O=!3>IIGoAA+rTx2c8& z;prM1IM>DjewNDUmt@h{1bzusSJp8w%K8(IdI`ABHz*;BbB%4C4{FOi;%tbrV_?_$ znqYa8xG>O<3vhJA$_I*=Q|0etm^_hI>+Dj^mSPx|6Tl|dMsFDsJk8+Hch~jJl=w3e zM&5hxZTwtWp2QOI%MkxocO8D{$NPrX2%YzSpSbGH%vxezXM@yuQ3E%j<49G^2Qk)U z_zdc=XCr&mB}Rjz;^LkCCk{JciE1jt?{mA1!e)I zVYM@^N-D?5!#Usc&@5eb&cz+PUHZax@4of*Tz;%z}>We{zzb;H! 
z%C^XtPvUYxC#TW|hR!!N_?Nce76yz@$WF#;aLv~j*aHXdmzw-|8pV<~L%WS`iX^UE zUOT|a_zXDPSJ9>Wec_7twk{r?GkYC1IKof+A6Bb5#ta(W>|4x9k z*2ROd(U6tT@WadDPn_sgGvX(UrSoX$#$)t!r$%hW>r>gJ&_&`iKqd9!SF?VS&SVp& z7+YSN*p08bIA(`5p?VyXFP!X5^|X^EH!}r~m+$E>oN6k#P1EIjM@75RR9Sg-zxJob z>k?ps+UXOR;s!H{V@?b8Zh=1u>tHRfut-pTxlOy*7|+*tTNE;B}|mS7>4-5nJaq2{8@Sz zFiM`TZsQ%32dIO@8v6-|GUDY%oVgyeP>=G0v@0;%mh)^%^&)(~)8Te7A9~&z>|bI| z$vK`Odm9=eD>Zg7)e<_qGo!;4+xcX_r$A;(T1jLmjI_r2|5XIy;v|nMPCy?W}14tvxej}pW(T8Mz ze*}5;eC%7O>;*s8F*gDmF5F{G!;!bj{ppl)vp~-6i%SQsPtr9MCesHF2QT}elG4tG z>y_5W%$^Qy&DB^<3D&LqUEJN@!%(EX+5|Dp9VLuM4i!3Z74Oo(JPUMH zI8BU^&#uW!LDLa${1ugS%^wg~7yQNuQth<-G@H`0lcB#upelb#1|aOeobpbiwtpnL zb`6LCWV%%}_+WGMtTr#J4%CSZtf{e_7Uaf)3l44p4cB^;|4%|HA>o}JjaacGXRT(e z1$m*=_LIuM7kj70rX~|54x0ZnsG}19uaTkwruD)<0>ERlep0UDDcl){+Fb=seZnP) zNS->VncMC~0-@=7uL&u4uiOKD6wz+a!g1D7sF3)BZP$cD_*-KxX(0lU5^OI@MRn{m z8B&)DBF95+L+S4Ly90r%e{~ux4SX2ZPPTIG_sdI>=a{(<75k^WzG$W%f{spQyp z-(a63r=|e+5A8H%h!d(+#*l}!RIX8GCk|MzUX_xeP+|r|k`$D!a7i2%*&R<94yU0w z93ePv5G}l*O_#m$&VBLHLj1qNQ6Rn6D=O=_y7+?$Ckz5S9~WUMl`YYWdD?7be8nVarzW9|I~3J+h0e|e~W2%7dIyaYodZd zr?D3_Pa=G~S7{VY7wG>yfS5dBoO`1!3tW_^J=}Ll*N7m~{p;`*BL=s)KH}1(dvENU23`u#`#pL&&O3wuI8XyX9oblXk5(4;SSHu+ zfE74gnXF-yCbpuhS9|HL<~t8=0Hin1)eQ0M%I|P9l(OP8nGl_pni*S$A|qfAc}!w~ zRUUo|?v)-dHrup2Lb#iYiYkf8N0MsahE&z==_?di0?lYE4m2o(CBO11D;LbADV3$% z1*c3rTqO@fGI)H~E=sZF-tQ@Ha&k476l)2ssJh(d;b1kG&j&BLZSD%vFMRfG11|y= zvT+{h)6wF;to454%*Q3Yuu}*)$fs@c8}eX^t0!Ijhl(sAgH^Q;0i#Hw=Wg%x4kJt= zkxUExzE+c1l}cllebD2|eSsaZss8iep1fN*W@g{lO;OUWrg@piVPV>S+e&VX#ruL( zLN*%)fr7o68^XogH=hwil-O7&+opTBDQ8Y>9buQ#)C0HLivS_qRihj=&+mf05XzEZ zFaE0jN9SE|(y=@M@bl1aWm2+6?84rKCfH4B9^^g}$i1l>fj45VyC(o$6%<;01A1Ea zG&$+H6O_2O=j63db_M>UnaF-wb*zzwEWvNOh%sGKU99K3c^`MG@vO-3Hu6f%XyGHa z`rJo_8f-0&O+8#P>MTJA@9WmL7D+6?--}5S`_`^o$`SUP&e-fSST>&C-qsu*vq1p& zpLiWmK?^uEbq~RLY?=7UODrFMIX6_S5+Ht39&QJ?f>BYGq?5yjdl$?^t&Pfpan(HV zgZ*9X0$G@;aE}5r_ULh}{Ym&T3pevN_oifzh5uFMun#i%00Tke)0~7g2;1S3y&65{$*Fa zUYr&lG->^qQ)32y*OIu@J#1+acnekZ971v+2C42{A=g)MjV#GltvqhbGE*+FUrQkO z>j$!vt8$6E{i}U`_{MG(%h2z7@fK=(5<53c_v+?5w?l^Q`c#(c&5a0MF#;@iZ(s=T zuzii$D9+vIKYR6+M}PFEz!RQ--L6V_y`Y^^ng_3HyLzKsh!p|ux=EvK;AaXqdArMtX)ZtCF8c#d}v2JEDYUpoo5uPTG-lwDjB&TcYb ztqK>chYLiAvZ0G2zO!ZzD(|GM>`N@rL+Z}|vcWWhK}fg4Ft7lYs|brs_rA*wbDP)i z(kDBX=2+Pyzz0t2%IV%1r0>U?2X|*Rss;1%6dLup3}rta2QBI33iX8(jEfH1Q zkf;Qsy@l@Vy0b3q=NQ`c-Y&A|zjWxU+8L<9m9q(6yPAQ0{`vh1jIeTZ_FZ`6?5>m$ zjXDU}RjW&NKvu~Dw5PS*rb7<;w(O(O+L^1N*7#l0eYhmlVLgHS3W?*ieicd7u;#l> z=>*eyO~p2eXvqP69z9fMn12gZ;A>1WFE9p5YLITl%lAH@QL!eKF`NqA2OmqjkOxio zFug_rPUw4s4htn9F{}VD;rN?S-?iENG|b^8*oSa@O#5zO7<4CD+XP+_rM0PUvvip- zp1oYI&nw#-Q~M2x&)m4iON8W-lZ$q;BP*Vf8Xay9m`h!EVr_lvhR1b5gdefQ*CkQ* zMWf4}ntm2n)}OzDUm%1YAE?HQ-E?7A01jw%mc!#f_B=01TWlfn#E^4@teP(e&8?FHie#NRbgAo-xj~WljYDlDWNVw zDAgf4NdhkRM~ys+zMX6iuTx*pEKBLnu@EFnS#^GTuE*Pi$dvfD>baM8fq~oUVhgF0 zQ?CnTET?(m$G`(QA|Maz`8nB6%0TNxacV-L%`3?5N7`GDmc=E@g@VLFiuGEL&=!ws&Q z(ni@rm(cSLG&=;h_PvTM*%Q!~WbYcuZBvs#ycD_7m8+^l_s3Rou6Xkc-l*a3H+9cK z3&%obJ;OT@m?ta=WS%XQ83n#Y58?Jd z9pXWrvt>oopt{EjXj=X)RPxrCn^+RjnvpWS1UYro3OcofHQC7StvOEI-8GFS3%KMx zhelEtLCuA+i-Rh>bJvTEh9gTL-$zv3!v*YF;yq=QAq+|$l)K)R6V>hTJkW(+CY@z_ z3rxrEF;mto2ib1+Uo&8PMO*8;5FAmcdGAYnWlOwAI_5PMY%8HTx)ajYdI!f+BEZzs z(n1%Dz;-A}k@76u=5y==*(1`MNhGOXPH}>$|PD?_UsZnMB=J0lx0kJVT z!c;^L*7D+Wb*Be!5t~EnC;rixxsvB6a$C4hxrTB_i0)pMscx>u--{VRe2L#gc93t- zbt^}#mIt=I5U3m5kw7>*jCNRRbUX{>v$4$+=SimsZF9yj@S?snZJ73&gs<~9Kc&t~)b`=8C`k5@J%TQ|K!uNCyGGjE- z6u5<4^@rexRo7-)UFVLLDwZ{yjqTi!;D{m7L={ZRi@G%!QDKf=$uN4h=YU*w{))8WiBjRyu%UchtU?3 zIPARN#t%8bO!&A{Xpuv@Rd7rfxPRh?$9*G0wfLH$z!ng+1P9wpG_&1brvDuWi6qnD_9l?Z;--L 
zcNGEjL{ZUo>T9VhWuMk-KQ6 z&V`1*2JEu%jgNVnht6LX8zV+_k_wXb_w-Q1)*=GCbRlzEfpwtem0pQu+fb4()gZMj za1l7nzBTqL`^2s{%_AT#?dVCP-vj#l--65DZkxn$H0#cR%GNE5zrXbB{}-e_=wIyW z3mB%y)xeQ9U7E}Irp&FC;UHY`;%0ZbtJ5=)h}hqs@*nKQ6Krg4ZD;+cl8zDu7fvkT zEdwaOUR-_xBRBjQ*68cdAV&rVH`6o1b7)-VZJbBXE*F5hKo415j&=T#%-sL-K-lK) z0w(Qt8Y-Ta7%1if?B>fEOx#+00rBY7!1O08Keu|hBvJfEYwN}z)Tj#;JLFYYmN{Fpn|Xsz(9RCnn`oltY{G_q zfZ%|2H0%~!JFY9Fw@yj~gMyj(h|*6wE^SthaSd(c34R7TG^$5n5aC?1W+S7hI5`T9YLdwdi8xIay z^^GjLr|p{p;+^}ZS*S25T><;T9sbkU7dv%#P>-fmVcOLRIK)#^Z(6C7y7@;&{BipZ zy)g0Znzv9$9wah>n#=SkaZPbhKp;WD%aWmOv2>}^McXU55Y+flcDp(9BZ|L7DWh$# z`gvr+VpkZ>W5E5O{<5Zt9r{eIO? zjPN=w!4owmehXz4J>Q-o!cIw%z+cW9IyJf%+E^I3Zn$(Gg>WRvy83G1^5H-2gC`x; z=6mV{sbkJn@mOCk zhRNf^m});Uc63ecOOiaa4`$VB`7KaUrt{9D+(3y1E)3{TTlet&zrnA)gBTKF*d2${fdyxYU$D)yRy z`Ri%kL0d$!Ni;->mGC&Whi=5xOsKO7IR^Csf4eHB>r}@}U>SD&{2~{##qm1LFlOmC z(zEoeP>asO%l=sn(LvQpB|frbJJ)3yUACM{)EFp-;<~4xP+vB8vi`~8w+NEn&57<#WPz15xfU&)s7WZm34hK$fwp(m-oJCA(L1X zIz-nXIoW50bk`VTQk#XJKgjE<5OcxLqv5w3r`0^R-k1z6yb`Iiq|uDB|M6zG)WD+k z=#EfqTai|sgreW`O?qpI!dR7nx(x2-kvu+e0Dn4-udg&7;p;PD}Rrd z;S7~^aa>H&$CaTTTEerIs(y=T$hp_uW1gwh3w<&U<$SfrPhY#qmZ`?-(2B*eS+GQm z8fcE0Jm#_V6B%%{;G&Xp-cGqd$l6}Npgk+@;ohvWj%MuZe#qDO7kQ93d0n|>?zNH)l@x>7z~{YLAQo@6#5I1|=CS^vwe>y( ze!oZ5x$p*Ta3{HS^+b_{WIW0djiQ5%{B)G_b(IHeuW6ms(L-3&osm$|9E|Hli3wDv zw!vzr6c)*sO)1<9Ebwlbq+=V{2DX3;c)m%ZYlU5d`P%JIF$Igs6(M`z9cGhGvvxVB)0IY)#m+u zX)6-($9r8O^*H^oNj{v%w5umZLlldBVVIvdxPb)cnxVvWo&RfapCoU^&k^F~0j*<-O0-2&ytGNS1H#quXC zgGuR5>jUX~swBRxH)}qoJK||4D*_($y{N-zuqDX?S6a&$UllO|r~4v)t3PIl=|tfu z>x4^)Ov{V_*Va@93w0@#zr*hMxzTfJ5$t_qoq%rgf7kC~XS0{R8KgBr>D{ZL9yGr! z`lIEp2WjN!{;5N$hL&43Zc*RS7rPybAleJZVOVMyN1;!9ly>l{$iL3)`~uVAi`|TaI|u(EaRYzd z!~qFM5U(&ZhgplC`@NoSu}?iGlTQAdVty)l=kPpTOdotYQfn(LSUMBE4$NRs42YX=Da?kN z04YJ%cQ!$)rN9Rd(Qw9ux?fF7D$kcek?>U?sU&zB+r3b&2>5yeG7X9vT5Kw4r3=zV4(d zVfY!MbuMPsC8G?**n#jO$KI0V`9tiwh7}rDGWw(@CO$@n+Ln~4wd~HI9J+_~a(t89 z_V%*o8(sMeHjn4I#>-G4_9fTHn0jNaTT#*nXI-8x5PZIQ@eKM#<@&aB*i|jf7gU0r zZIT%onVCEKSiZO0a-o%sStBJ~MwKKlH0tFOX+wC|dX&~afi)bVprn>TkULQaEs$Llj&nYbh$+K|dFazS*nYsUG~ z@*2v)S`pvRQbyg>-p&bnQqn0rbNl@NXTo-ycpHg&{tk8J6-FpUm2LlsK`7s8qXTs{ zEUlTYycVmmnpo>1mR7~It4madCvP3+-0@uafoZs4?ZoxL+t*syd5~>$VHaK(Dc^5p z8WX%b5K+q}oV@Kan0tm1AuC_@wCbRYZw+a5;T5Pv>E>2Qkcz6!4*<)|u1k;1Fp1wf zEuH6Mx+pFdI%D9qyD*BZRzyPEg)DmCwb5SD*Q(TD&2?1F>oxW9S)v%BI}}=;+4wTf zcMa1g5(TXnp%=ZwmZaiGiw1P;AF+`vcCz~|0!H`0j_C)TS;T9R7P(26uI|D$tM+J4 zceb~yIZ+ML3LQ(T@m)!n^B=-bA;WOJ*lR)|LKE>Ot3ylkB5s9|fxXy@tG)G0f63aB z)i!FXR%ZL&MS|DugJ-`*Z8&pZW7B)E@4CV026!93kO2Z4okaH{n;tH$qwvy5^!@$E zxXX}mqUq|E+EeWUAv1KX+~!T0I>}CZp%$No1VGPb8{2Z7QT5wxcz)W#rEfbK-HxS(DS_XY=d2oZ&f?`kla5r@ zYDcsTCrh7c#yzCKKMi=4@Hl`qCed}5Bt<0$T`F8+zWc=*c*mKnjx@ z<05P%xG*joX7E1E;r(6HPrQ&E$M@jRKP=?kHeW-%Y-*`uxaX@vA8XigA?(`4Lweaj zATihKTA#@hJ?eWQ%lWWY1uiI)jcWb5fz`@En)sjA&*csKUgYuEfpNM|Isrjze(#>YearJtXA)%YRkFu4;X~!0P0D@M{F{_Dc4}-y?t2mAxg;-^ zQ@r+|-8`(iDTD(_BAvEVg$OUSbY1icA%4x0eX;WRuFH}OJCwR3{Smo^@HR)&r;VvR z*A*U9g!!s`(7VaH5N*$&NPJ-a<0Csgwm7*BD?X+l2!gJQUzSCuW%6F&r4!EYs*rE* z-ba>LYJ3}{DMq{MN=kmOPL1vm5+lSU(`c>I<2L5&IH&!W_uS+|1^SrU1vs5>{wx{O9fZZO?*A=So6YEiZ=G9ny-UR_&&7V!D`w)^iQJk-qtBPga@F4|x)=a|q>(@b;3l+6~6Io zrXOsD8e&Qs55JnXX-i@^7j7OM90Y&u2bs3aYiSqcB+CR5xPEQBFliBPb6_DjIlSjIgakr9jS%!s=pCia*l9;>vN zGF~QhO`5KXzA}H>O

zqj{q3{7Two-6O)OYs++(m<~jDKQ6Z@MNqrs6=7n9a z={~y+P*-#Ll0o6Lxa)TC&mSwwZ3>k4N{8acxuX97Q>oi=;aKJmI!pprrErs9byom7{IPV*rdk3@1A^?fGJN0=^bTMij9*)C$N*80qkxiR+rAbB=H=dQ&rd8QIkbD5Hy* zv`-9op6{j7UzLr4Kay$xq19xO?V=gg^6`vgaH6*MH^wB_Oo3_7t$~)0D zS3u+ELYWxZx%_HkpiuPJ*gZXFTpn}N;xX;vuX4AJQqm5DzJJd(5;}%Nwd+Hv(fQ(lpjT-9wE!TWsPaN_ zH}GbU{v{R0HIXNVH;?~|URX-7$&2zxWVnmOU55YN%rr@yTzAq%pZeFQLxa%COqTU3k2iMnY8TdsfS5l7`3CCtX&1%b!@2>t5Uz$(PUFvNkj3UqW8k*7EtQD^k zVI%=Fq^8H5a@&>b1a^l(&|`qzCN|gdNmfY(kA9|xaB%v^dgvNJ{27W$pHfMCYXN#t zwa>j92u4=yW$}~Z+ozm%?kZA1tSOfB10V(o6((%&kHdY9E9$`OdQ>B_HZx(1huL6D)2SVHPp@K*TCsRPMd<)wlKo^QUOq)9Hdcs?AE7S>j+4nd7T_q9VKb)~g%*{!VvY36eO$){8rEB#N=J zRM^{f+B>jZsW+8l)C>d0ggD>bG)-cCRmnGbuC{cd7YWYVP4+LoC^TO64 z{27#g6hudB+apWgfUtYP-{o$`N0M#0_a{5>)vMv$zYZpEtoSfIGjzeO8=}?;Oo;c7 z`kd0&p>}t68t>`hx_`n^@IXLoQTSVHAxT51coHQ*|2pRMEq-_3xdGPiKDPG~b z87go+zx~6Yr4hh+TQ}QeRPK~zyKZW~Z|8@?GHeSz5JT0)m)t{{5z6)*d;y|}k4!V= z0NcHL11CmEV6F2T-%x!BWTN3Z=O6yB;Iz`1ZTW!vOu2Wvo@W%dl%W?NXtcX(sN`cB zX)HoHw2FP_PHYi71c#kYEaGD?oJ*M1zx=%Jxp+k&xDpmJ7kA2fC+;?u=fw^(%&J_t zojx^)zVIyn(HA)YPOEc6uoVhBm_XuQe&od=&-_Et&zo_k{zuOsN2+u1C|5~baGRBj z!<7u^5f-Ubo`&?U3ak1L7)_KbUjt69SHs|mg@2Oe7S870*EVcK4ke%+#lEKW$S-b; z{f5xf(RRlES$-75Z4?=LR5m}OG1WCTdETp~_~x9|$~LHR!HNqi&#%jELf{XW_NPy=d!0R z9sc&MiVr+f?`Wt9j6@kg25IA`_3ovIT%_KZ<;)8y{}a1y#3+Kx{f`_ug?tT{0ccCm zbqe#{g}9lDl3^6GnNO;UKk=-a7QJlZ=#jX`;l&q$jyJfLW8Pgp{N~2wY0TiGI?wVC zMTX2ff+AujveErUW7gr8(+FNZu>$E_s50YVZ_gO>o7nD$wh+?RlQr)9n#DMd$Mi?b zG*!poLk0RFF(|$F-}heM{J)EheGgohHgckYr$>2;<{&SATS$~A&QW%^%jF(()43b$ zczu~K_s*#B*#!fDxO%DiLBel4g1^uo2}#tr)AG-FG_6DJ`!sO@c;C&KcT~uTB0C?z zZsFVL;)8#<ONYP+ucnyle#~e9mg=Pcd!NJymBfXAxIiqD z8&*Y?*!g^6`R!S#fvZS9=vK;|5tb|a_iCQj_jN#IAoWnsa5L;bUGYwL>JFpH3ENjz z(|o+=X4oB%Z^3N0T9akx)%_A2yf(74Y5$fWBeVa$`M7^bklm;%_!lq#vKj*@?$fQm zyp+)NUQo{Sl-LdnwN6+Q-8~Nr(gW5FI4w^Uc1iNf5$Pjkxl@@kuj?IvM!%o;^Y3!M ze3^9bMSW8Fim(XWJpeO6%)bqCNLqi?c41O8=_@tN-G(0Y4SwgkeUmNYx%X@nkm9mU z%A?W0HttIXdAM~>veql&R3=tqPn7t{d~q0cyRKMPP|f|~vuwOy))vL>8{qU)`rBOu z);`E-9suZ+`Q(E>BY2~|Yk9j>7%6;-=SKV-4jovm+NH8QpsQ4o-O)Nr`Q|c>!rIKD z1W#DuZgxN2%_7d>efupX1So8RQ>6#pekb9$*yGX92;o1t+Ae%O-&eJCbXf0i_FHLS z9_e~J6^cB>?+y!(V-iY;;H#xXl~6+R_N&<2X>S1z{5QY`ZAEIQPt%=m0ibngsI|9u zt$QsWuO(q?N?5XHDR_!tQ{V;oXcDb=}GL|S_K0=-CLTyZA zCI1y9*t(#=(GauT!o`|YTB(n)nL+O~X(l$YKyDO60UOkWG&B$xBr)Wd`~zv{h5sY( zu6ios1rs)zummpXo=Lz-aJWTuGna%O&fBp!G9nITTNr5o(0zzHO}@KuDMVUinXxEe zO<%j#q5P39L2kZl07R(CYT;-U%Iv^|FN=2{_FKAM5J5H?p+hBi;hT1`w0piI?(74i z>HPtz)Xjr`OWfz@^qKhURW+y^!u;f&7t5tmXdNLTpE&}gBje7TZIvF`{Q#5ke0p~H z4Kh*#_MTlH#kU0Z7-TqCE8XrT&A58EM_X&}u?g>8JFYWBZ#|U+^ltBvX+dgL4uemE zoAd#R@_+kB6lgdVJDW$brEHqFSTAklubaPm_v%L_seqEBZ0GJb10&H#!Yok6SRmoj z4rC3?e>5OI3(YyItcxT4*6Y+Kvk46)*O-d5i--0*%e6WOd@Gg@B)HGLjQdG!3?*GO1>$*+8Q*TE67tgbf1Y}9p_>}l{3C7gXxoBeg8=O zYnG<}gI7e079dTIh>J}BVC}jPvsMovCLyLA{g*mta64BS)9-!r)1K@STjbEm*%Ec) zaO_Vrmcx3mVw>K?seOG0>&{g*Jmy%{@jStALhVfM>oCwG;ZL=UUvC%D23BNkme85g zd#e95?eTH}!cVw}@-8$|L1GpN*kL{T$g# zHl{$m5HJ}6S{mb)@ZgBttl28eI*LS^)jQN|e z&{T93U-Ko(nHt&;(5lFvIk{Z5uQjki_WF(GW%nAo`|a0#m_7hv_j0vrB-VJO(<{}r zTWw{uwB^ysE=0C9U>lF`UyZdFfA+opIrgP>5MLI+Tc__%e7}Fx1kk@jPS1Ww3zw2n zFFoex+Ha=QsGS8_i2tTnYp%F%np(eGzVL+YM0Y=$Z?S zT4z`7;J(Ax1Jm#=K0-?`UyI1y8=W@Ns>)mZ>b~y?=M2s(57gg1)if)ud3mjEYG2MM z4z;ofOd5;=H&Mor4>j9HJXH20j>?S?v$jiBYGGH|M&hMeZleF&b;20dw9(zy;I;r{ z`^=4wB%r>okDVpY|D>n<34a+X6z#RAe?e>1Q7p|6wM1k6i5v=#vNf+WjwV_eig8L^ z%`*#wrsQLkm5pXT4HO57J*uR&DR(q9Q=d^N2jXq*%J$yB+po#0PA9OZOMdmv=TvfszwdGziR`cytb`t; z<_mpaYydn-TU zpKP;hv$d!M3G6|twuxt{Jp2CUX_^zwDUsYOl3n72eDE&Y_f z=gqwzb&ak4XAX9yjrl}&JF)j~SBmNBOshgy*{neJPqE3@YqJ({Y}1GyLaDt*JlgRF zUn7!JCt^0r3)GJ%qa{Bfd&q3zbw>1 
zv2AWte&bxfGHbwk9x7`u)ADn033|SUV>vm^wdmaF|0sXn9eCJ^zW>04m|qj zz@&2kntNURV1K@JY;LS$^PH7MhPMCJP_ZJS#IvpEl20*`KCG86F^{{MU+?%0{r(1p zBnl29JHOg-fRmeIPij(^HML?BV8b?CDM>=oXln14BrhKv7P`1HroS{t&~Ba)=0o{) zzmi@&=q*P1N;pML{M_IK%q!3Quh%1DOfopyTJC6B+W+De$>bJTWY{uNDfx`{Umd3h zHW+V@{d+Y}>-+U<^S9t-E4vvK`Hg?n|L;G(a?g+4LZoFtTr~z77mYE(^~8y_n%of< zVP9q-K`-3$?q5f~K0!BBQI+CHHzh_ciKcG<4QUw zNO{!%{d$DW`1OFv{EdSMfgL?s%M0l9L0=zD(=>lh=b}sM4*ouO^L{0CaYOdQu)=io zKyVFLu0y+1YE^_Q%R^7g6p@Y6X>qgl6WXB7l+b6TW=|U5jh60CD)d-C$~ZbY$~<|D zmOgm{4%iq6E&ld#d%fSe6zj1#ybE)`{HFY`8f%h?h^z}Qn)Gr$NW+pJ{g$c3kVC(C zlr^Xe&-e5%Pr7~e*vw6xRo=N_Z%x}f_QGx77CPFhifie!&hk(7re4)s(hoF#n2}cI zC&rlTKtTl2f>?Q`F<|(`iE39ah76PQWi)*5t~(1vM^S{5pDLEH%s{X1V-LI9pU4d} znrkUUmzehCat`g6c42$LKNI$LSo4mwZ3s25_PPX`G?P>_M3Xk;+ zql=w(#n^m=u*r=!DvQRQIsW;g=~PCenD0Ys3AqP_+$}{W-Sk+6#9jT;`Aj~v9fZw7 zxixw`u<^`kWH&qj!F16ffYD1EtO0YJI?r{v!b!uAQ4d;%l zODI`VeXvaw9i+fwbg?_I&uH2ko&xq~{8^jB#!7$|HOrT3Y?Q51t5wI^hiR@1^p7oy zBklw{(4QYPH6y*GEuEp~#=u_rsrDXyoBOjUlv?zw&<`@jD&C0FN;mcpehje?X_1_L0+$*4Y2>;RH(o)%`T@w+@J9Fto3Zk+tvU zPhtBrw1#xwDx65FZk?j@4fQ0Nfj5AsTlr{sqr@Ve#c;lqK634iz=-c>8|`2eGWhtokMq|t&nz|-@}~JY43!e$qogqqaT6xT09UxA?ODC$oLA9QFX1SpE!GF;<-`3 z6rfbAk&4&VCdnf=T4f`*)b)JhVY7+9=(ZDvsEd5V70u0ijd#(Ju_x#FXPjqtJQZ{s zdh(NLe&n?VI>`9J*d}%sZqJ!ZVA>sgs0U>zNC|(=n1Wn(X#Vw{>~)mU45u5kEM8+G z3e~nAi2H0IxtBc;wwXYt4-N{~%C>_EnW$c3eV)y@=T6{e^*JS|7bML)GHg{CDr-E% zG^5+YmNXpl?98)7yM65B;pOdd%q6BkyI3cgp|WKYPqzB~V^UJJx0V=ag85+z$!m>YBB07;)|;QB zo*L6M4_1iIyhg2ZPCOeNgIxX8qZT0zT@e%AR6|lpj^{ZCLK%5b=3!VpY~&5TG7`=A zu7^Rx+MCju?gY7`dYb$flCC)+0?GsYpCA zdHPxPAxBZk%4Mg8!fxTW#<9cR9E$!j;JDoH8T=3$rTa4RIh#2CPU;QXHJ1#Lb=%@Asoy~6>Qo|b;)!<{q}$@YCF_fgW} za&i?<*45VrYOIW#cwz$Hvx!v4#eFe}5R*a7UP!MvPTw()T6~O4E}fWTg6phu0lIL< zhIEfF1TO}HC^54K%q?%SJqErd31gH3UK{-8fjYP0+~j&z)^-v%#NV!(iKA*hI(TY* zD)w-3=9x8fwkX7*a!bBAqaH}01b%&{qy6DE7vwf$GYVuL0%?F&p%b%XkKQu%LhHc!y`Ual&>NhGb#q?%j? 
z7x>|XW-D7a?cyPU<>@6Bd^-jU$-q0ckWvZoRane;BHpJPoJEV5=r#8$^9Y-)b<*80 z^wxbibxY$>#tW8&WTvQ}1c@&iU^THK-W1EL{~DX{vE6qg~iJ3+K0P{fB>6w%|I!INuP zE@!k$kCe<-j@E(Q9?3K0S9MXVb;dfp87Te$7OO;W)(k&rn;IVYAoZ|K5+3n&FXeVpdfkM-4k5WiJdi`qGeqh%WwvTGHxQ5!KrRu0S={HWvwu^IxS zm7<|+3uM{Nv}Hr6^(({0PEvc1*QuTR7==pHx`crvW&?^UKi09lZQ3FPPqblLL7;{= zcW>2Cm|a1*dHD4mz>fp4ggFtIS6(S?QtRj|5x`GhHtW%X{k-kJ_*LCjYm|NDP`z?N zvB)343E`TU>?PJ5)DGfFgwc4vPWG2xk2mJxz-E4b@Q(h%V-Mr#+YIZ=#sR(2}!HbKMaN z+K3^>?8>4&z=C7GO?cY+-@PqFHi>|)nBSof#nN?IpQUM+mtjXgV|!S>gUL-<1;5aKz8cZx_7!Pq zim3lO>1)a(_$B(rC`I)ffmoW+;fpJzI;KXmWV@g*euoPx42eqxrGyBk?ZB;tL@Odo z=ULo03goolBSSS5&ZFJ`_WOT$U;GWmNk}esMyk6>8Nzq}!}a+05mBdvj%VueUK`8+ zQm^b7NeeDfTjMEbnfn{`$Hezp%N-mXD4Yb=N}rH8U7VLiKR!7%-6N?M%+CHU!ZPNc5a9-)2+UxE)v>Je1_oC$*arj3_T;jz--8tjaKGG(;nZeJkl+bn~Loe3mTOi)EL{2oNKAxDL}E_DV1F@ki7g0i0+ouI z-S1c0stk>OwqVIyyRuv9ntxo{B}#DZc7B86Rj~HQ^0lfV%g6NMSuzTE1eWJnemS{0 zzhAc%9|^o$>{seEA-IUbqcooSD|%@<9NC;X#m*kY?!XxE&qe###IoaF(Hq>jVX2y* zEABG8wn(8c^YGq=BsIViNroZmOXZWRr`nOqDl*$&`gwCBE72mG5Vzh)$3(H8vHj61 z&lBmSECz&wSx;r%{CTvN@3Lr&liLg4v~GG%Tj?It5daW++3v*p!;Oa;wzZb-4XE8d z!JJju)i@Vc`egMh;9}|PoR(;yg~isnc{|7RJ->kSazS)QCjQdl4Furwt3{BL>Gj2V z&J0(3@1-Q13?B4_90u{j%G1w0sBB-LTiut6wyJk{Z5EH}E&uEET%V@_M-+ za_G6X0jWJ693|$GH+o(7DVeY3aNeV@J)%)K(Q-BWe1`0O|8c&D|FdMxgT_tIUrLtD zT2U%d2;~}v`N8Oh*)woBy!oqnn>0Jcb3|8{njXnbCR3^kcw9i+RG_(@zSm3FIw!0KP9r`qn|{H8A;=i3V5PtqqV|Cgw^I&o-g2t?Lbaf z3c2O9F(lM{CR3S~L?n)h2W3ak$=~9iT_|;~6ZiA-Lk@_qRCAJe-d_JN1xunE@4#h& z)yZT8fCcbQCh;pbJB8w-FJ0$5x;aB9A}kpiWF$E49jow}mKlQyan=o9GF@F0gN#Ya zjwB`)l5uLV^fqif!4FiCe4dg>C(Chm|6pdS=h=d{ew6G53n&I}xK@=4&nXeOgRX5a zjKi{sX@--*+|ob2Lv$#6tBzWb--?<}4z^Vm$r5>`tZ3jb@nBc?PDwp?Ve$R_Dosfl zzWLxs=*PY;U3E!zu{P^%Mf{dWOo*qkhbYYs?=_XSBLh5AMQs@sAmvg}9&h(?ctO|B z!*OC#mTh~QL)J}}`_GaBYb^6>RPI?i+w3iP6k0B0F z94F>ZZJD#D*vRb7A}Ozr4*Yz~b4PA_mx|KLDBRD)^f6DhQnZYYGsn7I(j@yhi*|=S z{Q+tIgNd&$>XC`-x(}WwA`x!K5G&7tq^>7_Vd9#tiCh>vpS-;=coAbL`zfKdSc(MX z5|tX~+T7%y{n@1W&P!Mx>?l8Mc=c(&BX;-QgoynRGCgaJasQ5B(VERv?B-c1Jt;2# z5}en*Le4vuL}ffvSN~H$yaZmvtwA?+|;d=h_8RL?%*p| z@hX1uvPpP-)f3kB)a()a5@mAfH|5f2gihty7`4J5R&3i|E<7CBJG6OH74>U;cvNRO z+a9k%@xFf~%y{e7{XBh2sdX8yquGt`8;$~QkwBErvi`}~fJrdh9VA&wXRZ-v0G=B7 z?#HQ1ml14t-<0Cm`dP(PtWMm8gzwRF+1rjoIsd=*-aH)2zW@Ihp%N-rlrhPUm(gb|f0+9~qZNqp() zF|)VA1inm#!{+(dSM4@8B{z&lB^MPF#$Q|UX+<;ASxjC^^XZilF`OuA0~J1PSn)M{ z41HtUBrwbx_(~v`YK$dQZY{;laE6jv==`az3je;gt}bjmL0ZL z_n%;e!4L!?Cp7)e%S8||a4_Liuv~Due{VvW04O`g@6Uz<&5kNBA6x0Sofy7Ld3}h$ z1$=d`#TrhCaLRs;65KxTX00&MT__OHd9|dZCNxp2A|T`bjMdLgn< z0*UBZP=g?!W5XcO;h?plYEP4M-V1X={@kyv^Di`Nt*Y%JrlS;-8;m*^vtF5p(d=!kDLRC-joR0k>~WpsWz*dJgD~85 zwc^~v@$uDJcZfrKg4?pUxvlwqNqYgC+8Brl$Vl2swjD~CI5aFJENAJpW0?=%t}a)0 z^@0(2E{<-dM8ZPXP{=N9I>QkR!>iO~bbmwY%nEca5JWRF)2qH8S+gSI8laWPpY44; z+$!JA^$D^R{g!BkptKBZ81=AneAo0_dGBGSyv}@vV zea_L^65XH&{%rOXFs8ySRYZ2x)*6--ntI->OkOrnvwi*sXMx~Wvcrt)8d0S=Nf=s`v08RCq46Hn6YeMs>dwB8KaO0uE75g}^>HfwD}cu&W+9qd(XlPHdF&H#IX;&+mfo5NCalCZnn>kV=S z{hi-NmF`_&Wn3d)RyT|mm`Fc_<%sWp^5~+Zzw{V@VU%i1Zn4f5 zpL`2gPcRfE!c_lj;n?U;A^Ybj8>sn08PY}eop?<2c>7)_a$J^b5$z?U!#u(}q(Oq& z>1y&j$oKzM#X?vXmCRhJYu_ZD)qQVLlhh8$Eu-7 zpdr~)6nEm=ukRu7$rT|&v+2u(W^GGq(V4RC42h}ND$jNjTE|t$dEL_!cyWbTSCI6x zvOwMO~PYeTw)ySPsNXqfjReoBu z+1W~uDFZgvQRavx&Z1QL8CkxM$7=;%EzrZ#GL6GOmW7{Ezl-ExHz2F_LVG(N!Yslm zL$5(O7VuxnHgy2%orBNJ_MsM55cF9iHl>L+Jy6|mYa%`rz8G5W!ofgmzhIL(l?%EB zFnf~H_W4RS(GH;w{oF~g_i~5L1|3G^o&&QOgb;ofS65q`B0H*hS*!P)8T?yOaZmVo zpq7pR?{cFn@`$e4e3E<3*H7!G;+L~Ef3pBN?re^Vcj~IuYzNO-OoP-gQdYS)&gM)( zEz$yS%O(eRM5TFjrE}64S?(1yA7B-q*fP zy>Iul@A{thg|U61gAWBb1G)-^dhHyU{LsBeKe4P4T2rpod-{+BX$Z_#Bd_dJ1B?n` 
z5(6u2ifb0Iw~a3{*{cM7-nlQ)KZsxW#=;y5^t1hNU+P(9Uz!zB^6?A0)>WtOE|Hl_ z=wv`u)~F|)5N8?J;Hsf}7|7)|m62tg8dx1y1|sQk*OsJfByvV^;^jMEPuLx~z69#% z%Wi+l0hy2;?TVDkEtY&1e!1hC)xSyMip6^_Bv|%q z!er7b3StD3lsW`s2%a-_631LDMHi2(d_mQRU+)TW-a21W*ifctZ4nXZMy-|3U_V|s zZ9MZ0ZL(p#qps~eqJi0$kTEy3XLF>|-#M9QIW4kwZtC|djAySCbFc^fKhtOpUd;#Y z8%olnTAcLgvYyqw00ZmF27KrGR#7jDS@95+;|vxk{D8xyn^|%XGk8hD6HwjGz~OY2Zm%C#F~44xoG;a$PZTD*uyeU|;?3JNq^$pve&;Xc+Y8=) z`=KdzSih`keqkZ6qD*pX3)*Ex68gFg9vpiOp-~RH)G3;x&QvvAY<6Sh2?sIGCCf{-@)d;( z4G-DOR%x^w+C@rjUg&U zJlp40v?zrGgsqn+Lc0OIc5Rtjp&8We6hwD4C1twA`YPdwbv`?`s~yqe-a9bCWf~h+ z5#S44pL(;inCX{DTT56dw4_rt*|r&N`!G>`PyruXarn}d;HnnalagF^Lz&hFiKi|) zk#Go>*H6qV+QCHmo3FJk;&Q$Z5?shC`k+FJ^5p7P_&d{DyUiP-@Al2$;acFkY`$8R zb@jOWeeW^-w`YbysAouoUP0q6iN=S)3FDxOg1a_Hn?*cvU1K(S)KTq)=B?^`4*kl9 zCx&g858t&lTo-Wo6eEJW-_Loy>Hh(?tXnq;1=wjZ z60h+^aG&kxVlccg$Owel5T5gMV&Pn{deUEg7gl`TEx&XM)G#8Kr-RJ{B@q%G7p|90 zJiUWUchgIq>v(5f5F}GEHjt;i;whQLuFLGddx-7Oj~jA+=?D`&vuI}{U>Qc?_In!d zYVqM-8;#z@%FLxM?8t@VNz^A0{lbBBK6*=WxS{CiJ|rP!Xy3H32r8<*OAit)pHibH z=uV#sdNF9aBILY~*r@InYBCHCweFf&uI^kMl3I|-C3WfRx&}j1qj>q|{O)vL^&*6V zP^fgrvA~#ewn>!EsO!m8w?Qd<3FGK(Ch)aDX^$Sc5ZI&ban1Qh*4p>9Cp@8K{{x^bcH!v#nGSs}Q&_B?2RATAeP# zL`yn11eLgh-H+6SY8Zj0FS}6-9b4nz&xbJS6+1UsH~&Nm5KBe!OARv|=WI5w;|^4w zxx+&DDQ0!;q8$^FLA0p`Ffy}p@J6sgoYYjqNK?>(Asrko2TBqO(ckkpRc%kfYAc{M z0Sc2dybzyOv(SP;6#7jedO3Pvd?Wy91KFJ8@x1)+H5a~u3#qO0WoPwKw0zLQK4-D>Ki{i8hDe9 z+)Uc+2ITtv(DfMx%O9sxONFQ1mF87m-U1Z-e{M_*H$_nJQr-Vr2hyRlZx4?MZzY~K z<0>DpWG*+$>m-Kz%x}fAfj&3h!X(*dwY5Iqw}XLuHn_wc{UA7acPY4!b4f+|y!4lD zddj{oAL^JK%KsrY#b2(sIBK1bSj}HDkuj(Xcvfcn=qYF9QtuqMb;^al)wbNL+`onV z6?*1%NEnSljefTN!n=JcLp9@bO1$qLm0_U_C4BE3lOWG*#Xf-kNPHvWES2H^vR8TM zgFQJts6TYz;&M`@69J!dt-Y@&;dkL6j{8~8SAWMOEt9H(C6iIorlcdtAUxf+;KZF zO*>65Ix?;2_FSRmr90S&Z8xpx!=EC#Fo&=ICcKlNO*eK&VZykTI*|eF{dlQmai+OI zoEZ!rm9QB(>t3^=;PCE7@N16k^WJ`mC2_Q)1fz$YR+e4r<3+UNywsQ59R7& zXDdX6!ks#b!c-O)mi*NSQMD(|%>5x3=2C2PrhBJz%eNv9H3Pgf>$rim0`q8iY;mat z>&o-}QG2fn*3|c%<*%b!$kI~f3ZEO(uhlTqR<1nC@vE*#licv7Wvv&eTIQ>~)>d=C z$Oyuj$YNU^SDNVCtKogh+7RzXx$BgC^!E1IAJRkfO!U@#Wib1A;N?29i(Ae&q^S31 ziesz*W8kO7JUH@~#auJ9QlWtA+yLh7&DRjR{p@4AxpxwKrak71eM+a`^F(=Gz#v{uYXq$UiP;CQI zl@Gm_*%den))MCt^tZ-NB!{!_)gGY*Qmw)vOyN^3f)Zg^w7EWbV z-KS_xBd%K=bliCPz}CEH&K05!N7VuctILBOT=^nLcU)Cmr)r4Lf| znNJNY%xY=L%^l<9Hbl@t`@uKtdpOR%=bT~jsetVieex$-&8XXDZaxL8AMQUY)5}mZ z(lGa~bNOvnffoJnbC$F;OAseYQA?s}X@a;Er4qYOXk5k^?y2swuQINuUBst}BYkFB zx?1ERV+T2>wCgZqEdgeDAAbXk)Bk&7+5mxaY(TzZSszlJNo#CE z>#`w%LMh>iPG@A$_kJni?9I3Mt1W!_XEXj!xFaQr*r)5xsiHIt6qu!f2~eT_xm)Y$~J zvJZW*DcsRsuH>|&$~G%-9f5#LDrW}3CQ%O_Bb4yiwR0BAh|5&Mj**YsIx{(DX?yrn zcsh;i5j|Ik@mn9aqBHS3)xCa{gRu@q#kB0uW#dpcYd+*vAzj9g4Wo>u0un^JpR)%!(&`iOXUgol5zPOj=nmFGf}$ zFLpOv3%3y1g}o(!3VTFYr`QRH2i-rT00S`Kz1f9x!Ti^=UPo+YMHlOExqOd)Uv2D+FA-@R_ z6U$X@Q?%kL8jUJu-}jntl9EU1U26e!VIJTv2As|!qD5Z5@=yG}DLGVT>*~Vy#_kl} zb0;P9&p7ukF;62+0mv6Ki_!Ij@kWdX>j-^zcx(|`Y??7x?x@9gO)*1~=yo=0hDof> zPE@`?ba6OQzO!mrh&cq7NnfU5ApWh==7|-&!=l{&CCVnPG|<&lTxNVcn} z7-=N1tPBv+RR7LjXcX2lL4v;A2#TQc_^4|C=aBv#PM!eCSAnIdZVrjE#&Yi-2g_K`f^TD z-X_&=SmtNGdwG59 zkVnXJ0!1?C#)0bpm-%OtqJ4Z>o(ESC?fKNh4W4WSedLA&(Gea;VdnB491XmE#1Ng* zC)ZZcc#p(Mw37X-M#8EqNaP3AX3CxGnvm0@A}C7+!H%06g%h^p0*LFZvu=k#k(Oqx z)B_up@88=8B4qMEh|LsHfO?XyLKUHG%Je9v6@i{TX6;ZCUF+(IOX9@^F2nTJfOgjG z!1u&~Zzi+x?~^#o2OuWrxgNX}p}$QH&s7Ko)a4apPr;=P?-hw89Ouy=0k@e4|BKKD z0?PM_Y%%S}`l@NLVzpmF`|^F)-$MH=@$LuQsOkJA#ugI?E3?LE1FC(?Y3*;*k`6L| zSW*kAP6B--4G+~7mhnFV=*>uZ8%TwCGE+?7MlS4?czS(II@(FOKXI_s{H!G9@I>ao z!v}vRMt6vqO1*O-?EDkodmL$7?= zv#l`<-Hpf7az0305$oz3E@fHr8i!xHQhfH_ZgJPFKg`Vn?NWEYC%d@3)SknT3&SbY zabub&T6=r;x@bdRGvN- 
z+O+3ei!R=a35qu7N=gno>q{D#t!1=3Co8)0%0DQd2(Q53GHJNE>zwoShh1046eiau zzLsRYgqi@7yXwxo0x2+f9XW@Kjr>-M z$Oe@RrNG%Mh(N8leJPh9YDn7c`W!|s&*I>Jx2|6wE^YK7HH1>>3sS}x#dmH8&cyF& z)IP48wr;L`Yc_21S2TF|e(d$|t!+M7uJnIXg(UDFKR$Jpbd$H|H>U7Y)@lGeBCUT~ zUccembWo*<0Z&}iVy0~>76>ceY99EO%*olVu8yCLRq1C}ij5{M5`#%&$$~`K1HgQa zxlo}JxG|zTWk%=h7K%p~}zCW<sVX|`YmtjF3w4Du|=`iydrR^ruz-RZ1fuZYqB>nRPzcua8{oMN*5?b3Ix#k2O z{I3kP%wYMoFMv+&ggLhFn}o>`#D=WKphrn-(8IGySmHNB7vvnELAaZ4P!>iKpiq6bNne?(gk2gok~7n3aB&_rL{+<0q4nWc5C$3An0p1*Y&<9 zuh}-~IMk-T$LxviC}iB`(SALk?;{3~+XXS6*#qTF^~TBV&mq0J$?;1QSC`1jHz}?c zTYchV7vEBrxUPRnUR*{KrvLJ|8Ig)8V1kI%g+|9z{X*QjxEzHw#W>(wpF*NxgElbX z&$J-6LA9rc z-;*qVb#lNxJE$wq{ent+sV#xpcgNg38(6}Jq(+F@CTWO-88sGOHB*6aNN(st-=3eR zU1?9T(cQe2ul*Tu+e~tcR8T%;4QVz{3hdAor@R=EW(NDJk8Ny&^mr1Z~MX9`6dk0r!ct&~)vt@n7V-71) z8Ld0~7~DzRy)qc`7vWrC@)zN(NZd;pB-akDWF(2zqzO?=J>a5L> zQ0pGT^0ng%Mr>5Om+7nt*2}|?rnlqD@_ZZsz906Z^FKrdPWcwzs#l5e%rhIj`W}0t z$WqI)W9TV^e51?J_ETo*{@~MQ5?&t@9xZml6y@bqWCDN-IV8rbCgALegKUY0;p?ua+Wb66GmCZifF-yNRs-@~m{AhqIMmDkz-nZ&|g`A(sVO6X4wyIHUxLVP# zg36j7xWunTfVu6lI&+hYMYn1(^Fy}&wzpXTc;i~AhJr_JZLKqF2r9vM7>x*gKru2J z{7_awOGgdOU2+*{3m@-*3iPR%*KILU-L@{&KfHS_&G9`|u)Z?j^_PYaU@%AE72i)Y z*G;3fkKKeWz5(V|4+~amJzEYiGOzt0HkD+GU+fyOmQL-;&l^ft|F7J}_u<@gPbFMj z#80v1vTJEAef78FzbK-%b*bj&Y0d`k9G?8ii>Hj%o{+^YNez@Vkag72$YkosKDQAy z#QW)B$I=2~RVT*V=*BIo(4U?SlV^D?Yrd$y)!v$l8oFF&p~h2^an(W$p9rTZEx64&b*Pg?{iS-{Oo36?nG+BpT=11n+OM)kbL`bRJ$LA@UqGCRit z>79bD2Wius=`n@HtARrzODV6CsSgyZjfdyYRT2qBa+jOO`N;cZhM}@domw@2kgEKG zZQjJA98UFi&bd=!qN~}KJ68_S!Dl>OUG^;M46(k_;*R@oPI#YyP3WVd%X-(4GAUuF zLdQp;UI3EKg)%RLSnCyPLm-STLW_4w96z5km=JKegH|h;Kc?rXGBtQYO|Z({ioKF@ zb9y450VFE*-2r;rAw~LoJSR=>%R9%9Q@Q4#xk~!JWW3 zz#?J-s=1~5i-FSU&WETSUh3`2*h7xDAdJw9tuh_8m1*%@LmEX5U4Rxo-0q{?G>sS7 zGo8^-2ME<#-OZiZEvGlt4i6D?`D8p`=hDC-0!T^p;o+p|RplJ-Ss@E!PCs>dew#Ol zXl(HMn(#0O-LV7HQP1K;n89blX_ES3H6!Nn5@3csJR<{>199E=na6HbN1ohuD{PEd zDPMO^?8~RSPBAPK;JWPA6m;LU;ZH`SLZW}~6QqK1BPTV$)g?LnoAs_FCGw9sppm+O zZK7Jepi!(o{7>tKq4?<$oav?AnJK#$17uyvEnOif<^gAfxJmrofs zINAed!zPQ7zb@|DGNM)(-droGH6~S9E(_*1E6w?-d{J+;l7mBOYadyhWtT-a)X%%@ zzPl`3)OnQh`CDa81oEWq*E8J-j-3E{toA>l$82fQuHKRa;LC-)Rm!%HAD7(NsF6mP z>&X*pOUf@?;;LY8wCX^L!9`D%anGX2kS9QH#!Tf*e#f|X%6L)Qv zy;%v8;HdL_=o`BhCCZRibMV)5tIZ>kD^4w*hIMd0nqv}%l3b+FFKst8rxGx`yd;<< zyH*CGW~5mNv92xD*ycIReF&He_}-x|ia9xys(M7a3{YAH1=PX@1FDO=EyS*o&PA9I zD7orBQ+_}6|973~=>Fk4J7Bw8cDgSAQmAz1|3`9HM|uzPD`u$see;d~LE$nV7#lV+ zbH1&W;#xi!O{%Oyg@=@~{8qKT$alG6&ik4+v>-<2r>0$MKjPa9CD|j`0slBD$QSZ= z|9CIQSNB;o#_D&DFX^$tu46n$PB^6G$&1|VV*xNM$4?rWs2LHMBv;pqH4rdwA^*WK zO}Nv$c-Hnp@q|e7T+OyG7u5lM*xU)D{Ic{zLF_35@BWIDUfbe&BvY!c3#IKKsME?u zNZiL(d}KXfHE&S2*&((^ z4cB9JH+1L0lhezt$Gss#iQO>RIOgMn~VFUCCDo)9<>pw&xoETp9x!?&{}Uvj#-0 zkJ^nt&O>f&_-2y}=f?2R9{8&viU2}Dy}v_RU@uM4r~Y{)L@&Jehk<3shX!_n?c`3` zqLvu!x(cp=!Cx==#QN1*{l#|e0V4!(rsS2RCsj6(`?iP><;HQvSo(G-OJ0b2>5`

wMsCTsgmH5E4E8*(^- zL}zrfh9yc&MI>bTtdzQnLJfD3?Fw=1&-&ek1bJ+H{1B*27{F{Ld0olFWx-KP@RBLY z}U_q4-hQ0q{meR*+Uq zq9HgQ7wQ+WdpILD8Qt>HSI=iQ!IH_LD+6$z#HW3O2lK%%1&>!wTWlKT;+QKF{7XF~ z2p!ku%N;&@a`Cw&I|%hASw3-yGF;SBTT8n0APf;N!oKun`x}0+vJU6txOLRp)<&!R z^eNselb2@$%*xjE`XqIAlEQSnj_G_DG?f%zyq(*Y!${SAKQEWeX*#vo@VH@BCB3DB z`#UzN5BmA_wD9yjC54{Fclo&$shc4~4>>Hv_xDVk)}2?5M?>qD+;F;#flzr?S9&ESA=-7{5*m(+Gv zI@dW~CB%!{2VE#?tT?|z2;Ps_%m@QMUbd%w4Kj%Wj~lT86rU!MdB&iUpg9bEI532~ zT@~_~n0pqZJ19{C$#xQyfxgSQ^crs|XZa)(sQMTqR7OdvM(+_pKQZJy8|a6hi|mCj z_W2+6C^M|Y2hP*dP3i;0PuHp8eG%9tn^*J zDUmbq)pNsU!|e+Hh~svpfCTOjHxtUY(YN?vCVE@BO+3g+yF9uNc2p#*yFc5MG8A+1 zj?zAu49|A*1~V|R2v?+>&_C5$a1O6oc1#Nw%YygGa_^Z7@{%_R(3+b0YUy6U=lYlX z`xMwKcNVc>7oI*QHD0y!{2>LuYp}P=#gmkb1UnrJ!;bDQ-~hkDTm5ORVqoN*g=8NJ zA|^yNRGnig)9-zjE>3z(J2LxZx%)SiyfDH<7x%}1s@;~UGK3^@37A^7_JQ*+9vKyq zzU`|`y>TxXhBi37sy%V%znHkX4P5tFhqRXMQpV;jN|OP5}x82dUHSKM+k`Gv1DlZQJ8^CRvvk zz|ZC}oW=OcVRtcG?p}XRl#1JYsw`pLDjq#!#g(`wl2&SHgE`O>kJvMLg-n0!BgzFw z2q*o{aD07dtUjN77j7QN$<>eiCeC{=<^6^K6;3|hvbf|Eb$~q5=nnm~T7{U%cXzaf zxKyV93r-ID2b^3%)X7GTOJ4V}({~V=Cl4^&+xDT#O1%^pr8{pl^i+G`WJnrZ7Kd$G zA6H^tZjwqwP+llm?N&#XyAwuUY$lTB;LrIzBx`8NXx z_XgL%B6hzmfAcPD9hPc`^7Xx;^;jIs$adqur}Ho*dS!e3)_>(I*3S;g+7|gmHW1b= z-K=>nTMNR|+y?XU+{YXwTlp;~t9Ys0m1TFJt-Mhc-AVjt3A*2ia!0yX=l_w)m077? z9eo@-IrDaXy*^K`dY(lBo;pOC9cm~&Sh1sb~tQ z`&+u%BY?&Ed>p>aGF(n14)3GMhUmos8WYWhb9=u60{^r>iN`X$E~$mJxBsR_;9g4x zka`Vub->3xuHpS#x9$IKdI{ubg7I5D@$P-q)2=6nnC8CKE+*Y41IahT zcU4#No<5a1ew?~JtB-H#b#S27Qf-6e?nl4`#)0(QIqem8cp&i@QHG9^912q{wR6p5 zK*};ufP~!wgJnwS9c-oqhlN0<208gkgn2rb#Tj$ zbDItH?j$^W%w>a$G@@}t;{RT7uTeY*tiw)UNzQUNLjHwp(O{oFG zJlBH??a4jQmx`tTY4K|P7mGK7Wraoc>N61h*_WXGN0IJM-)Xx?QS)~KJSy1TM9KbZ zGsG^nRJPHxSANe|F$oFVt!iqZYjs%KbYA*7Uxm))HZOB* zFL>g6)OO&1o>O*Tm$SLJGNg5TDNtXY>iCJ^O>|P?-oA z(W@~H65+jzUKM<*$$Jj(+Tk#(hc9l+Z-s=viHZs|o2cbK?@5SyTD)rra~gN>Se7Tz znOTOJUnl3UZ@)uG6^zvzIjV$4?yZU(*0;ELUfy?}^73K?Wi82iW}> z4nKP;dgit`BHG*x9--I{|~}L2}I!t&8f< zGEW2+^$UEuhKR!pZpWY6Yc~0$9Q4i%C*n18@!_#SSp(Fb@!QlzvOM}>AxHl}<$%*k z8JrYM!WFFjK=F_hv2*-~nDy>pI8c-@{&f*MoxevvPJxDD<2kZU!uDKYmpxawPIVBruzUx1N8+nM z+q%LgZ^(eh2>)QRr0z1pdkO4)WKS3$J`EuE=lV`DEr@@=`w4WIj!&}utSL~Y*Mc(R z>o|f!ZXdZfah09Gptf;N(n(fgtj%ZbmNBsz{)$lj84r&8Wuzjv6IQn6@e4aJ@)R+~ z;v)c3#5`Q8tAAqg_{+P-@*pdlqwlKb{_uV|AdISu{0~=HqD#J~mU}@E$n8ahLaqI= z6!U`+qyq4IuHyV08MIBrbdkoOU9Igo70qTrJy zH*g(9>fq-lx2AAlb}^mLlY>%f4DhMJ_fw4tjZ#snkev;Q zu%pI8_tj!1T$wgEp2&haUcXBOyg@LFU8N#T(Kh%74C( zsz2f55wn-7w6?Grog+Vd>)?aY_4(N9MvF5!d?;DcV_}U*K*R&ERKSA#BSwIcI1Uea z1%Iw}V^POp(!}Q5*{r>uijF>&Cnp?YSjZzUa2)a3Z&N37-M$~fVvf0b_udF9Qp2PM zUe~<_O&ND(3}NLRdYpiFY@7`%cPoQz)_PVvoH!8oeD9^T|3>C{Uj|@KdQ^_a*#WPt zk%zjc{d*MNf=zotg}EU)+H@fKT--XtfcuDKza^JV?jXBL79x7BE+5-7Z?@#MHMjC* zkDS;lXa!0&avPNpaX{s(x4P5dt9aMLwsmBX!X0gag+7&b`@;B_E)}?zbl{+-2GvO4 z9@_^fFr!1tQ)ijAEmK7lClEtSCy@=WI$p08n#J>xX9U1)BPwS=R>8&RhIXI6uExSG zXvYe&so#2B_q}9pjxs{RdtKJqH|MHV;&4ZFnXkIMn>Yr06$A8EwB`q}R!AmeH~%OW zx2IMx6c^f-9$gk}M?A*{3JPaCom7A_Iwq%4U4C|*bgkrFw!3~c&izM|e8L{0jNLfe zDJS`9Ho`$9(OO~lwxm2a-|j>BG69sgZoHvTaBn25G1&|QGNR@Mmd!)AMFbl1SCMvdg6+|C&Zq-5_Z8o zI2gWnq)IyaWBnCP)p5GdM|w|AL$ze}O$h9cy1a_kO{P05mLrG{7g{(kuhBTe>b&?( zJdjXVOhPs$UBare9n@i%$Udc1a}1^LVO|bMb;x1e#mq!H<>z)1w+=7~qnX~tpSGnG z$dSwDeS*VBnoFlO9*P0T{9@PaYu}gHRH@;8^IW1@GDgCFs71FLYc#&wk<3!j!6`4H z(b!%hUKL|tZA5&kudK#Myd6IvDHCYC@?LV@VKvO5+dX=K7_-^}3xtXBY*$THacx+4 z1VWO)k{xlk#|08l&z8^SQa+$Tk>YRiz!&2Lrl;eh6FcOUjorlG+QeEUZ|0(bPK-Z^ z{IO{w9gm#v>I@%!jGzly8C|2Nzi+1!NB=?`(0F3k*Wc^Il7dS058zL(a6Qvzfw2cm zU$r=uv@Za8|DBW==KB8{DRJN+FvIabwk$gDBB^y9fPtzD7g(@;i|dx^`(_;TEA8jh z^&7Z2!~CW{0*DmH(ge8G+*Rl#LP-{!qXK*PmIb>17!kk9j?I{=1dfd+D0Sx032}DK 
zL+$)bE{lN6)vaAi00z|TZ)c?OVO!bN0wJ%P0wwk+3t%%4F$5CipedO8Vy42Op%cfg z6$xiE0=7dyQvsopXbxQ@hMgV}1%PtWbF4prWIXx8_%%JF`2!tz7%#S+;WnSc^?edq zI{IV?|MN64o)92xFk*!986zHsCUT$#IQ zX1t-cFm7;5ro;83+DPD9`rN0EWX-7Gbj19MN-gP%!;PP9aK0Zp71KaCA9`kw^1Bf$ zIFBxBoI9xxTr#xb{R@zA2reFMYtyg&JgBEvpj<=EJy#V7H{Zu3dY0x*O-&;r68unA zYznc-ZiBk{;qG-VXYjoBttqxz_9Ypp4TWB=8k9%3H+sjIu)S7kg;vyp^B^LC6Q0UF z6|HV9?MwKB9+*=ocOGc8=seNrJvPL4+N`c>`BYmJf zxJ42?F(6XLwJLUcV_K4s)-c<(b@~#L80P$!j>i9smFV-@x~_I+$d**nDJjP`r8f7y zT5to5Px`Kj?T9Xk0tQ*HvZ(XT1!hw_Zx9D(=oB7s7r!|5XJS}h(U~G}P`o4vQWV+e zjeq47BzkB#u~IH|qvJdQbY{+~%W0A{!Z?(kOsi2%($B~ZXxvR-;|=n5qDW5hz!d@q zh%&JHt;-pCQcLCXK*`eAMql5g3u$9|{OrhmOX^ zDIA!7{j6d1G6A6qFOzMOk=O!5$Pa_a_+XNXR-kMC(!Tx<+gb+{_zkB~17ebkuM{~;MDnD#A8!fH+W zpo-e(%$ceD9K=i;qUV<*2FxB z-$O2*HQ0g~hsl<8jklDM(hrB5M0&br^Bi{YNvT2TM9M_9q$62Gp;(fC0rvOX*k1yu_0k^kx$M;f>BnrN6l`lGCZOiq*yUNDcj#bz#?2=D- zcRHPFWWJ=17kJ<&VIk4A{x}sO`F3d{=q%F+6r0=O@K-GyZa|$iy?*j34cH!sXwa2+t zhOefwR_W`8>Lp6{MxT!K$nYL&ZQ`#9-U^{;$%_z3G_LBeNd9P@D^!NRO$lqRH6Ye*9F26_DGacCQ{MaG`4>ss5+9 zewovM@(q=Zsf?7_E;qbTczsRj2_CFVV{8>Mv0eDG9^xMoODEVQqRZj1x=@9IenGmR zUl)AU9F9{2_o>LNdAvg~*euK5hd^ObCmNxub@J*&H0KT6b7*$EkuL#=1#$tM9R z-*Ei_NuUz4?B^NuU9$OC;5|eqJBvFHgY%ZO2`=_cBa@$gx&`$-JK|KwQlCfr;~Jo~ zxA0b5zqNK{{c3hw-2v!+H1HiE7vf5!ie_+jC?0B&%Y!(4Tu`wbGdh~WO@2Gr3` zOI%mV#$}{84qs9M<|t%iwq#s;gsLP8JDq_h1vg?bBkDt7q9o4|iT!>{tvn;C^`ZmN)E6igK?RnSG3gEM)i?N9*vrl$^f@&n?a;{xISA zDZYjNFNkj@L1gb&0dPfpWZaJgFzD#ZKaiTrjWYS;Ie(7{VyZCCp+l641V`(xBx1Gf z?eY6SL=be*ikiG0G=RyhU090yPOZP=HZy1zRKQ=#c;C@01B^U;zJI#0|GPT_38a|paq}g0}Yn6 zZI{DPi!Hy)1oL|7vj5u#_H_UM8w~7zLB5}InNAgC zeVm@k{On+RfVo}p-XDgLlj9qcU4w6}uVm&{OO*@=DIWT4<9AErfVrjOuX_p!oSCVI z^ab19vip$4NZij_7I28(sZ)cmfpWajya(+gBP92;e}Ss-wR+X0MF>{G#64cnJwoFR z*6d#KO5gnHqUahxbay)3c>C;OF~0bk)XBUG`?Pp_{}N(0&?bl5I<;Ea*%!@|cH|F934C9~xj|!}~ z;2Yf8ng6~PFo7vLBV)e$ZwGsXZHLT`ej))_28c^^(de`H;oeiO{qXtU?1pQ^uj1vQ zW&qY0TI}iRIb-hS7ZYfrka*OZ--wjFDt_$~ZLDo8Ww{{o6hBM(1{aT1ys$ekGB zf#Xn|9VU>^SIvv>hi1w$*+Z~R0GUCL(k;H4-FAb4!#o-QG=^UnRTy+%HJpmvM=}7P z8luc42W-_0 zB_TI9iGofeE|_1eq*ykphw@1tLDG4A%ZfpTD@UO@o0AfIC z37Ov6gnUW&%JR}(uQRw9MNX*Uw;}P=a-lMIrEa3-6fyR%CM?j0=om}k^ zAG#rrHR-tz_w7Dy^9$qF4z3Q&Va_crbEx=Iox5hqMT}e)ebaK8x_dPr&%3P~t*HvC z(?o#@K{bhR8xZvbDzbWhu_R4+428Dh1M?L(_+#DjB1}NWAUbu@f^{z<(p$rCdZFhS zhPJ1IUDhf`AUs;18-9p_#V#ET9uX@=r;P_1;)n+;w?CX6HNJVWi8XOO$KeGTd}dx& zpP)D^r5@3vW^qT?`R#4AE>~teg1ZkPHVEw z^_6A(uV|i$auG7}8rqOW{4YzHaOPJX22$2kCrKrI+3fkTdK8h;t7b_uAkodG`_?X3 zFevXEVz-4i6mMs+8kW)!RBwxWvXV}ix%Mr?3xvB8AaPrL+)zVv^aL*kM znpp;0dli9ufj$5G>t;naOMHpCdvq{qT-agsu0zg7)eH zH4zLMq71o{`4%qq1vjRb=Zk3N(W#h_SJygE1bulrj*l8P%bx8wZCLohwwyb{bN>2F zgD1T=)98b{jYclK6Pa#w%&nsx#YDC5PAzZus*Z=Zp9r|Hz*S4{ASoh2rk%Q^qI(%` zLb%xIby+8Ya#5{Ia|k|4I$nnv+~M>facxj3zI1l|;ZI5H9*nTlXQn%U?PzMJ{*Bxm z?8HZx|-SZUA+Ci+;!fwg8n*o zHd{+z;@~5|F^+#VfV+t$dvnZ|@*-BkLzKR21|99uKl(II6|&V@v_0p7)2|p&Uk2-a zb_N!11xCeqvGU-grBasi0c^NH-}ud~Dd1FYL)^zNGoCF(z`E&eInRW}cSJ>OWoe;` z_mX=Tx^x=fs(Dx)=CZ7R91j5*2U+a6?pMHpR|=P3F4En$^nHeN#TfD!kCMfn1?CV} zjm0-6%b_sQ8_PdRKQ6?oEvi3Avmdg3AIO4HkX;h_@_Ave+qeiMKJ~w*>wwnzq36?3 z{p{CA8XoF@JiE=}3a*4esuIz)TUaAYRqloRjTTwm-DcSL$Os9(f@AD+;qRL{vFT5D zGDOjFD=jzpHF$Xm)W+c+Hp}JLATcDr_?=TomSHLRyoTpr zcXdX)_70Asm?Y4#Z&9urSHQ*TW!!^h0AC1ONYJL{0*kThm9dfqNSL-z{MZGAgaZ$<=ev3lIK`x?Y%F zZoee$^H-8CGhVcN;eu^00ks6r z7Rmj$X5-sr=afFOcG5b}s8mR5A*|t}KpQi#s0M7XyZHI`k*{m>9%0^9{m1uaXX8Mz zd7xr>EwJVK@#FmfM$y{O`%7s3{N*u6=m3-&3@En1ugY68A`a9(S|jDAG%(YJ{Bc1d z#`*4D9d&gT+BOIE-R5X=u-k4DJo>c#bU1OKZT`~ z7`tnm(*tB`VB>1SuKHPE5XygUr;>RxmV%ISKtTK-4ly0?n?Tl$sf^uQ!J8eD9q>U! 
zFrl$8Cl2JdvE@VPjFI~-RsVITAt?%^s5+(Hm$YZJ=QJ8RQzS>FFPOYbyJS73pc&lw z{pxW(CYRkusb$o$#_YD19sA!R^D%8~8RIlAlfhF2xd19kJ zg@6u@DBLe&{?}gum++}T%j={w8RKn}(w57YrqNUe*d@!~fx>^?Fhs<=(pFi2_bZylM-6|2p6=Tv%zgKD z38u+rP)Y2U1ylV_ZeVL|^M^S8b)#RG{M!y&KI@F%)!QZ5X6A84Ml9$JIQ#2NQ!m`x zCV|NvaQ3j;f82=f&d%;i$q8w-Ki};?1I-(H>SfpM9{)d3O9KQH000080EbTEKnQ+Z zX#r0F0QOV>02KfL0C#V4WG`)HWNBe9X>DO=Wi&2uZfAvj1zc0#-@l5AqKF8JA}~=& zC8cX3N+=xy5|a{;?%q_C4g~?p(J3G;Ih6+K7+rHTjDdr}*!I6ezrW}A)c+Z;7mRIO z&OP^>^Lf{~p_=MSv^2~#$BrGNRZ)JZeeBpNPT+aanbW}cZ=Sj*jvc#kOy!}xu6N4% zw7d<i*`7SN-@e$3XubH()Pw5|YOue&g_q2af&C#tQ z3(imUjPmIHz@`!kg1h}rK$b$t*V%!8dFJmogN&YR>8(SV_dP$dCN$>exJmYh*n-}keHMR0a zntijb*i*&aIlf1>${xz57V!PuMyIJ+YvAP4uF)Iql$-*Y6$?ce1hwd&DLb9U$*Hqv z=_PiBJXf{8yTuNG2+lbZDZ1Bc_0r~t#gTavmf%zFc`b<->eZzEU2NPatFu>qvGP0BkLk_=}pBCqB z2VJ~(AHxtCB>Ay{mUoYb_f#u-&}Bt&0a1CrjIGPlZo(;ED*x``7M`_ukL8O`W9vY$?K`?`&$-3DrNobKMOJFI z_J@)O)Hl3vix%YGLe#Qw#ca7$M@`5 zVQ*Z?VAbT+>a|9bk=jAyh-z6piaTp%SS0TJt2O7BhACfP2|u0f*yShquzq&y>a~d$ zes)2p%*R5qf>6^o(*R{X%(WIZ7nXlA1wO5^kO*9{fLs@eA}4t?FD^y>c<^>D?pI*v z7H-7jE^j=;ICWM;q0SJ|jwvDDO3QO9&q-J0Wu4-p3h43$_tXGAY;_G^*Y4X5rVN`; zl0BHoi%7Owa*Gixx@|rK@?29CYFs^Xc69IX%u6u-Fcf=uW&qLds45MMb%YW7O07DG z=>1tpJl9?{ege|Ef9vhV??J+Wbd1qX<16-q*Fe6pby@gkPF$8lNogapy&Y08+>|wX zwoI0evAag$)>#lY8!X3Fwt-l4=XXc`Sioyg6ZITS<0}^f5ry~;p2Mi@-4Y|e!ZglX zl99tT-tBXU($5-h_-)9s^_^3sbE|=H*lY5sYjZExt#848LHAFPQCM^nIU0M=Rzt=k zo`2n(-;1b}oB;p0O?>NprJ;pTg6h|X$G@m#$z9cUgb%s0XT4<9U6#_Am4*#56ABe_ z90pkZKfKQ({pvpPj>-frKFOXH`BbalZQa5mWzzj}oG-u9bijdC^8@43nStHy^?G`a znS~PYrL$=u`Hf%+n7J(Y&4Kt~8qX4D#|dU=c7U)MafSJ8?j;zkB@o(~D+S(O9JtC_ zJ~qCXe>N_9FAGyQllk;|!1Vf*ma6Qxt^7{g$-K?ZTrt}Vx#CX!_Kw?S-b#f##-q<0 z^0u*~aQ>CZGXXyfd_zagyI_X*!+$b@2Po-7yU#Fl{+t8$gj4t9--~K-hRtYHJ3*ZmLrl+eh;T>&t+C$ zgVeQ*Cw{kPI8XI6sB=;~RD?6KQGr?ZhJz+=T+k|L6$ZR-7>eCAW82;oY#GZ_xz!cH z%aoH=1B2F%AItjH`rMNIri+NW=XWW_(DpafP*bS4ht0uU2Z&egz*>jH@8| z`Emw>LBifo=5ST(`>ZYLzFMuvzOk@YjUnXdGDHf%hycv-PRr*1dW2n zV#pZ_h!auo%oJq7q|S#{5FHyshi4>?L64hPJBOAgJ{Auk^P<|C79!PGx^1K|CfA$w zhypBZ1bs~tbwU=*I9O-0RaejXCgB%ed0gE7}WopP2bFV=StTwFvfj`y7 z#YG4U89Bor{kXeYUJtIWqRr^((&$vrnc?9o&{5v343(5I4yx&fhLSaI+pJnjZ}gK56L*N6GF#d2g*rHjj}5zo;ZvDr7D7 zJ?@Q8C-q3cAP))MNnB0cF@)}lI>hG&Xf5n>QvmzhkhTZm>@V&>cxLQ_;Xwh7DoY1x z=GgD#?^LY|ve|57hS^jC+RV2c-tPvTGh-9u^nZmhyC5JT>usO(9k@tseUabK6r_!Z zY#rx;5{cD*oRL8)6M}6q7DETrTZyK98ihFDh^5(|ZS0ZXw@TO|H5QFKYtW}QW2~5>%}Ej%V6tFP3C#(Wjv-OZ2iioe+$29Ry*QC$7ehI=Qn#6)a#G0>i{?3-YYy zw#$}MZswP@I(NOe6EyW<-RQaKfdSHSw_<^DTgRdG^O1N`0D~u+GVM&6d|vGejJ{0X zI{PpUR6fHbjy#4;KIb3G;10VgSL6H10;#cuyWG0p7%@(yYm$oIT4E`epAy7l~xVe|G~=5Fm)LKXHBP=j3aqRp(N99P$z(zA$^mObogxxE@T1vx5s>f*Xsx{NcAXw8N2~0gxQ>UM#*meQB$L&w?6(-Au>_?!5IYzgaw5+id6pcy&E6)+ORG zR`<1@TMox|s}NEcj$L*(;lH(>jAV*?V`tG4FCJ1$8vDF>ct#j|c+KGZzL3iD&fdFf zgdzZtj4=X5?0eCo_P~0xD5!5f_~fx;kg2#!3+_r)Ry*Bx!GeC6cyga*KrW#4Fz)iseq&|& znvN`?$B_2Uub%L$18P$F6P_*Wi0|w$#eG-E>{6-GdJ+sZvgXn zuRKskLvI-EyJ53$zvrFVr-Q!I-Hmd^eox-qy(=3Ll@fNps`6*$!&r?MNrR;vhaA^c zu5P)xUsoW)p7r0WVVC+^4`-c-h5k*N?e1KjWT!P&6T^M%$-#=7lMIdB8MxYN|u z4~X&v@uI0;v9_A5FU1``wvBe2TVO?zl9DT~f_uHz_3y<=-%W@|X=?`Oos~+N_)@yY zcmR*S99MV6lQ*L~r$}3q_1Oh?R_iSXXZNfCv9Fu6{5pFx+Kd?v+kWjvO*gALc2@>t z_NdP^H7&>?f_v(M@)8FSnn-c>O_13m7&$sYp z{95{=(-UZyH-P^DDPsEA(CcQCB(uMPwCfja{AJO(HatXakWEd#r%j%pk519+YtS-h zSQEGX&1SLXPJWTF7YzVW+x|~(A2fDiS2)s{E7Ec|i}$QIWoI#W>#NnC4Dm!9evW*t z&Xfl^5LC0;XogCf9*|R8%4)f}9Yg8$&=$w1O^dN6HY!Ry;1^zK7 zZ)R455w+1YSs-0pJP0EnZq~SY<8N7$M5pZ5%a{t242kF50vI+)^fo_;Fg|E>!SGHr zQkNZW?Y$ouKf;-{_Q{8JE3M96#_$aJ9$YoHW(T^esSBGnpuJa03!MV!iRV|lFIH8O zBOWlng!t*SFN$Mth&z`>A|ou!R5!FKQ>27Aen|%WW`fRz)B=P4l#`nWNLLE++@)Ny 
zD{TKMMxeU}m?HSe=n5FH^7cYZ5l|u~TN+zjf3W>dycEJU9m!<=dOvODlC|~9ijH1j z*R9xG_uV^(>$0o?H?z7P6|~RHeEyYlW}Bs?HV)@vwY2wUG#!R*|B>#Cy#;?EQ|0gJ z^Oh*_WXNEW@MMZ;)Jp0LLuYPu@6W5$!BlE6du9%Or5+M;6&*w?lGM`YrQ|4b9q=;y z_KwmS_8*4OeE3B>v?RH1eZa=QO)xirBQf_k%7_`<4LbFpLFV*G_&I)l{;v{u)IQaL zbelellf?67Yo?DY6tZpC-p0*;3L?GYv}Vy-j_SAEfXyd{ybf!CaU1iq zCTxcJ)n3@(x}{by4Ds6FbBgSv7m)qJiPhJo>0)jddq|FvlZ%~%z<0Q8UCG|xu?Ykv z)eYFKC5@9nrJ;p`T*;vYM4P_SvX)=6aVVp5du_rO*_cnNR!%(nn<~e@$mdn76<5&L zv9NJJb>w*gYS>;BtICz}%hR|7@OLv|4}*vURg~adLSv@Zkf^Z4$G+kYl5=Oil&A>@ z)vG7n<=oPnEE{)NmZb=V3l2_3A2-fU3OpDPbW578VK0dss7?T%&RE3^NXYy?cfPPK zy1QCR-W9u-KkO#;?yLtj93<2Ren@7k|L_xg))73vqoh@^CUvUSZ`{M4_vuMwl*T5= z4oVJ@B(c@PT972ERImn$;ot!BehQK{M6|*!1uJcWu~ndrebJAf{L{VtyVb2@)jiiu z3Z%op_S)cy#e)gI0RO|d#~acwQ!k?LS8vDNG-3%DTafb5K;!Skt+j7ECEmUP$US$J z=I`t8_jq>LXgw_|E0xlnxMIRYrkiK+SW78qC?UQ{vjAO3P30B=jh9^A#P6PSZS+-| z?5@B_4IIt`EA59+9ul$6O{zXh;R&k%jU3oK5~fKGnvHG28mbVwx8&S6>aHkGW`zHc z=MOHZZz#KY=KU0S^pHgFc1UpOAX-FDLV|w0&h0Pn<*D~p$)Cr+Z6Yx_m~IN#!C)%e zhs%5Nn_#;sQvLvYc&bCxONRK=27u2_{*H+vt-YZ3)TW#7s$B|RKi#t!FHW=L8~eg7 zaGh>%0P_LRvapez6xx&=n~&)5aOjFh5eF6#avsn1wH{QLY6B$7fjsGloFDe&$=4yD zcovmeR$6y>WnX;rP9aRbtQFB$CdkRbd!jG;ax3f^**}yOu2+Q+d}nfsKb1snnDF%D zQuP%0sS$a~JPHN}&Z?gHt~SL$tXTe#c>J;#F+5b7#Lwk9R@dGe9oGWRhpB+z zdZzH5Z7_EsvA~|@lpfZc@f-kFkwlBjz?^CKtpTw=%1Muc%-(YPd9rNLu@iS5(*( zSo4Ct#z}(TbP&1rcbhMKVCi_;q&+^quxzuPThT2Ja8@OT8|MJ*cF&2LqH4}-d8~#- z&il^(zQ9fMP=+@neW{^(LIGCw&T$r<9ngFB70p-vgb%wHH1%joBIze*i9$$3$mKS0 zN6U`I^HW&(?G;q4JK=SM=qayMF{fUW`x)sgpQPwZgqA-5qRR~U=~eO#pyAEC3ab+i zGh?qZUDKBYZ7m`=YOLG6d|Y9M?Syb^*4}8~Lx$12OUyzm9IWD!QWMNR=3dYDai_&) z-ZLlFq&}AY*3;JFOKCE_I#XOy34yG8Kc~cwMaD3fOU?JYSOa(>B~&9OWxQ$4wR0IH zdW{^NlowthFavh_;1O4H&u(Dv>l$6TDq*PjCP_Xw4Rj}*><)~CtqtnX3|Lr~Uu%Xa z{{8ZxD@$GBWA-1rXi{139x%SaAyeQ?8?drJ_PR5xx_x`Hw#;voAri(&j0L#m45W?2 z9eVNuL4$FF`YaOhKD3j%JPvGqn>Y3d14{;1 zH+E6r?&B<74tDmb?18JT*Q}ZJB5Ij6xtI4>U?QihG5xNLyP`b=1>HokX`2BOuD)Q~ zG_h*gj{*Y9iMHm^u=lTaEcCE&&J`3x9r1<`uFs?PVNO3hGYFtOoif+xZ|+4i`H(|o zN%Fr)^lAio^Hq4J7BVPCHLUA(?gb9VHw*RlF6EWW{AEIpraS)QGwa?A%9Vi z$j6jY_;BS9wjqsV=C2nWV}40)M1H74;!4{v zf2#@XJ$eE%BfL>GymRpUq#iq*nx$*$O7X=dr9&)y0MYMpgmRKwGp4Ew)+9>~=G|Dk zM)AQSwXlYt1f`!?>R+U?uwB&p?D>+Of+C&nwZLCGUihUUBN>y8K&^4va-jioll& z`l^I_H#jD}B+Djd4rFvFP4AXkQp-HTWBr*?2z?&(PH0?^40s`m96^%xSqqESSl$h6 z@g{v0LwjnTN0YGT7?4XF=shrhC+IHC?hkp~jj#=%2_SY8(K$1aOS0f&wdBv4#2fPj zF1G?;l!R0PI-+3yiQmlY8=}iSRRKwZhG&`m+#G9oUG@Ze4`>T->FBD;J(_81A)~7W zmio4Vt2`A1pbZKtg5OYS7YLbxyqSEzqp%77vc8WyvNEI>*Z`<;(Y!cYCC~5o=uWBB z0tPZjAQ#twZm*Cy{snh_k+9{HbTRJa7)Nl(ZzoAtm`eO_r^ZRTOHHttPmKR^LoPn} zoT7@REX$)m0aF2hFK5_IvyX7*Dav3t@AGds$Ag8h%>+RyKPP0M-o37bCm(`g(_fBn zwPWmw6)55!?_ZmtJ#CCRHKMTmNrS^rfmV~U4yt3)GKaXJV#LTI{)Ly?Le1%zD9o>2 zh_b)FbAX&i7M2JjW5|JimX<-l;mg$9paQcxl1&`loKXa+G?Z?ddpQ^}JUK08Bu$zxz|n zPyD%$uuZu!Axu#X*`aE0LG!5jr!90^!T5TX12vNVCBd#bNfdFTdqV*Hc4hq2NfeVN z?0x#KMI`3&1Z)v|>V*mXH7@!$U0sw~D~-#B_jR}T-DRcc$v@W=?7q?^sjKZA^iv#! 
ziXR6`s5_M5h`aYu@*gbIi?7%AVuX!lLx2BK9JACsc_9Us3h{tyN%`<8ZWbFFzO@wb zrKJYj$#isn5z#y{U?gL3GeLTNRRL`_*|D-N)vGL=UYfo%*8bg~FyN2ap6Ik^GHK_v z3>kl2!XS(-Ti<;WE)6}7qjqHLhfZPD>WDcU{sE^)F#!c%)}E0(`WM9?H4iVX9y6(z zb*GKwqILVkn{p74FG2Hezc9KHO1b#A?c9|utdp}Q-eUYpYw}0LYMY?`)9c^VSJ{ny zG>Mc!Wbmpx)M{vz?v*P*k=J=i7jGeR?Iqo*(PDYcXfw$xL83ebSx0~b_BIaiY3dnu zJxX@(CH49t7cFOAo1++kvqRU^Dy2NCBu*_vLGBY@CsS8~y!-*D#P%!%=6c&_>B4%OpeZf?2%&G~!+4JlS5XnUfau2YOs~=Zx#qp^Q}a#oT;G z-LLL>988SDU7xpAjMZ}^MRWjp6vP1f3pS$DCYrjM7G0%;u#)hI!Wfq;8u$6?iG${$ zvobj9rx*L74Oq1%B1iGT!THa#8Xp1BF+%|zCG(P&l`Q)EGWz>09uC|Eh1}~4&Y@9> zeQt4m7OM>L>dIgLc4LOB8Guw5dc;7vKRYk*QikqBcZxr?R*D_VmEq8!*d+B=oNo3F zc-wt%^qRQJ5B>3qYbb-O3exZKrI$;c0UizZeK~(}xO`H8e-r<%7_6*Gz_y9+?=r<5 z;v^eM|Kg`eoAZsQ3yyRwvYz5d{tAF@_xt4v8}#q;E?qHUyzrYPcXOL_*aN`0M^}*A z4J;yPwEx)3+HUpZdb;aT4Y2!i+Gui7dxJ|}@iIj!OB3esGc-N%eVI$dIH6l`qe#l5 zb#Yj)<9u{^iWl#lSB2VJd4>Fz;~NPVy$*j2gKp;&zoL@e3Ej%1gOsO;y>~xK?Dp5h zt0g^_AQt7NYh$`W39iS!%@0FUc%cgt_Y`%xjE=5Xj1WablX7%*uN1|GPROGvZ4CLa z=4>uK^YK>($WP@g*PO}-4_j`$nq41y5Y9G{pvCAs{lW7^)$F63+%*#TvSz^=Gqr}R zc*Q{;AWt2qD>n2b+^`)y=d=b)37u8C2{}sZ>XUW@WdiZ)LRHX5fO^``9P!@WR`P<0 zUe3iw1cs*cPyCw)fe$Z2DE)Ewwx#*@YkPFxHI??iJ)}O6Z>OMg;Su`2PVl8bK*<5! zT={_BO|WPpY0pDTE=W|eybI`oAl0yW3erR^!{Zy$R}|}s_&(QT!LF0cMoU8J6x3oh zs$dsCEf*q>TZpI#=WUZoZe>M$JD{qjvj=bDOA3iM?3icgF8nN+rJ#d!S-JCh>4lbQ zw-0}$L71yPtj%Vi^VjX7?F`uR-BMs5MnIO|@stX2^OC|&@&Ani?rQ=QS!g4LvZ!a) zishB4^Oe%cHFyQX`-}dm{`R9)lhH#Z$G@8GKv>yGQU&yY@dS!U=l!)qSyeON)hJFE zln`*y#TiThkf}JJKBaaGWiWFPUuF;b^#@F-nLP#)0e`XpFGPWPu1S=iq^<(yx&A2h zy=bI>;0|SS{R0nLOBBYLHO$qe5iHMa6bMU$<%-CD|GFS__R6XY@;}ZI=0iNdA!0Xy zOLT^v%_Ly|2jGDd^$i_i`>%TtBUk*)O>BT)9+%w}#7}4n4`IGFPS3ics@&*U(WZ_md~*A@*ag`_ngVGas|t>e>ZX@Lc1hnv1g@ zPT<~y_5~wS-&xGrus_GWeT$O<6h!YynY&)rmhp%6w1Mu}yYhGM3TzUe#H#NtbHtAr z?4c?)H>iC!j~Z74i^UYHwCe+t0$;6Su1#5j?-gmN(y+I5bL~9^F1$IhL)Ve=b2bfykKRfW+gY(_4 z1RY9Q_g*U{Vx^79>oO+cYFa$p{0ULgyAP*BT1>$2we&ngYa;h^z7nD{;bUX-^QGeU zJ-GB((a>3DWvQmvNfDL(mz$(H|A4^jg+Vw2bDW(co_O;V#6mr#E;h{6{Wm@ zWKqpMxoI(3sIIm*ar(%rH%`-}IQ1{O)xQ{>8DxHknttD!A*5t6u3lmte#TyA0kZYtwWbo75 z1g1yCEbL`qB(xk{JGj33dI8HPovJ2_L&WqBHE z$yAf}j2t<>9vey3k(?>HJoE#_5$^nckIVuDrHr>p)U5FG;*6u}{M5&fQ%YjSVD)kx zKJ*)lN4EgTQQ?o0;kI74RwX?FmmxTIw6vE`D&6eG^ZdaJ<>3i$nWHr37LQ}YrJGc0 zK%pl~`@W%2N7eqSLvRWFjEF~q_|xSJ{eVoV6Eoe7fgIu}?Yn81rEo=hl4XR`f7Y+@ zQE~MO$7Le2=fZk6TPDvD$XZ}IXJLL*p5o$;U*RLW1pvi2aZiB|ad57%yI>>wiz} z055B>^SYS`X~Cvy8ydq(*Kh%t>5^cN($5#b1li5hJvJn zLmkmTAD#QN(hldDz-HfKSNc!Mwl~>f+kgRfVRXME$t4`Cq`}t!>2?Gqpiv0}uAiR4 zX!||C+D-15woa=Aoqc#8-vhdLg#FK7ZJK znw)on*TV!gr?P4Dl+1WR1ds;SGDLxv-B$CRcgPEof|fLgZg}gAE{#E-9=_?y3c5fDm}vv!*n{V{{HgM9F9{3n1DfJeMD{}YAK;M& zI5*=+HspJ0%@M9pg$`Fjw-5wY;0*R6(h!mmCCLjK!JY?M<%~4GP+#rNGQo+Bc33=> z#K+0eF{$X|L>=9kI~8otD%N-9&lfUTroeiidJq+Ym_*S-A zy(*BU8tOXt93BvC@);}JgXbA_<9^1zg^cYy$r5uVDg}`EpTXXLT{xl@ft=Xc#IMsq1 z|5Wrk17coXR3g&5JSZcxy!3<(v^LMK29y^NEL;TB>=zM^LKiz->)w@@+vybXsn>jZ zs}27TR)~?F4%Ki@D-X~-T!_MSP0*F+VrdJ(TX>)g#5`o%^$=qo+(syspSDX03g;Y_ z4_$2yv+O?cdyy3NVDGK|oQo9^jax&K)$165#r3VXI}Ux$$rP~*v8pAasl~`ojn+dw zGJs!rT1RJ-cLeS?UII#F7%}> z_W4U^?cGVl*F6C>sp+f)gQphL(S3C_pkKA^l*Uby)S7BISt;OhVUjacg;FSbKFbLK z0)_y5%7_9BaJO`7DL*~-0>Xm7hkLLY?yBEP+XMdV##rRkM#d_98!j5doB4(M&Kldo#MO<9gsXok`ZvEH+lS^6eI2_RfRvVwZ(&BnRU#I# zR4+^*uL%fU0u6O73!!VPuR*Rdh>#8>1xx8n+B3~1?|cS_;m=Y!Uh?|E_4R+M_;{ID z&L%hMiwe;udCd-k!(#P$t1$T<4?Y4SApRQ$hsifVaqvVVv}-pLjtV=}bkG@DB{*@x z1g!OZmEY+ump|)2m7Z@|a<@#V*P$-#v1~%tW}1%Yu}2mVkNir$I)3O)L8a;KZl!~Q z?c|O31B+v_NIS1eu+Q%F`_NdR8cmdAwIuo(pN&ddF$t)M`X5pO1yW-Q`vlx|Zv|k1D8fW;g8OfXxb&A;-!uBX zr+X%@q{{DeBBK956evwSjjhqp)eLUo6nVj1y}mj4NHR 
z$Td9<)ZMi7XP$)pNAz&^u*dSO;3hXcL3TNL^gye?{_fJ{9UQI&Pf~It{X3UmDf%y5 zUhXfKZ?HdC%$Is(9rOhA41LLq%L{`@^2^N?3++G7b)k%lCoc^hgelAEaTsllV^3J&+#~^0-Nw z1Aly_c^rv$U1vQdt7K9pG@-CjShsf%dHw9}n8T?>R$N{oqw#f%`G#LHlLEJs@a`L8 zPWJb;8e1+5A%7>oF!z7cbt?igxFPUD4~eWqF*p!R+H=i|J{8ia!~BAE2rh;AP}G-F zpIezrCc&bAuexwg+-1P@N@ihoh?FUlw%`AT16+L00(&yg3}xB=l$^P3>S9A1qx-jL zr!`2nPTi5N=TC^>d?yUmwvi~9q@-VRb{^rrMg+P2p^YFx}b=a?^Xhnx#2Oc((yo3o9 zb3FAb%aXn&AZiOjDIMp1C1}y;{|^y}1!MAPq>+f1BM+>B^)^>&p6qdB;7>2IOa3X} zaf{@Rqxv0nU5q>e5WUz{AWew-e&+K7In0QkFi)TS-~Lzy*3C3JOn^^Wxj?746F zgw(@}Yeg48~Grg&o4Ta_YrSNNbEik0z*@GD2Ur{yXfNjqRe!hsi z{FU6D2EM>onX%KJV-;DVb>L3>MEY?5vLR9aN+g`p=V^h61}O1hR>g~Qo)@D_n*8T` z){vwkpzjaAEzZ6Smo0MyG+YgyYSK)K6Kg`8VKq!^efG13gQo10p)tGubB#SI4qQ#- zZsocF1YMsPv^%VxsJA|Q6Pw((+^9!6>e~IFyA&nGP5=;tL5<|Ytj?m^SSp@PDDMw& z^r#3=tvxw@T&{bggK?zyEjViMWAEesvE&#=6)8EDcJa<#$b-Z9%U%SRQP879V7{Hf zrgVn&#GZA7)X+PosvNH=Z-oI5VrtXPNQlzH@F&M1Fag0 z0J{g8qjV`2INO$_4|IKihbCKF^XJuESIVNZwn<>53uIc85#C6Awu^u4AH#w5oB4&O zyf<6xVK*oN1y?*g_Oc;`XGc>oIWZf})dcM7NhL#@}~siAg_R)69OUh4&o?A*Mk ztiC%aJS&8iskrQc9)X3*o0}m{V@YfB+;_tIn3e0u0Nv-r8hyv%WcaRwDMyv@vOw(p z8_~};`!7VlA06$~kY!wZOEiO)@&~G}%}?*m0YKrORDxtIh?2@TTd1a?uIp_oJjTyA znQ31iI7kWQ>j}@r8C|hzM~6N-$opR?{;4!b1Xk-W#cv11Qh)Qv*~b%*9sKc%5}*o! zLcxDH{?y*Tar{wdkVtCMA}(XiK=-coJDd{zpYyR+fCb8G$~0N!Rj6=D@0JX+wt~b8 zKn`vJ8;x>uq{lhCtX1kE+w)>wd+tfe*$^tGD`3P?a<2&Vj`<>GpuKa?{b3X2T}V_T*R38WNyH7&k*fNb<3>qpPS^Jz``jRqO1#@I(2DcQfJEx9 zRc<}KsB5F7^xkpC{<(V&v@88FMWAa*$pcqXnR_d!$%|TKu`Q}oxxOCfnfy?|G$@W! zCkmuOCQlzmu-V*YO5B1{5G`sD+Q%aDeb3lIdnx{?0{T$&`Ijp z-MJR>Q{OC=7gS~ZBBUzIn8h3-IiatlonKTRL*rhgc7Wy>%EEruIV;2RxWf@bi+E!v zFl04s2_u&Ql3=qnA#{=$7Tg|IpO!k!PJ50q;-z3l=vzu(3NJFzwq*1#PD%=lps3Jf zDd#V39l^?A#?l&$-cE4QM`znZrgHnBdzu8d3$l5BFgHFxU=JO-7qmz`XAtP{0#@a{ zM8X)SolNSQ~dbl2iWa{GBoD*%f5 z*K|Y|P2FspF{bm!D&NWOvC^C3JCQ*@m))(7o7jd(|#Pe`EMSotd=wRU(E}fZY2NT)gF<4)yDv(o>;5#ob7nlUx`;71RVV>@M*n z4Gb2WY%zO#zD_L`6nMpG5^c(1_r&TPP2Ana0Puj5Sgx z__?ba$XO%Z7Y`0ocH)PP6H8_B@pwp1eKSTDKM@_2lG6Kb&d^sgov_PT$0qvAGvU4O zs9J%)3N!(DcaPApY|Fo5plk5BLYx&kU7>uvC0@JAj2Eq*p5Xg4QCh3#h zCtS5eUt~@cEz_|neotYjBZ$tMcKkSY;e4|vOxyVt6Q^)OO(g?zNnF^?|zpP61!;2z;(P8}w9Kf0NL zq>Ff7OqGH5OOw4eBvF2&**6iR)}Lk4n*5Kk$#@{sXmo5rF8nKF`Am=b{+QX z5ha@0vJ;UZyI<8*-}x;4mtQ7=J%mQLt#?g&2IhqIpR)OJvVmAp*^@D8(-&&QIPm~U z-b^Ic9$ZW)%7tv6l4cC$%kW|&6N9R!_`xA1wMEE}1}+C_dB=S!MovaE*}AgaH{P8( zEf+{yi^lcTr8uFgaVM5@sq^AjLz`_OC&%}x^e}VAT$a>nDqbI{>&Ww@Y^(J*uR-S$ z2_)x0#-=`@WOs1hwD?G9@c-7;Z28uY9>3$I`PVZ58jR0>#$xeX#H@@S-aI=)!QeML(we zs3GERHtUQizTY;ls73?#Vf1VWglRFYlPJ`YWrLS<5%V>48@0(Sg;N?Wj>!uGArUR| z0h)S?_Up^u?wd^5hBwv~8Qvv)Yk+EHP_-IFQoW#$U8!x z$N=aqT$B3(M6cZ@BX$d}!M);9g0m?mbFim^bW~$O%J)`bL7`xtPji(pCNgI~4QRTb zs$k=M3TkJymmXK1VqIKM!xp(9kRj@`U53xAM5%adf8vYbhatcMosE8lq!?%TYK|WN z8gyZ`ZeF{~yS-3V=KEYK1#LgvJ2_h~)>43nNG^~Hbsv7LMOA3QOHT4wiDop$YevfU zQA;Sr)~-f)=^=^R9qVxo8yc*dQwW2%;E~862W)K_cfsJlE(K7GEk6j0nzOP7=lX`` z=Z?$;fZ&6edXvL-h&m5kXXd=S+aIIx&2IK;d{De#r8*S37QbTvtW^8TBJy6!N45{< z)$^3;M18x`kF!M_`7TbxE}M77KNv*BY-Uj#EX~Mef*j`u&H#CliHeoU|4JZ^Pcd); zN#L9)L;haHiAkKlT1H~35&H8zvwB>2`|anoDR?G^*7S^i&Hr?(3d3=-2d{_kg%fJG zq^i7F3qPA{S~6Xlqv7OMeGUmoZM$#ewE31{D-P6=!bO&999w`Es(;MX6F>6l`&+o# z_&D@QBOrkwAnXw8S+>`DdJdODUENhd?qw@&kEhnc%11CDMcTEMIg@Pygo^-ouA6FB z!_~=1x5KyfXJsnpXy_hrji6~gm@WV(DdC+Fv6VmBh%UBBO-95%$!}Tm283udZcB#h zyJj9LH)jgs0C&-26kAI1I>7RKLj)A=w*V%7k+eYc-*bOE(F{_&zW)9J&o}6BQkMWG zIJZk{c_pt!(#l|5zT|_TvzYuqlzt($>#RQ1Or__fhqYW9D9;9a+5T|bYT^0(7CbWR z^7!?2}syC3K@^;m1L{UsS+fs`KbrfyJmpHiH|9kYvwHASkXrhH+AFc|F& zGM|K-K%SOwAv8sNFbgRuKoaD26v_Z$n$37J;Pupre_VbU=chHjww%(UugUd}4^oXuKiq`PLt8E)$=*UaDP 
zwC`AlkkqnUR^So7Q{5~gvQML0XkomY+6oe9a?lo^@}8eqtHMGk_v)go$_Pad>?RSR zVuzNM0FYX;3R~f?ttZ&`f4G4$PqIm5&>+GMBNG1rfQ9z zw2bEK~Rix6J9f*s)*?MgnRrm9XvdIn?fR$vB^LoMcVA0*5KX#l9=i0W}xHFk%s{P>a zG&`TJpQ9>QoHKTU&lw^#5jpXmImz8#gVlfGUgLyaFC*_#4q~+5$q@j1qb!+X;Y&Z8 zJe(f|)+fu?54#47H`NfpkaLaFaE96Yiu;E65_~Zc8klIx330I}V?B6501= zM{6wLkG9AcfqtZ+?LOgsv?6H;5;*#v-RNnMzY;H<&13m{5XeyOvSV`|M@Aa?Hb{?{ z>VPGvT08T=qD9eWuE#%;Sg!Oxh65w3m9^vtJLYDD%YuyQ$dbc=mE5ENXB*%_cI6r| zuz*_RUe-D2ZAnneg>Xaw``O%FSG&8(<<7t%Kx@q|naRc3F9Jcem|25cT7o9VB`N?? z(0XiIn1L?X-!+Gs?#RXk*UW@VY!4qv(u56P`AqA+5aP+%*U*adP6t9+fCTCb*m60` z7wEg?fIuq4O7DxH1)T0tlfGD)u$hDdLoa0JylOM;t0e%(bfdy^ka0(;R& zoxv*|CBD)a`C0;ne`)% zHzqUUy2Eb(ji%r+XDW(*T#Z26pK+7ZfXB&%qku#n0kLJhIG#~K#iV=Vb~n3{QJ%g< z_EPerCcrIVHo(>uV4g*Pf<`g0!-|WI0X{j3Wf1}c9yu2nl|Bpi>8Lr3K<3U1jzZpT3_oBC18wj^FewB#pV)yL&$1AMW3X0%DT&WRXSjXp|~Qp@FqB3kQpA zR`@)#02Z9Gb3|qc*f|g}T@bYe-pPNPs4WWE_It9f7Oiw-zX72I>DVc3dMWy51_SN*FABi;y zk+PSzqR>w?e-q4fiByzd5aEFZOJ$?gorRA?&zcm7z4UBthcH;9m~+Ax0yv)C;h2Lryv-;nfe7n=fBUe+fo20{SS$sDoTftf zb@%$ThQe%dvU^QEEf-yy-kIG|*{5^4Hj?e3dT$$BaH%Ovlb{^Bhbq$1Sk&<0j54j2 z%w#&*n*15KzPh$agr7_}mr*qNqM6=$-Pzw9Y0`*l$JH~8D}$|2GN+u~HujavCkqST zz&@{)-~E}*pll3)w$o*H1{4B0iI?8YgF%!-;UvzjW%LCC3(Sh6ZH{b&gC^!$xH|vh zJVk}J>f~xeKYz~TE^=BnLl8jxg2t-}LooS8YJ=A=Aux z=>ZI`$K%pYxvCp3t=qF)7xr!OLU$T}e0QFO!gi)Vdeh3g6Z}g}Hs$7W*Q@}?PcVC zf%Yw8)2d~EbTZYq*vpQ%CTGlqYa#aN)w?oU^adkrJem@#y#VNI3 zQ{i1JKI&FGq^JMh`*UifFw=;To^P{dRvM1GFd|&itUk`~Ooo+B#?M;yxN+C2$2r>? zEOOER00svpkEEo3$g!G#|bvMXajQY*~|-(QM_Q|hmVb9Kz55*4aBzVX3ka9C!hO(xG*&jIs$x%WpN z1Tit^dG%0m9=nc6QV`bZmK;JHr;PG~>5fYm@7h=a2JWk3%?i7-lctZ3)h*y{e9lEgk+OT0o7UO%VvZ*1U7Qj}p9NAsVGhCDx6{%7UZ1bCCj8Y}&6cM*k$$Wg4wv93|v2?YR1V zA=>J!NOjW4K%dop$9(}d%ya`Uc=|XTU84|PAGEZPq}Q(mvz$C@YmD&-F5mPDfYfL% zX*hCV>}hJqF!Cs43gF_KO{hsZxDZ73IZYxJN$ymGbJ%Rc^e4-7W`!cb*U}T4DS{xZ}xUZ4RCEkj{z>Hi}+vtnTepVlqCV zWA7Qc`}d=-8#G8d9H$*pwmGz8<;ahm6p%_#AcKi#3I%I^BQL(35GA@Ta{pESf! zR-j)&3Eg7x^hNTibMC(!t46wRM3+dG<+x=3#4}@QX=md%i3R*(;HHB^u17JSB8m8_ z@({7(Xx-+lDAEq#1HLyv#YEzyo82VmJ?A8n>q$U4aQ!}$3cXm;xEhofZQoi84LL?Y zlugEK@ODa)m!iXVF2yhPfUEmc@iYH4>HQJB+1b!*;edNfJM~f7RRD!)ziztwy<8p? 
z`mqq{q4w8rWJBoSYHSfYy8-C1ef28skMjn%h~>i) zy+;Snj~qfhL3T4|@^y{Ry@5c-o*SK8DwrwW3FH2F3MKofy{a^W^ESUK^sf%*I#`>n z3%d4++65{suuT?C8GMYBfP2RsPA=K0ll1#>ZJ0d1+81>ZHe$~&Pc@BKsCMWX_PUe7 z2zb(=gH7oaJ!^~#@_-$GwL^`=WwhY%$?TlGb~F6%hisaRuw>1%b;DW8?$!+T!eHze zmOR46Onld|EB$PJH%UfaC0Y+G!zlSd9xOHar29DvzZfqMCl>NRtsBH|I6&Lfx`rM} zwqWkOA+Gu_hduFr&cU;X*JS5} z&3tpl0skz#u~K8_MwL)Uw+04$qiS48US}eR3l5oQ&gj$NQ^PKGmY&MZ$m_fS1df`b z#5Ou+!`EU_|B3|%3bOUp)c&0hS(5Ay7x!HkbH7ir63HU0$mW`AEvLS0!c|>1x$89> zp%{;+?wxwc=Jsja9J|d}P?i?Dc3l@V)NZS!)y00~O-}T!NGHs2T0b(#4SwSci5b$` z922y5X5Q5jDWseq@lcoQ;4MTiDcUz2d5z8YuO6|g0Ms^v(-w59I% zPzuqR?<~HhbIvpQ@lBL(BPnb3pEXh`>m@@gF_Cgq-?6g&LwB$fgVJL~2ni0PFcDHy z*biB|p_XXW#-O~VY=Mq&EcfwK&N5xfu?jux*OT03CD6+Q?QS=5fqo`DOW2Lcz3mNL z5F+Fa;)OvlaokZ2c-^YpF>jH9-YP^FKktUV-qKEDZDE(kqU}b&3N{$0X>Tu1^QYHc z%S}GlyU%yf5KhZ=x*bJExI`dzM#4Huuw*(NJb|W>{CQh(p2{71n^H&-E^ke!CAx*E z_@s|O&9U%1ze8Wlb$0ey`lwoiA){yb-o5b5(h_ZN83>;~mdg|b56#Jb9h`!8IY9E>OfR$i1qxET^DC;-EY6iItC25Q~hCf4Jn z_OHm&jqN1W;dKs$0feO)GZSn5gSw8v;0it~Hn%6Ra}0 zip3-t_Piw&v)G1T+Bz)!P$1zQl7MB{!t0vwL;dP?c!g}; z@T(-p?eMw^Ylq@Tp|tj5!oM7e{xuu$2OsgdfZR4!BGdC8N{#W<(j_13hz77~Z|#zXvk~Tgtyl#v&emF~`%Fn&??>&MMb}^# zh_f;sZuozK@>`>VA2t3<_+=7F!L zczP6?&>HpQw?O^ZrL{Ih);A1>MXtK~6daUbml_p&ytOM+9f!N3K!Lf(P4!xZAztVN zR~>zcAT-S@LS?nZbfqdSx05%d57iYY@&c==vae~`2R6$4M_228FlKnB2HUxS7qb+X zuX|dX!6>Dm1C=2>F}ke8Cc>GD(TJ|oLq++!5y6g;ruwu>z3k_`%Y|+Sy$TD;J8and zpDV?)H{nVLVr_~n3I5hDbTM#@R-Je{Y#k z5QAcsM-9Xyg&$sheU03#1V$r{O{ltg_k58Gg8t-T+~Y4I#$=#g*R4?E45`h7+FD80 z|AmI=7;ZG)ayg#`U7!P&?B5eJmXSaV_pS|Oe8>1VUnn=}!}3A~GCo>pupXLxEA{lX4i zg#PjhdZR|N-v&N1W6KuOI443^16wivTjvnSb4Kf}~Lv7`BS zErYwZz^I@Az{aj(Q=jZnoQ)-w>fd_$KZ;C_pQA^8M4ZYSM`B16`sfm775B@kc{^Jo zAZl3^*XQ8G1ZS7Dur0=|=}*rc+TK2)W;0>>>q!yY_>mlT=vts z-shgyzgBK}Cc2<_>YCLi8Z!Wz@tt z`RbAhN(Uax=pESvmfxX|j{w{1YlNi!Qp4Ii6bA=3kCq7Z#Vb2x%tJ{AO<2ok8@foQ zA}eXCZ?ABT3?PskSU0wfDfiH}sRAtRTq>tWKu&0O-7o2HH7KG5xTu>Kw8}gwxVUr_ zm3AuEK@K-fu_JI7086ZZ zUr>qU-+DSwApq;C*M_hs1A9anMl15Sj?wlZ)f>Q+-0ukWaS!Wcg5)!iV;pk3&zJfD zKbzggsq)r{zW=R{aX{rj5Yn54w*nh>W#(|{I~lxrJRr|PY1~4|o*Ig$;GvQ!XL#=9 zMJyo0;(y^2g^3TZ+`|8s58`F+Z>^;UbGbkdc&w{Ly;zp%S<5y3#KDjn2I!dwc5G{7 zrnEt{qE$SL{=Pj`^@qZ4`|6L37hD-5V}|H&gQv5GKE>(^85|p)Qy13~U*DZ%^USIS zCyovU6-CLhwVpiAaBs%&t4+(u)u<*SuoXE;E9FX@6lt}tQrgetUd4<6B_RK!Qru{p zGpCZ)Ic~fL7;X zyI%cxKHA>GZi#Iajn*CpFF?l=WxfyqGSLYBSMI&KE44ibW*nED?@Ae}MX;+4bzseP zjewW^i9uR8MG2e7*ENRKi4~hXHJ%(v&DwH6?{jz!`};XfQI!Y)vV3BI`L1GSD43Ci z6x0|8J`9AS3-&>2NpzWN(y^f}7YC|qRfC9i8tKw|Gb<;!NB|E$6#Se(*}w3n1n*j6 z@DBZ4ZnkTq{v!7_a55OsVNKz28tv_-vUs6IcpS#u4M#`SWaPl4nchd9DhzfbYzz5_+Qpq+%b_X4cr{1Yxb<55WNE$X}qV?Bs;XQk%hx zy>t(AMCWNyj*&vk&XdyEezRGh2aL%fU+rael&V8@uujX;Gw+#J4wni(ll-+)K9?}A z82%jKD<*Qs8ZEQ=Se#UkJHzXQMn4!Vdiu@B1^J*bO&7EDNY+Qb5J7bzkS1LsUFOCM zJpHVzN?~vJ%gc(^*F%ZjHf1Tca##OHP%nvOxtG=4q%K=)N;lx)>jAg}Xk;3{Br_y*;Z}fbicVMD>}8%I^fL4{*mHk=Y8dE zB`WgvtH#mEh-0rAqGyepL+1>XYLuKv;nPEo(hQl=TpOtBe%If?GYDg9QL=cnU700ug{NWE_8n&vn#jpay3{qWN;wmE=Ik#78cVD6;+7?p(WloR z@rP0)QpIpJsIUCht9+REqcPP6RpbcrfG26sz0~VZL=3xMV^8*ORQfb~8I$<)D1(sK z1=i0IL*N@duNM+h)*&nFf}KKpRiLq1IBfzikTyNfE`fS&ocobfECBCYT{D#1mT#*wJjb$LZYPXZ4fEfMX+vEw}ZHQBRRbGZJBev;-Q! 
z^*#wnn+E!}JRE%a$YlzCi?1-xh!^~i0kp5xSid@%UA5VwO*-GKUxa7Vg739x&_Zr* z0XLiKSCuhZigMQcHf^s)S>^hfa&nuwT?A61ijb;{ zvTHUU)H1WG)*$Diw8W+trEIL}5!x)aISX3gPR1^I(tx(Ey94{SKxzgT>;bKdK$wz2 z7KJp;81sk#6bBW!vqeIe4^~2Mhwhf%X#goe*1rnw;|m4qxXt0*+MnSON}8h#+06{u zOFr$y$KaQI2XI)~XeLa<(EGbXxr{6BR4C{G;4Ip=Y&9)#nW*!iw1y!&)uh-22KQLP zmGdHU+&~G8jKd%B!g>lx_QAft@rEg_2y|z-u`_T*6r<}Cs%K|!&4dC&wfBWbu{W3% zE20nR(a`gsp6?#JNjSp~q}b8WFxphfVzYB`*?3HQwXza%!qNwa+#}?b%0#ALCvT!2 z?9$ok9u3_&ZZ<@jJ%@JXY@1$Q*bHPzQp0xYQM(#byFLB2(|%`X=nKhfCK8finN>{d z`5&qTWy}2MaQxQTN7(CON4m}O86o>kGM;6BCI??bYiXdwF0YA|8!zh%E$i9e^9o}< zL4zb)MmiF}AJ{DGp*dT%qN5Gbe-|#Ia|<`&ll=92r$BuWF)VeM5QIv*YchF?-qJl5 zqeqWSJct5S=(##!)zBJZp^6glR0$%n+Py7EOliB(^PeizDoK_pb6L+kd=fK@AWof6 zpg^<;M0kXX{lHUJ6Sq)${dMd|6jugjnT^vG!u~r4YRu)6dB&dOuy5^`3~AnMoX#Km zY5UWcxpv9htzcKB@Bk)>xyBXln$TmJ9EtSmf^Jr2A_I*&?qo`;yoyqhtG!gt_UH)F z(1opayglFzkc0XrCPG^WcSQ0cvsF_Gc4DmfRpXRdkGs~!0W^U+Od#T6ssL{V7x1T=+Eu3n&J=UAA!Cq z0<8C!{R|P}&HXF3r$%+eRj}0d;r3Hj;kd0w7hx3TE0`rOD}A%BjtLNv2*v`oU1Ws|f9*s^eY)0wY$7s-1wH|zZhANhd>@@zc+WmCZN+Fy zQKF5Eh5ARO3m^%TU#2TozHn4pD5J^cnGh?E{=I(ve=i%Z=%^6Y*Yy%@7#={rTC-H^ zFyCw|nl9AdSj7A*geJLN_*Y(KFlxnzJZ)Lv$YF3?GCv z8OG_SHLA}~%WFSy(Ek8m`k!3}nQAO+a8w1aq!ygvFGbk{YtrY7nVL!WL9WIe4%^vuO^G0Kz-t@mkjKAtQxde>=9p7VK1=SQBfU|B9PT#J&~$lGaEMp z@bXDe8s~YloLIw%a1My(%`OiEWX>U&9ky3jw}~HU249i=&dk{=he04Beg~no25eu+ zY>Vq27fMN89s1ke#bw+0`UD&ygv0jrJT{X01`I_iW?nXT9WY`nvSsr@`+1tq1B8|_U`>>*fQWo84_|`R;OH%*APXstg=)YtASA1ofE_6qH2d*c z>dpeNzF~ZwUxyM`3LECmaqZEzhJws<39Z_1s9LZd<<-|$8m;s-vOVS&x46s0f=~@%LrZ` zsqK+k$>n}Tt!q*g7`S1y?E+}dfl~<`&;-BFsjHmu89zGcq`~|wmMgv_{qg&x)IKmr{iNoaU9-;>? zO6V!0R-I>b#q$fQ8q)%a8rnXOCW{9>EZ>st*WTn+FL;ZnDHP9M=8z2%rGq!s`lznT zbef%u!xqjZ;Xly&_T%`uR5%Giq?%kXGMl(E6v zhByZZTR#V2sVGGUWR8%GBXlf{;rlQSKL8Z!_gQ~@yjGw_rP>*BtWI zSX$0{K6jHziuycx z$5KSBCgkdGqxHvD+^X8>m11_^jAUGETXtxOkMvhCO&rOzSbDIIe(4Vdg$!Z7tRs!1 zZF1!1BcegitE%vnc6pafnq_Lmb1ra9fFYX%q~=Z5;yvDLompO5a-R?1BD9e#GN%p= zW>)+t6H3)jx;Cb(ZEX@=xNpY1HBFT}a1rRxO%mMC3j^DG(uBUrgz4P4%1l5tE-|HQ z!~a%|f`9kGO;h1eScN4ByuSGzt<&}5_1Bs&U^JEJ)tZy~`5_=?poc%dwm_is2={pK zdchn(E_(@M>G>NbYzpe33;U)ybb2;O9R;!8I|ykg_svz<3XHr z*G*SOSr!Cn=gJ<;sL)UH<}3LL=lR2PB(#F!meQGID>WR8wt~lStR3^c=5k#<{=p-! z`}4`Uw?>JFOWzxy0ar1a^!|q#oX82L~Z@s5$(kAK8N<1>dmDc*2*8m;}GKueE_;dJ99U% zM?o#etggCA0(B-}9rgiBBI7R^LpahMvf&R)q3JpzdYW!y-sQ)XV|Sk!f_Q#aEa8*y z02kCex+Z1Csf{wZJ$;leKRT1;@|j3&%4KTcC-oNYoa5J=f|ykFByc+zL*=2D@TcB? 
zCP9zv!S}GGNsTQ$HKgW%oY)>#lq1T-legr-c@NG!(~1n1IIc=jO)op>UU7@_OvEVV z#&@>O@@_1_!npV*=qeUqJr__F=RyDIH=XBTt9QLxG2!se3)abID&%F$4MLNv_*S#F z6-L%<&Xu=E_saFz7BR*d?rTj)eXev@1QC59JY;|uVD8dGoLA>)gW~jfR;IW4vTww% z1XkFd3fr4w-1G@XJR%iV>SXX}IeQ6pIZZN5dM2g@jQR^ z=#qUV#~);dvA7M50pA9$Or0mBDlWOld6aL2#hwIjLF3ibu+x(rDKIbKA}P*?$G>tq z-Nk*7YP2urDW5z-y_922E!@L9#Wc34-F9W6u&2~uiXAH_KWbaX+*-u2Il_bQ?@+|G z&@XjYjoB()WFX~q*sg^IxbwB=?K0O{x!S}MKRshgf0)A$MCP5h4=2*Tp9RL!ikGmk z=jx?@0cNGV*C;VkV~;RWD;Tuss$z|qrkUu}i~^H!UJ6M!1VJ;`0s*O=zGcC;+Wodo*B6RM4qg^zTfsc-Ormj$+Y}*OR91 z-AwcRsd^Q+f178$>a)?QGv4}iYqzlJ7Zlted6{I?8`v_Cixn2K^+uxT?ZICVEmi+t zs@$Xml{an6yWk_&nn*{0;m-#y$odz~0rnGTwRje~;}EVpnpXyQx=}U{eXbyHvGwP@ zT!+_yB>`Jf)nX%Zc638@1BYgSK&AJos{F8rM%Jaop8ZVeY&tFdnUYz z{#Su1^_gRb*$BKZ5qN?>hxQ|10Up~pr4shlSH@s>#ZZlHljOQgcTD{Ad8KqNaHb}I z`jc8a{6>FO_5B{Re>+;e1fAoYkA_j@i1*jC0QKKVaW20r9o}PR=e4EQ#quP>G!ya+ zew*j0PU6Dw%a_ndERAiP2u;dqtdYr9gz2*7dR3Eh2PsvMhj0y!k$9x-kg>xd%~!h> zDPp%*&B+jqG*xrRjq)z9c~O>Do#Od8GQhBU1l5)R2%;X5LPfZD`L6Rrv7tC{(OBc( zlp#eCM|}3^OJ_>jXdakh#DkV=Ma26QO*jf}N8sDeo28RD9)AD=GY6eH5|VlaOJC=UeN_{g*=r7w;yA-<87}HrU3=~a*-7!f}w&me|&}`3GYw_ z4S$kRy4}9j1y?`*esE!))PeIdeCsmOQ8cg$jyE!sYU?0gS4(Ft6N1eLL#V6ktC{B4 z0uou;UjRyTzY}~r4>sGEPUiW`^%tP&yeWnn0L9JEI{0o6{@KU;Y&!x_RqkTcrb*C3 z_O4pmDJBvUmRsu!{7YM}QEoo-K#tq_T`!>g3(^mrpWA_@YOzJ7>RkiSYIyTtW_*#iK$ClJxdGLa3kJ7XQVo$1#$h(mRIx#p@7m0x3bGPT_?Jm znNfIjX>N77b>k>>#GD9o>5RUFrfgQpLyd}8r30Et5MI*QiOh2G=8zv}X_$53z@GOF zIxv~vdt*fwP7?1?7+*wuZWDN>U3n!QEj}33wp}UH!~H|NssS42+VKYVk(JXt^%r)O zCB?lfM0|zGr*J|xm3r~kj%Ly#qAwR0?I3%;nWKoQ$JcX0gYM#_(HF$_xcz?3YmXyp zZkdcJh$EI4L_8P1C^Q9vko|6iZyv9_Yw-hM^kZW8*ko5?w-0mP4$BPsZXVVxPOQTB z?d30ueMnz)?Qj(DD|5N``Oj*0XVyJbb}|~daWs*?@z=qn%0N?%9q)2>&S@ZH ztKU)nr8t7;W$Y;FZY2g(pJw0hNuz;9$uWQfKYfJ$I%UBZdW^e5sEPxRgX_s%4^jIY zqhb4oq{lF0p|#Ux=nd;~g21~uQuq=4G&6c(PW8l{*MyOEoc${eC*5=z#-szo-x60Q43uN$n(83_H$wIYu~JZ!B9CL2bw2s>Gb zj!A20tLauwz17tT@V!-Jp{gySn%sO5HCpoS49q!T-Y;Cq)a}%6s!sAdOpol$O{nyU zX>_?LOTqWjM1)u9Ro*%bW#VQhO1#ML?B{)7bv~Aa-|1yOXG0S&g&IVgI9QKG^6H^j z@+jSc4hU7TD$|Lptv9Q8DlNln+1vm+n@PId|XrfNC$U*-jq6i2y>|Cg8rVj+_a$^>ij+DHO>VJJ zLik3n9Fx_1F`MxE8g&>`HG^DN$6fzR6JT#J`X9iJPBP`{%G5hrSe2A$kc4k z^W;I_Gh5-+bL^n>7Jq_Qoh0E>!t9LrGU?eg3&dq^;mY zd5J;8=bAO<$fXzdw(s&bFQ`iv`pP>i7M1w>r$pVPJzWCBQKGN*G!N?2eGlU2BJrb3 zP^!~$iglAAokW1Xs#oMj%u5J(fX{6s$`(S?CU@?*EUvZ~)&oZ=gY}+fkWkd3z zVzF53QN!+ms`h=pI(RNl=3fG)_b!uzL4uV)0w%?s=+!L;v5j1#I4opENUfFup zJW*XIgMvR@Y-2yS*bxnRPEhb@Fl~W_Msxh{`)R6DWi)3wIoJRSU>Zj97KWoVQ#dM_bh;u8LSlaX%3i_58K1jNTHH!-kO#S z^W5pqug%@bXy~$HuDJvjN@eEukW}RC%Sk&k=apLl-a7)Q=2gW^aG*viYf^F9zSh%U zqFf78`szUcZQSVOL|`Q6u6dz7WLrIX4!j(!APUY@@M6;W+`9d68P$nN%;d_^8@3tK z5swSq2U`no)?p~K+YE9c1-=i|ZX)t0eH}%Z#7c+v=?mU&@&1r=4F13=Y^17=$mzGu zF6#9n&>aZw*~NK|Xw?=d!V0i;f|vDf(lo#C!PCPjh0F}+56^?^Iy*e_&~a%6sU&Q~dr_{x^#~>TKG=kyX4?fC z%Usx!5FVN3ceoIDpOli5X>nPnSx`3UtEjNR2kA!#{XWyj>tXu28&PBLGI#x@6HF5( zJJSQ@#>=1u7l5xmR&O3xXz|uPga1yMkUmZ&ekfHE%g{YxxQ5HPwF5gW)alkaapM}h zm!P~=gvwN1m02g;Kzl+4cVQ3X{=SST4LSXgOFIG!M-4{84WCJ&-gCG|g`Da6Do*`P zpc3Yhqyd0PV?Ujx+P&wG_*AY5Z8F5DhASkM&F=IGfD*HnR0Fwxo-|aWk#ZGmAN?Gx z&F3rl$DwO}5DrHI*rr+@4DV?d_ent1HoMzjF1!PXwv$|G`{gz zwr^Gk>0iXC0b^3k@4UjTn`cBa4f~ls`~{br4idRFGUKy)p=w5rBMFb00_GIkt@EQ ze`-W;2p;C#^8NJ7<8?l*B`Hx$cMaC!8COod<8yF!>dlysNYsP$2eFdl-tB5uo9qABfjLJ_EV$mzyXj-FN}Xu(fF; zKoLgrAqUx&1U(?)QU*PqfC_c)Ab1@Jy0s0 z-L(@FJ|E(8G%OEx-w{nt8P4V6r=``F%UjJ5UUYzPqgUxwCXRebQ_0qo!1vA-)hbvBoV9oW!iB!Aax)~C%RN$B?p535q-%mD z&9ZDe2cJgn#WQIG9qL=>Fntk6RJ|#db{LsRH6Dl!^Ckx_B^YN{O`SPg(Yhi&I}DQ~ zr@zEyy&udD^NmlPFF42JtgYao`F_1SaQTYljSj^*R(nx{$OAoV2Dk+`%oB^&IXLs2 
(binary patch data omitted)
zpmHfTjjq)^vXNo14W`?cF43mIO?J28DN%)m`GK?2bk|q_4L6C~_D|dXE&=4x6J8lu zH>R;Q;vCkEhw^r|2&@JA_uTB^j(W#Vl*US_eG;Vdcj3Uu&rov;kP@Z6A(g&qz(bky zW2A(j)?{4n0R8@~&C@U4cZroU8zkmEWt^ub`#vr#k;&cbj9V(9EhNoF#1{8PT?<98 zlf)@dl~E4Ccm^ritLT|n$eNWPLKjLyIbX5uD|Kch`cM?vGxLXj6`=J&FuWNd*}M3r zu8Y9WQ6Nd7@TKv(LB=H6SEGEPD!uM$HOeT|(*8ybm6q^i&HcNqKIFSal%)Oi@bRS}q{NOBWR-~(^aCWdDdwPuR^pcX`OQY@$R3X@KeC8^ckZW2ug-u!LS5NJS#^FSkyDVLVZ&Ve zG~_0RD{%FYP}z2tP#-@C~%;w0(VUzndo@86Q0OyL-MoQy~2pXK)7aL3Un zvFVx5Alj#co0e!k`|E~84Ql={wi^s*@Nym55@i^g9Ked9tSbirk zmiJJY#ex*E1hhZl{~I}kjEF;>iQFkJp`Oq!nvWG z^bt0(1NIp3A01C-&;3uBp}qZaIrtsj@P;;18jFTu(G zcO3&y)~b0L_+Opuchz#PUg-#B0f9n;aDG$G8hdK^**yQr@oy{mk7Xz3mB07}huavz zO-w9o&5lofr6R#t0z+BMaN#xj5%1+umcn z`;6Uil*dHaaU#hFxnDCe#kQ1thDO%^@c+;Do%SM2;TPTpZen5hUyEoT^}G?Se5R?gEDFU(Hm{z)M$0JkyyokIEJ z1U@Yv2o%WwJB79Ne^Xrf(<}-|l)Ck>3Jw$guAq?YFA7^HteFm2FaN#){x0?;g+3Ao zSnLkX?>FEI*Xw%-H#28BWG6Zz+^pgM0RMm7 zqs5l7pW7H=4!42-mwP?==!RYF?aD#-yKdJ{1AoGQRXhCSFEZFeBO_=Q`>8Y$J3r9> E0sS+Z`Tzg` literal 0 HcmV?d00001 diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-issue\346\250\241\346\235\277.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-issue\346\250\241\346\235\277.md" new file mode 100644 index 0000000..d47164e --- /dev/null +++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-issue\346\250\241\346\235\277.md" @@ -0,0 +1,21 @@ +标题: +[众智-PyTorch离线推理] [问题求助] - xx算子耗时长 + + +一、问题现象(附截图): +xx模型迁移到Ascend310上,pytorch->onnx->om,模型性能不达标,原因为算子性能差,profiling数据截图如下: + + + + +二、软件版本: +-- Pytorch 版本 (源码或二进制): +-- Python 版本 (e.g., Python 3.7.5): +-- 操作系统版本 (e.g., Ubuntu 18.04): +-- CANN 版本 (e.g., CANNN 5.0.1): + + +提供附件: +1. profiling原始数据 +2. onnx模型与bs16的om模型 +3.PyToch离线推理xxx模型性能不达标测试报告.docx \ No newline at end of file diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-models_result.xlsx" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-models_result.xlsx" new file mode 100644 index 0000000000000000000000000000000000000000..1f95acd26098ea6f384121566b7ab6b8b8ef27b3 GIT binary patch literal 11905 zcmeHtWmH|uvM%oK?jGFT-QC@N;qDH>Ew}{;9^9Sa2^!qpT@wO4NY2@kv+o)A{d{A* z{=r;hQPx#8t7}z%iZY;JFhH*>Tz*sV_4)4w{qn&SV4~;*aByZ&d?7=7`2y-E**93v zXC7c6AOHvu5Yk`Cj2s;3J?w0=6Te7W|T^ZjDRlu`{HA zZkfx!(OPas@&VQ8j0uV}G6!*`X?1i@bmsJ-B+INkief{W;LN<5?1^WoUPK?DclN8} zB*b$LE40n0u^`^V735EP@Lu5ML=1!@dW(+X+(~k&Lag{PPu&B_4mW!$R9lHRtjNL4 ziZzSd*C3dji2xxEtOD8=xW5|!>uH_Ee!Vu;dyt9Pl2YY3oC}VbPCM(jJO)iTA>bUWZ#Q{5!Z^kv7^Q3*7GbR641|G zTO$z2)n`V2beS9WtN-{VKg%VM$}QMVXftAfexp|r+(T_lEzcl;(swiuN8(-6WGg?a)Z7IeVzcg=aGMi!$IqROl-y zWggUn7MyVupu00d)o4*80wB&*-4%3qJ;PFmnaXmm1Dm3F*QnIiT{5?|eVi*9k&;(o zRv4C(qk7LGKBN;kURBeX5#VL}#Ne1PB_@|jYrvdgfHWRpJ@k0BI`f;0OZ%eWj9y&4 z3kn2;`ZpK5I=cYuUY*>KBxDoJh&Ff@>G|iEGJ+;R|vOO4{9gUKPE=+0= z=(J(_$|{+HG=gk;tks}6RCZ7{Q(x~D>q<@M6pIc<_fj-*DL>y+pUuNCjT3OhP%QBcUYH30&uuKF z1NX!+>}%5ha}jz83Xq5G&+_xn1PH$f+yPED&X#6oF29#5d-*tw50F4WBDg?6SpPx$ zlZELu#zk6siJ9U^ehO-*f*0$QxL9N%A?|YxAjD)N)EZfw=r(Ugm~rRuncGW`r0A4_;oaT?gNcTyA!+@>z2(TdB*o8l-3kSueM$7BL4OY_9Is zHZ`25Ef9LzUXl|;(w49UERZ3(0? 
+**Note:**
+ >
+ >If the HwHiAiUser user is reported as missing, run useradd HwHiAiUser
+ >If the firmware installer reports "Not a physical-machine, firmware upgrade does not support.", the firmware does not need to be installed
+ >If you get ls: cannot access '.../5.0.1/x86_64-linux/toolkit/python/site-packages/bin': No such file or directory, run ```export PATH=/usr/local/python3.7.5/bin:$PATH;export LD_LIBRARY_PATH=/usr/local/python3.7.5/lib:$LD_LIBRARY_PATH```
+ >An install path can be specified with bash Ascend-cann-toolkit_5.0.1_linux-x86_64.run --install-path=/home/test --install; the CANN package path in the environment variables must then be set accordingly to /home/test/Ascend/ascend-toolkit/latest
+
+- Add a regular user
+  useradd -m your_name
+  passwd your_name
+  usermod -s /bin/bash your_name
+
+  Edit /etc/sudoers and add: your_name ALL=(ALL:ALL) ALL
+  The your_name user can then use the sudo command
+
+  If a regular user runs atc via sudo and it fails to find shared libraries, two changes are needed:
+  In /etc/sudoers, change Defaults env_reset to Defaults !env_reset
+  In /etc/bash.bashrc, add ```alias sudo='sudo env PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH'```
+
+- Install Python libraries with conda, inside your own conda environment, so that modifying the code of those libraries does not affect other users
+  List existing environments:
+  conda env list
+  Create your own conda environment:
+  conda create -n your_env_name python=3.7.5
+  Enter the environment:
+  conda activate your_env_name
+  List the Python libraries installed in the environment:
+  conda list
+  Install Python packages only inside this environment:
+  search https://anaconda.org/ for the package's install command
+  conda install -c pytorch pytorch
+  conda install -c pytorch torchvision
+  conda install -c conda-forge onnx=1.9.0
+  Check the install path:
+  python3.7
+  import torchvision
+  print(torchvision.__file__)
+  Leave the environment:
+  conda deactivate
+  Delete the environment:
+  conda remove -n your_env_name --all
+
+- Usage notes:
+  - Contact the Huawei side to request a regular username and password for logging in to the server
+  - Each user creates a directory of their own under /home/; in principle users may only develop inside their own directory and must not modify anything in other directories
+  - Do not casually update the CANN package or the driver package, modify system files, change system passwords, etc.
+  - /opt/npu is the shared dataset disk; it is only for storing shared datasets, and no other data may be written to it
+  - The common datasets used by each model all go under /root/datasets/; apart from /root/datasets and /opt/npu, common datasets should not be stored in other directories
+  - The commercial CANN packages installed in the environment by default are under /root/commerce_packages/
+  - If the latest community CANN package needs to be installed, it can go under /root/cann_community/
+
+# T4 server deployment and notes
+- Install cuda, cudnn, and tensorrt
+  - Install cuda
+    https://developer.nvidia.cn/cuda-toolkit-archive:
+    wget http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda_11.0.2_450.51.05_linux.run
+    sh cuda_11.0.2_450.51.05_linux.run
+    Do not select the driver during installation
+    Append to /etc/bash.bashrc:
+    ```
+    export CUDA_HOME=/usr/local/cuda
+    export PATH=$PATH:$CUDA_HOME/bin
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CUDA_HOME/lib64
+    ```
+  - Install cudnn
+    https://developer.nvidia.cn/rdp/cudnn-download:
+    wget https://developer.nvidia.cn/compute/machine-learning/cudnn/secure/8.2.0.53/11.3_04222021/cudnn-11.3-linux-x64-v8.2.0.53.tgz
+    tar zxvf cudnn-11.3-linux-x64-v8.2.0.53.tgz
+    cp cuda/include/cudnn.h /usr/local/cuda/include/
+    cp cuda/lib64/libcudnn* /usr/local/cuda/lib64/
+  - Install tensorRT
+    https://developer.nvidia.cn/nvidia-tensorrt-download:
+    wget https://developer.nvidia.cn/compute/machine-learning/tensorrt/secure/7.2.3/tars/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.1.tar.gz
+    tar zxvf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.1.tar.gz -C /usr/local/
+    Append to /etc/bash.bashrc:
+    ```
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/TensorRT-7.2.3.4/lib
+    export PATH=$PATH:/usr/local/TensorRT-7.2.3.4/targets/x86_64-linux-gnu/bin
+    ```
+
+- Add a regular user
+  useradd -m your_name
+  passwd your_name
+  usermod -s /bin/bash your_name
+
+  Online inference on the T4 requires sudo to install libraries:
+  Edit /etc/sudoers and add: your_name ALL=(ALL:ALL) ALL
+  The your_name user can then use the sudo command
+  Configure the T4 proxy for external network access:
+  export http_proxy=http://192.168.88.254:8080
+  export https_proxy=http://192.168.88.254:8080
+
+- Usage notes:
+  - Each regular user creates a directory of their own under /home/; in principle users may only develop inside their own directory and must not modify anything in other directories
+  - Before testing, make sure the T4 card is not running other test tasks; use nvidia-smi to check whether the card is idle
+  - On the T4, a single trtexec command is enough to measure onnx model performance; a model performance test usually finishes within half an hour to half a day, so please log out promptly once testing is done
+  - Do not update the CUDA driver package or tensorRT, modify system files, change system passwords, etc.
+  - If the onnx model does not support offline inference and online inference must be tested on the T4, it is recommended to share one T4 on which regular users get sudo for online inference testing, while the other T4s are used for onnx offline inference performance tests and do not need to grant regular users sudo
+
+
diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-\350\277\233\345\261\225.xlsx" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-\350\277\233\345\261\225.xlsx"
new file mode 100644
index 0000000000000000000000000000000000000000..fdda2d26fc8f6411dc83e8d23ad7a7c55ae4fbe7
GIT binary patch
literal 10203
(binary data omitted)
+1. ... -> the URL of the model's open-source code repository, together with its branch and commit id, must be given
+2. 2 Environment -> the specific versions of all open-source libraries this model depends on, as installed on the server, must be given
+3. 3.1 pth to onnx -> prefer the weight file provided by training; if the training weight file is available online, give the URL, otherwise state where it can be obtained. If training provides no weights, use the open-source repository's weight file. The weight file name and its md5sum value must be given
+4. 3.1 pth to onnx -> if the model's open-source code needs modification, make the modification in the form of a patch
+5. 3.1 Model conversion notes -> if a CANN operator problem makes the model conversion fail, or a workaround is needed for it to succeed, the main debugging steps, the cause, and the measures must be written in the model conversion notes
+6. 6.1 Offline inference TopN accuracy statistics -> accuracy must be tested at bs1 and bs16
+7. 6.1 Accuracy debugging -> if a CANN operator problem makes accuracy miss the target, or a workaround is needed to reach it, the main debugging steps, the cause, and the measures must be written in accuracy debugging
+8. 7 Performance comparison -> performance must be measured at bs1, 16, 4, 8, and 32, and single-card throughput must be computed
+9. 7 Performance optimization -> if a CANN operator problem makes performance miss the target, or a workaround is needed to reach it, the main debugging steps, the cause, and the measures must be written in performance optimization
+
+- test/README.md review:
+This file describes the acceptance test, mainly environment preparation; pip3.7 install -r requirements.txt may reinstall some particular pytorch version, so decide during acceptance whether to run it
+See the template [test/README.md](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50/test/README.md)
+
+- If a patch file was used to modify the model code, apply the patch to the model code; if classes or functions from the model code repository need to be referenced, add the search path via sys.path.append(r"./pytorch-nested-unet") (a sketch of this pattern follows this list)
+  The preprocessing script must not import libraries it does not use
+See https://gitee.com/ascend/modelzoo/pulls/2309
+See https://gitee.com/ascend/modelzoo/pulls/2585
+- Special cases such as the model not supporting dynamic onnx, or performance missing the target, must be explained in the PR remarks and in performance optimization
+See https://gitee.com/ascend/modelzoo/pulls/2122
+See https://gitee.com/ascend/modelzoo/pulls/2328
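+
+A minimal sketch of the sys.path.append import pattern mentioned above (the repository directory and the imported names are placeholders for whatever the actual model repository provides):
+
+```python
+import sys
+
+# Make a cloned (not pip-installed) model repository importable by adding
+# its directory to the module search path. "./pytorch-nested-unet" and
+# "archs"/"NestedUNet" are hypothetical names; substitute the real ones.
+sys.path.append(r"./pytorch-nested-unet")
+from archs import NestedUNet
+
+model = NestedUNet(num_classes=1)
+```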
+
+### Appendix: Delivery standards and conventions for model inference guides
+- Delivery standards
+  - Accuracy:
+    The accuracy of om model inference is compared against the accuracy of weights trained on Ascend 910, or against the accuracy published in the PyTorch pretrained model's github README.md or official documentation; an accuracy drop of no more than 1% counts as meeting the target
+  - Performance:
+    The single-device throughput of one NPU 310, measured by the Ascend benchmark tool while inferring over the dataset, multiplied by 4 devices to give single-card throughput, must exceed the GPU T4 single-card throughput measured with TensorRT to count as meeting the target (a numeric sketch of these two rules follows the PR content template below)
+    If the delivery requirements specify performance (easy models), the 310 performance must be higher than the T4 performance
+    If the delivery requirements do not specify performance (medium/hard models), inference on the 310 should still be optimized as far as possible
+    If the target cannot be reached, a performance-bottleneck certification request must be submitted to the Huawei side; Huawei will periodically convene an expert panel to review the requested models, and models that pass review may be delivered with performance not above the T4
+  - Scripts:
+    The code conforms to pep8;
+    Script naming must be uniform: when a file name contains the model name, the model name is lowercase, and a model name made of several words is joined with -;
+    xxx_pth2onnx.py must not contain code that downloads the pth weight file from the network; xxx_pth2onnx.py should take input and output arguments, the input being the local pth weight file and the output being the generated onnx model file name;
+    xxx_pth_preprocess.py and xxx_pth_postprocess.py should as far as possible only import basic libraries such as numpy, Pillow, torch, and pycocotools; for example, do not make the pre/post-processing scripts depend on mmdetection just because that framework's data processing and accuracy evaluation code wraps these basic-library operations;
+    The scripts and code of different models share similar processing flows, so consolidate them into common scripts and code wherever possible.
+  - Inference procedure:
+    The commands executed during the end-to-end inference procedure, etc., must be provided
+  - Summary of key problems:
+    A brief debugging account of the key problems met during end-to-end inference must be provided, covering at least model conversion notes, accuracy debugging, and performance optimization
+
+  Notes:
+  ```
+  1. If a weight file from Ascend 910 training already exists, prefer it for offline inference and align accuracy with the 910 training accuracy; if the open-source repository provides several weight files, use the one for the common baseline configuration; if the repository provides no pth weight file, ask the colleague who trained this model to provide one, or briefly train a pth weight file with the repository's training script, then compare the om accuracy against that weight file's accuracy
+
+  2. Because random numbers may fail to reproduce the data distribution, the pure-inference performance figures from the Ascend benchmark tool can be inaccurate for some models, so the performance figures in the test scripts and in the submission description are the ones the Ascend benchmark tool measures while inferring over the dataset
+
+  3. If the model supports multiple batch sizes, test accuracy and performance at batch 1, 4, 8, 16, and 32 and record them in README.md; the test scripts and the submission description only need the bs1 and bs16 accuracy and performance figures
+
+  4. If the exported onnx cannot run inference because it contains custom operators and the like, run the open-source evaluation script on the T4 to measure the pth model's online inference performance
+
+  5. For models that miss the performance target, do the following:
+  1) Optimize the onnx model by removing redundant pads that hurt performance, try the relevant Ascend atc optimization options, try retraining with nearest-neighbor resize in place of bilinear, lower the image resolution, etc., to reach the target.
+  2) For operator-caused performance problems, use profiling to locate the cause of the slowdown, down to the specific operators. Prefer modifying the model code so it selects well-performing npu operators in place of poorly-performing ones to reach the target, then open an issue on modelzoo and re-test once the fixed version is released, continuing to optimize.
+  3) Profiling data must be delivered. For models brought up to the target through the steps above, document the cause and the required actions in the delivery documents; for models that still miss the target, document the cause and a brief debugging account in the delivered README.md.
+
+  6. git clone the open-source model repository into the working directory. If the repository has no install command and pth2onnx.py needs to reference its functions or classes, add the search path via sys.path.append(r"./repo-dir"). If the repository code must be modified, turn the modification into a patch file with git diff; do not deliver the repository's code, only this patch file. See section 3.5 of this document, the maskrcnn end-to-end inference guide for open-source detectron2 loading npu weights
+
+  7. Datasets all go under the /root/datasets/ directory
+  ```
+
+- Deliverables
+  - Deliverable reference: [ResNeXt50_Onnx模型端到端推理指导.md](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50)
+  - Final deliverables:
+    The code meeting the delivery standards above, README.md, and the acceptance scripts
+    Non-code deliverables such as weight files and profiling data are zipped together and sent by email
+  - Final delivery form:
+    gitee URL: https://gitee.com/ascend/modelzoo/tree/master/contrib/ACL_PyTorch/Research
+    Commit message format: 【高校贡献-${学校学院名称}】【Pytorch离线推理-${模型名称}】${PR内容摘要}
+    Model names use UpperCamelCase; a model name made of several words is joined with a hyphen or an underscore: use an underscore when the surrounding context uses hyphens, otherwise use a hyphen
+    Models whose npu performance exceeds 1.2x the T4 performance at both batch 1 and batch 16 go under the Benchmark directory, 1-1.2x corresponds to the Official directory, and below 1x goes under Research; for now everything can simply go under contrib/ACL_PyTorch/Research
+
+- gitee PR contribution workflow
+  - fork [modelzoo](https://gitee.com/ascend/modelzoo) into your personal repository
+  - push the code to your personal repository
+  - sign the cla [link](https://clasign.osinfra.cn/sign/Z2l0ZWUlMkZhc2NlbmQ=)
+    - choose Sign Individual CLA
+    - if the PR was opened before signing, comment ```/check-cla``` on it after signing the CLA to re-run the check
+  - organize the code according to the folder naming and directory conventions, complete the self-check, open the PR using the PR content template, and assign 王姜奔 (wangjiangben_hw) as reviewer
+  - after the PR, the Huawei side reviews the code and verifies the PR; watch the PR comments and fix issues promptly
+  - once final acceptance passes, the PR is merged into the main branch
+- gitee acceptance script (please self-check) and PR content template
+  - Acceptance script (please self-check)
+    >![](public_sys-resources/icon-note.gif)
+    **Note:**
+    > **Make sure the self-check passes before submitting! Make sure the scripts below run as-is!**
+
+    ```shell
+    # Prepare the environment
+    In the delivered code folder, fetch the open-source code of the model architecture, install the necessary dependencies, fetch the weight file provided by training, get the dataset path, and fetch the benchmark tool
+
+    # Does the pth convert correctly to om
+    bash test/pth2om.sh
+
+    # Does the accuracy meet the target (the official pth accuracy and the om model accuracy must both be shown)
+    # npu performance data (test while the device is idle; if the model supports multiple batch sizes test bs1 and bs16, otherwise only bs1; performance is measured as single-card throughput); /root/datasets is the default when no dataset directory is given
+    bash test/eval_acc_perf.sh --datasets_path=/root/datasets
+
+    # Performance data on the T4 (test while the gpu is idle; if the model supports multiple batch sizes test bs1 and bs16, otherwise only bs1; if the exported onnx model cannot run offline inference because it contains custom operators etc., test the pytorch model's online inference performance on the T4 instead; performance is measured as single-card throughput)
+    bash test/perf_t4.sh
+    ```
+  - PR content template
+    - PR example link: https://gitee.com/ascend/modelzoo/pulls/887
+    - PR title
+      - [学校学院名称][高校贡献][Pytorch离线推理][模型名称]-PR内容摘要
+      - Example: [华为大学昇腾学院][高校贡献][Pytorch离线推理][ResNeXt50]-初次提交
+
+    ```
+
+
+    **What type of PR is this?**
+    > /kind task
+
+    **What does this PR do / why do we need it**:
+    # Briefly describe the details of this PR
+
+    | Model | Official accuracy | 310 accuracy | T4 performance | 310 performance |
+    | :------: | :------: | :------: | :------: | :------: |
+    | ResNeXt50 bs1 | top1:77.62% top5:93.70% | top1:77.62% top5:93.69% | 763.044fps | 1497.252fps |
+    | ResNeXt50 bs16 | top1:77.62% top5:93.70% | top1:77.62% top5:93.69% | 1234.940fps | 2096.376fps |
+    # If an unavoidable operator defect causes the performance target to be missed, add the cause and the solution here
+
+    Self-check report
+    # Acceptance test no. X
+    # Acceptance result: OK / Failed
+    # Acceptance environment: A + K / CANN 5.0.1
+    # Related issue:
+
+    # Does the pth convert correctly to om
+    bash test/pth2om.sh
+    # Acceptance result: OK / Failed
+    # Remarks: om generated successfully, no runtime errors, error log xx, etc.
+
+    # Does the accuracy meet the target (the official pth accuracy and the om model accuracy must both be shown)
+    # npu performance data (test while the device is idle; if the model supports multiple batch sizes test bs1 and bs16, otherwise only bs1; performance is measured as single-card throughput)
+    bash test/eval_acc_perf.sh --datasets_path=/root/datasets
+    # Acceptance result: yes / no
+    # Remarks: target pth accuracy top1:77.62% top5:93.70%; bs1/bs16 acceptance om accuracy top1:77.62% top5:93.69%; accuracy drop within 1%; no runtime errors, error log xx, etc.
+    # Remarks: acceptance 310 performance bs1:1497.252FPS bs16:2096.376FPS; no runtime errors, error log xx, etc.
+
+    # Performance data on the T4 (test while the gpu is idle; if the model supports multiple batch sizes test bs1 and bs16, otherwise only bs1; if the exported onnx model cannot run offline inference because it contains custom operators etc., test the pytorch model's online inference performance on the T4; performance is measured as single-card throughput); this step verifies that the T4 performance figures shown by eval_acc_perf.sh are correct, and the figures written in that script must be close to the measured T4 figures
+    bash test/perf_t4.sh
+    # Acceptance result: OK / Failed
+    # Remarks: acceptance T4 performance bs1:763.044FPS bs16:1234.940FPS, matching the T4 figures shown by the eval_acc_perf.sh script; no runtime errors, error log xx, etc.
+
+    # Does the 310 performance exceed the T4: yes / no
+    bs1: 310 = (1497.252/763.044) 1.96x T4
+    bs16: 310 = (2096.376/1234.940) 1.70x T4
+
+    - Example link: https://gitee.com/ascend/modelzoo/pulls/836#note_4750681
+
+    **Which issue(s) this PR fixes**:
+    # For later association of the PR with issues
+
+    Fixes #
+
+    **Special notes for your reviewers**:
+    # What you want to tell the reviewer during review
+
+    ```
+
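+
+The two numeric acceptance rules quoted above can be sanity-checked with a few lines of arithmetic. A minimal sketch: the fps and accuracy numbers are the sample values from the PR template above, the "x 4" follows from four 310 devices per card, and the 1% rule is read here as a relative drop:
+
+```python
+# Single-card 310 throughput = per-device throughput * 4 devices per card;
+# it must exceed the T4 single-card throughput.
+device_fps_310 = 374.313            # measured on one 310 device (sample value)
+card_fps_310 = device_fps_310 * 4   # 1497.252 fps, the bs1 value in the table
+card_fps_t4 = 763.044               # T4 single-card throughput (sample value)
+print("310/T4 ratio: %.2f" % (card_fps_310 / card_fps_t4))  # ~1.96
+
+# Accuracy rule: om accuracy may drop at most 1% relative to the reference.
+ref_top1, om_top1 = 77.62, 77.62    # sample values from the table
+drop_pct = (ref_top1 - om_top1) / ref_top1 * 100
+assert card_fps_310 > card_fps_t4, "performance below T4"
+assert drop_pct <= 1.0, "accuracy drop exceeds 1%"
+```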
zbK25X&xhp$U~Xs`ez*TnzXvw{DZ#dx@ESPz7GnHU%z2NW=OlG1uP~oLENIY}xZek? zK3(lC$SOanS?KI{l*d#Yh@2Uc=|PA~RqeQnd9i@U%G^W91+iExa&SW6pwff&$H%UP z#Z#u!qO~g)2~~zwL!O^@3ifOtEW?SITDt6G}Hu>iM{Za$=Yyw6M=U?CrNzW985Hin~lY$rpk!4O}AO zo!4}*f@a?bIcRM0t?CvfdW@ZDr7u5G^jH^E31x_3npO4 z7d|`UKGw`HHN^##40f0%jsifJ!U|=MH6X$VPR^%7WQ{S00iAnO@KwMroJ-h@+Y&7z z6F12zs=z?E`oh9VNp&*l-;-&vqcIkkMn_(CfRYy;L#7NIH6f1FP6~|bF)*x7F$+{m zgej2EyI6_IRBb46c)J32;KATs_k|*z!c2MEo`_@fpe6!=Xdv{M?u;PWhv5(VRsGwY)Z^C#?ORAnXILXm!8p!-mYYG!X)-m3yt+HAckhl+!ge{X}ONhRlC zqY}@K;BSn4gEY1GwwlDsuNRg~i)cTv+7qB>(#hh!VqtwtwT|F_!B^AInhskV-(j8` zB*Yc42d1ov9vYWw@KaimIHCwflVZYEWF$)Q1Rv}Wc~I*cJ7%zPSUj(E*QbwR#(m7G z(neLK?4K~nth5}53Pu?*tZz)#HEI1_AB-luGv>e=J{5QWsyZK^# zy&nwN)=}&HoaeF=5|NArG+U*$tpl(?+V>Mf72D73MpOkOJp+-#kmMnzBUGhMBLd)g zMg&qtLYfh8tR-jxIZ|P^Z_BPT1Ygt@g%?VoMM>^&k%+w|qrm#9^HpQZgkn(2sHpzt9J8`-P27L2;-4-zQ zzR*qY-NE#ezpN}D)w@+MSju(1hl#9`IkH*nY{er%;BX3wo;S=QVrO1}&Rbk6Z zDPsO|>iu8NDwdyTRl>7Ey*!)09S#h3e5#X1*|4vSZG>mD;a7Zcf z;!s8@ZnIn0sK`AO@){we@3>&{2-s86Or~SlVXSj@o@y!SVc>eXPht#2YDqE*L;4BK zZ&lrP6^-;xbPqTb;15rJ8ul#;Det6fFg6bqmzVxMDRprNYL|Pg9FI|*TgonBVBGMJ zG`mgxp3Vj}cB@YWaGL4F1a*Zaf)~R;vXZjQNxy+9k(i1a`suvuCS_$ zD#S@m%y${)-af`CsO0!PvC@$~8roqZALTcHGRrY5+&coN?k2>c6fN!(OUUMR*j#z*`~g`%V=7TLSgDe(E+-tMsAGzklcM| z1r$D-d%ttFT<{EBOl?n1r=!l1+;wkA29QvK-5`^g8oo(d`t-BV`RgU>e-Uri{~z!F z*W>-qb?fW9^-E#vWM*t_{KvcX%@ZC{)Jw^liu>!0QP6@Oork?xZjd6WL^U)go;Krfj%^S={feL2_ZIib%>_@nDu#r3$nZjgKk~{4I6Mg$-uig9V z_|Q~g;?Oh(@syvx?Yd|E@jm3bqp>bCXo(9U99G}!afnd=8{o6x?T?wB@!_+Y(C5om z$Z?;iIy@+BwuHUD-54K_yPLj(7=7>aYOoIK@3~N?)1i`1QFyuh1b#OCOjTP(MDmUK zgl`WjI*BJ3h?kY}K-)O_D&Xcws?Ka+s0fTZM-rBzE7}!h(dgGb$s_UB5Oyp20HwfH zW_sYT*Oc~(V>+_oP5hD9BW{iMM@*HJWWaxzqn9reylHl10P@5&JWXj2v%?v`T{?i`~N;HaW zRt={I0Wg38W44cP$07#l1#CjNq=&2daY6t`kPdLf;gbv+#^3QzOhUaO1=rm2T5Xj# zn(l8z>;fR7C2N5uSy7cXMn(d8&z3T3kSU6Xg``|*oW(Xj(=0`bEH~h-BpnwwLT6}d z{jegWo{g}3?nY@3qC8{pt4LkVUs2n0MPIxh*VqFzdtN!atB2=F9|recEf$|gV{ojH z4r?mNVK>vrozfeE;Y9;I4QfZCoM_bGmm`a% z-WSRbl;L7bUsqnlL2UnA^I^0_AY^`+_|xLLC~sSSNbRP$e7_`mDywzB832`wx;6=x zT4~}~E>*2wk#a|!JwGjf&u&C*mV#r~fyuoJ2X1FJa126N5KLV!X!^4W{4V~NLB*xz zJ1WPBvu>>x$zV50WkU1zT`yI3`Qf+5^2n7T@DN@ueEk;SHt=#+^i5g4QUI5F@FSMf z_NbH^sBCxW5_~xc49tXtzP;Ri_Ja{;#9J=&_oQ4jJ?|2I=Zb77m|@ozhcvo_A3qIQ z)p+iViWjG5fT=Y-R=eb=%$I_O#ah6QOaup~NsIdSr6(l$G$WYs(}GKvO%4@mY6TeF z3={(Kwp5W9AfKsCE(9T4^WLW?D+?%zU39Ml<5Y*u_pEv2m*tMfN;jP`p$mWRyv%H9 zNOOpAOn2&&VGgL>OeH6lAE}g|q)VXxhE}KpajuSp=tP^eOy&AsO?EgZj;4?4DW^35 z8nww^6Wapu#!iZkX)0~%qDhfBgeEcY>nV&Gp6s!&nIElQYH+T9xy3pCV8>Gfx2bt* zU2sC-VgWD;+7&4}jEGAfUD_URuqOL*E1gs_3JbCtk_LtP5%2E`GX2GC4 zv91o@Y*XR)B6i2Sve5bE%=@SW1O)%3&tZkQO(Vqkip8X7}y`W5F zJSVj+xFd{Fu_h3}aC?`_sN)vRkPXrfmU zc&dCRRDGO!7E8*d1#2FokHI^pRTVNj-aFqftu7u&P-Z(mPS}%xhkw79yJQNFu1b`C zyvm!dz0c^1+w94*LApAElJ5ynex`Cdk|%_|Y>U@p(2(I@x|}(t_nD}DucU7d2`Xq& zX5t`4?CL$?7tvA{JM~5wGWVNm$Njl5Qv22D9O6)y@%Gd;gD%ags$RmkyZ!#e3^DJ3 z2ZP(wRn@P`kr9xFCZM0RvZF~aJAk$iX7&y0?3vnN8ED%^Q%W0Iu+0k-aSPnfTM_pZ^CV_@qYAvKbjlQSs75k zUo<3~mjHuD)$RFKpm3!l2sPP$AT+v^v} z;;xU^sK*QAHqUadY-rUCQmN2xtHJ(M`(03M4#57w90qggZY^4h@J^^|mt0rBN2r~o zsixi8NFomP4kMH5;--HH`6A47FPzj-W~&)nfiUnYta$SvQ!ba#%?2`$%WOScGhY7M z1&6-SHD|@{G1~D+Xs#{XL!TVMPTj7w3F3kE zAgXMo#L^fq4eye2y|zMqWnD%mef|xo%Hf-Oy^ox-qCI>=lj>!kyFA-b9@&dmf$lAn zq}%h^om4F8LZ)lC@fmM#HtETyfak#S&6K~|5CYnpW>$4-Mimp&Rm zMZI;cX=3`=J1S$bYB@3_hi9(7Ay+lkMq++Ogc@9bFFfxTL>^SN&t@Pd#b9mgF3;(2 zZ?s2%m%mEIB~ma<6z)%e=cIE_^xYZ}LFSX#Lj;+jFwAUrx`k8dT=ZZCvbDU6(?y7g zx=4&NJ~lZM0UTuk5KxT0X(Ew$M|zai24i_trWgkGy6 zO;HfpB|Dbs*xK;IG>*x=nToXSbgjwqKn*SD0hd9u;bPp{;Mjyg3=w&vcTn766&zzw zhLl)G9`Y#NO|+I>`JmZ_fd^3so>vD%jIp2$r5CKm6p!30VjGibELzs+UNpvWikK$v 
zkO^`i0*bXISF8uls|wieH*Mm(yC(2D2R7)l{L|QPmJZe{WdTx4XXfRi5g3is|z79 z;7gZ1#fs!73}%iqj);0+nh&H_CWwJV*4r zmw1bI1}{+8h6G?$WRnTHIYo=Us}7I<5!>i1vL@@G+4NDm@D2|+NXq(MT_TWrjg4~$ zHSp}GZ9RGfF@!t_(u9gpE9M#uUq;D7ZcdSK`eIBa&rO~xVMO1&KwOoFR7d>vJ9m<@ zq8RpD^$<%cXW~|%Z%Mb|u(?QrKwRL{4u>=vs7{iOkwyTJuuu#QaN?GU#>@>V%P?!m zw^?W;LX1#iY%Y<+-N{A%{pp7borcV}gKAc~SSx6yQ+Djyv`1gaPo;o#9_%nV;vBQZ z9_PC~P`+cJl4{t(97-S*5VM5>q=oQEqv|EM4~3+>@RggD=98eSHui$H$q;L*AK9v0 zEQ9d5zGv%jQ_4IX@M=ZuCa>$;pzCp;bR0SG;Z%Ro*;W{)48XFn0dC*19-uuPHbiQ% z6*m7y;2YTrl8h*N3os*Gwu?3l0A8qK`J`fan>sPpV9r!5t26*dgiIns^_`2rX$Ogs zuS<~8w)YV-LYPwnYxjy!;lyf5n``PzrFFDfUZvuJ(Q^6-E|kUaf!|=U?1PjrnhMSb zeo3cqC#3eLO7R%z7x|_t_yX<@Pka1HMcI#jhg}FB@q4-R_I~F+f;a;1yOi5GeJpD} z@({L{_$iYkt4bV+>l*sm&zoa=*B^QY5E%sSw;nX~sV0(<33E??RjgvKyEh+I&<<{E z?|WJi-$8$cV}VXZmhQum5xVb{T9b?26biXM;q!TKd((A~N$As?7J64Ei|M^!uQQct zPLkeg5QOD+cLODEiystoxi+ z%Tx{E zjnPk+a}m>ss7WNwKn=b1r2-(%fi-Sx zR=uInjW32)QH0|JE@&Cw`H?T8AjgSR_Y!|Pf0*1|OP@TJcy}O%qgKe5-!BZ3+aFfv zN39qpLX+5p{E>)>v->DbjEWs6%;p9KK|ufuR3<#HMQawh8I*MuB~Nu(O7TobfEBo5 zTPj?5;qYQ)UaciH5~y8v>u8*DjDU=U#MwZM92wio+vDOxNRP*7+?96LyNhx$bt6Qe zMP&%g?a{1!$>ignxV8{k*p40TlH_Iga@Ou_VbvNByw>Mp-yO? zkAZ8VIFf;rVS~I;>;RgFr==#EA*{fP%c&mFR+cRj)MO?xHOwh4#0ukv)}9=buPWj! zLXdm)!)+|l?aw{cl3;;AD{f>7z}ZrDd{Nh?2&Oexh!eJq2}@c;f)NnA)oVpx*c7tDYSSvT3>=OA{OIHL z`ZD8<8bscbs!wyHisjLX)}45Jo60H^{D+#!lA|LV)i1MSCsi|4hugDDvEvgda3Aq3 zcY5dGJZyO3?u0oPt$KJZgc}bVvW8BvIi#a*B7zkMlR6+3g1&0PJn^*7sy3s75XLow zRS!MShbjx+PHkL01OHy1(l@!-ZM-yD-CvrludTS3?uw(Cp@XrJij#x6jp@%aH7Y?N zVp<5R|I~k#II^N~M}K;8mMlWGw|$G~P^;f*5iDKmi!6X2`fY1@J-U-&ml~Q8JXO`6 zaG3QY%V;70B`p`u^H(jgnA7tof*tqWEM`tyFx`Eg@G!!+ux2$dWP!`o+ndqg47U1I zGrnR}z4Y_&%r%{IVx7faKLVwolA174VcinN;Q_=|As~`lOd_eSi-veY!y`=vu~g#K z0KP!82{k9RN*dq`=Gbr5A*J9>_>%lb`iFTY#AeP08NH2i7N+}LxIS#?78%2wMT%NK zNIYEhRY?zwA*Hgt4GYM8=EqmZYc3Q6yNLuJ9uRkE3aca@v?bY80+zk&n^<+_To#d) zbg&xL6LjNfEDxRyZch++-AL1GQ$VkXk!K=O@Wf=afsb;PYhj_!mO5am^)n==n5J+{ z%N&j`sO9)a%B2hRlA}HwiaT#2Dzb8jIi2m9bI8`Vdp~hevv7`c%LCMn<-{Ur8H0D{ z>QYDN&;6j{phW$V+w)7y=4q#J9h~VCy&p1jGy&QQ<6U>3E_Usw?R(#KFg8`lc&aQc z8*yXRw!0p6Eo~<}JxsT6T%w77`B31_v-nhhweGoSdM_dP;Qqc2VGh2z;*kA`MLl8t z$K_M{xhqhR5B@#v?`wi6fq+}+WleCukSq}Yx+H#UYis$l{~9+U-}BmUmwX7==65EL zQ_H7FLLzDrPV%kcI4k^gAYs&6_`shaR+!Xcxb3ckzuc@>sqiMw)=uw@%7hz+5=e24 zNV=D>;7ZPEa^g*A)Y>o()gGc-3xln#dX{qtqa`93a6e`pblxcTxf*EteVTRGT5*H? zxo_nX>nhJ$Hz$xVZL-*;zc~XuK6LXvV$W=S#oDC01Uy9~5@a}J-6`(E`!`hs07Ar_)q<6`9fBc&zR$T~T;^vajj) z3y~Y{6G_;><&q=dSn*65iLT{3H80Yrn)`bOhC;tq-Zup zg?e-+@iZ&G<1dAL*0A97c>D5gx@);wLPtXi|0BcjDp?snF5Sz7^s+Zkk#;)dT+xvioES-tFPV!%$JJ5?SuI_F2J(9&X|ts7_xP-KMKlGk*~oJE zrwLzYPXg|H6z0kp;l8?=5{120+;h-pt77?X|K7_D0$`L}ykkEu#U6 z5J;w1-iK_u3KZ)_bcYN2eA+8Sjs!k6AxAcl?Qxj$8$~BMHGpW#2)(zod_5H<41^uBv9FsMT7ouq;H_RRIn2uDuG`*;0Iq$ug z_`VEaKDIVMFS6)WX1;n^71#2a>f40GbGP5a%)qhF{o0v5p*N_a3@`}V&tv-+4e`$? 
zy#xvf7>MZQFBIiJ#_X4Yf8Ic^$>`U3Wi-Ae`ztm5?)1;(^s4jAY4Cq^er~4!=InL1 z{u-~&UdP{A&fOf2#jUkNZpg z>?JquJAy8fK#pE>T;(Jz!{{;u{v zGTxtd{;UuGva?R~&(-3e7XIvj{$)YS1>XH}Lf8Q7WzC!-I zG5pJ8z~9yWxe;K$W`RVuXu>H45ztZu3jn@bl{)+H#+~|6qmn^%PK)0someP+yLT MgqJy2P`!HpKXunS`v3p{ -- Gitee From 8379f22d721852d920bd9f61dd652e5487990b18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=B8=9A=E5=BA=86?= Date: Sat, 17 Jul 2021 08:45:20 +0000 Subject: [PATCH 2/4] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20onnx?= =?UTF-8?q?=E7=AB=AF=E5=88=B0=E7=AB=AF=E6=8E=A8=E7=90=86=E6=8C=87=E5=AF=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.keep" | 0 .../benchmark/.keep" | 0 .../benchmark/cv/.keep" | 0 .../benchmark/cv/classification/.keep" | 0 ...50\347\220\206\346\214\207\345\257\274.md" | 605 -------- .../benchmark/cv/segmentation/.keep" | 0 .../cv/segmentation/ssd_detection.diff" | 140 -- ...50\347\220\206\346\214\207\345\257\274.md" | 1227 ----------------- ...50\347\220\206\346\214\207\345\257\274.md" | 1041 -------------- .../benchmark/nlp/.keep" | 0 .../official/.keep" | 0 .../research/.keep" | 0 12 files changed, 3013 deletions(-) delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/official/.keep" delete mode 100644 "onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep" diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/.keep" 
"b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" deleted file mode 100644 index 11de7d2..0000000 --- "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/classification/ResNeXt50_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" +++ /dev/null @@ -1,605 +0,0 @@ -# ResNeXt50 Onnx模型端到端推理指导 -- [1 模型概述](#1-模型概述) - - [1.1 论文地址](#11-论文地址) - - [1.2 代码地址](#12-代码地址) -- [2 环境说明](#2-环境说明) - - [2.1 深度学习框架](#21-深度学习框架) - - [2.2 python第三方库](#22-python第三方库) -- [3 模型转换](#3-模型转换) - - [3.1 pth转onnx模型](#31-pth转onnx模型) - - [3.2 onnx转om模型](#32-onnx转om模型) -- [4 数据集预处理](#4-数据集预处理) - - [4.1 数据集获取](#41-数据集获取) - - [4.2 数据集预处理](#42-数据集预处理) - - [4.3 生成数据集信息文件](#43-生成数据集信息文件) -- [5 离线推理](#5-离线推理) - - [5.1 benchmark工具概述](#51-benchmark工具概述) - - [5.2 离线推理](#52-离线推理) -- [6 精度对比](#6-精度对比) - - [6.1 离线推理TopN精度统计](#61-离线推理TopN精度统计) - - [6.2 开源TopN精度](#62-开源TopN精度) - - [6.3 精度对比](#63-精度对比) -- [7 性能对比](#7-性能对比) - - [7.1 npu性能数据](#71-npu性能数据) - - [7.2 T4性能数据](#72-T4性能数据) - - [7.3 性能对比](#73-性能对比) - - - -## 1 模型概述 - -- **[论文地址](#11-论文地址)** - -- **[代码地址](#12-代码地址)** - -### 1.1 论文地址 -[ResNeXt50论文](https://arxiv.org/abs/1611.05431) -本文提出了一个简单的,高度模型化的针对图像分类问题的网络结构。本文的网络是通过重复堆叠building block组成的,这些building block整合了一系列具有相同拓扑结构的变体(transformations)。本文提出的简单的设计思路可以生成一种同质的,多分支的结构。这种方法产生了一个新的维度,作者将其称为基(变体的数量,the size of the set of transformations)。在ImageNet-1K数据集上,作者可以在保证模型复杂度的限制条件下,通过提升基的大小来提高模型的准确率。更重要的是,相比于更深和更宽的网络,提升基的大小更加有效。作者将本文的模型命名为ResNeXt,本模型在ILSVRC2016上取得了第二名。本文还在ImageNet-5K和COCO数据集上进行了实验,结果均表明ResNeXt的性能比ResNet好。 - -### 1.2 代码地址 -[ResNeXt50代码](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py) - -## 2 环境说明 - -- **[深度学习框架](#21-深度学习框架)** - -- **[python第三方库](#22-python第三方库)** - -### 2.1 深度学习框架 -``` -pytorch == 1.6.0 -torchvision == 0.7.0 -onnx == 1.7.0 -``` - -### 2.2 python第三方库 - -``` -numpy == 1.18.5 -Pillow == 7.2.0 -``` - -**说明:** -> X86架构:pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装 -> -> 
-
-## 3 Model Conversion
-
-- **[pth to onnx](#31-pth-to-onnx)**
-
-- **[onnx to om](#32-onnx-to-om)**
-
-### 3.1 pth to onnx
-
-1. Download the pth weight file
-[ResNeXt50 pretrained pth weight file](https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth)
-File md5sum: 1d6611049e6ef03f1d6afa11f6f9023e
-2. Write the pth2onnx script resnext50_pth2onnx.py
-```python
-import sys
-import torch
-import torch.onnx
-import torchvision.models as models
-
-def pth2onnx(input_file, output_file):
-    model = models.resnext50_32x4d(pretrained=False)
-    checkpoint = torch.load(input_file, map_location=None)
-    model.load_state_dict(checkpoint)
-
-    model.eval()
-    input_names = ["image"]
-    output_names = ["class"]
-    dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}}
-    dummy_input = torch.randn(1, 3, 224, 224)
-    torch.onnx.export(model, dummy_input, output_file, input_names=input_names, dynamic_axes=dynamic_axes, output_names=output_names, verbose=True, opset_version=11)
-
-if __name__ == "__main__":
-    input_file = sys.argv[1]
-    output_file = sys.argv[2]
-    pth2onnx(input_file, output_file)
-```
-
- **Note:**
->Note that the onnx operator set version currently supported by ATC is 11
-
-3. Run the pth2onnx script to generate the onnx model file
-```
-python3 resnext50_pth2onnx.py resnext50_32x4d-7cdf4587.pth resnext50.onnx
-```
-
-### 3.2 onnx to om
-
-1. Set the environment variables
-```
-export install_path=/usr/local/Ascend/ascend-toolkit/latest
-export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
-export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
-export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
-export ASCEND_OPP_PATH=${install_path}/opp
-export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
-```
-2. Use atc to convert the onnx model into an om model file; for tool usage see [CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373)
-```
-atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310
-```
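-
-Before the atc conversion, the exported onnx can be sanity-checked and its dynamic batch axis smoke-tested. A minimal sketch, assuming onnx and onnxruntime are installed and using the file and tensor names from the steps above:
-```python
-import numpy as np
-import onnx
-import onnxruntime as ort
-
-# Structural check of the exported graph.
-model = onnx.load("resnext50.onnx")
-onnx.checker.check_model(model)
-
-# Smoke-test the dynamic batch axis with two different batch sizes.
-sess = ort.InferenceSession("resnext50.onnx")
-for bs in (1, 4):
-    x = np.random.randn(bs, 3, 224, 224).astype(np.float32)
-    out = sess.run(None, {"image": x})[0]
-    assert out.shape == (bs, 1000), out.shape
-print("onnx checks passed")
-```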
-
-## 4 Dataset Preprocessing
-
-- **[Getting the dataset](#41-getting-the-dataset)**
-
-- **[Preprocessing the dataset](#42-preprocessing-the-dataset)**
-
-- **[Generating the dataset info file](#43-generating-the-dataset-info-file)**
-
-### 4.1 Getting the dataset
-The model is tested with the 50,000-image validation set from the [ImageNet official site](http://www.image-net.org); the images and labels are stored in datasets/ImageNet/val_union and datasets/ImageNet/val_label.txt respectively.
-
-### 4.2 Preprocessing the dataset
-1. The preprocessing script imagenet_torch_preprocess.py
-```python
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-from PIL import Image
-import numpy as np
-import multiprocessing
-
-
-model_config = {
-    'resnet': {
-        'resize': 256,
-        'centercrop': 224,
-        'mean': [0.485, 0.456, 0.406],
-        'std': [0.229, 0.224, 0.225],
-    },
-    'inceptionv3': {
-        'resize': 342,
-        'centercrop': 299,
-        'mean': [0.485, 0.456, 0.406],
-        'std': [0.229, 0.224, 0.225],
-    },
-    'inceptionv4': {
-        'resize': 342,
-        'centercrop': 299,
-        'mean': [0.5, 0.5, 0.5],
-        'std': [0.5, 0.5, 0.5],
-    },
-}
-
-
-def center_crop(img, output_size):
-    if isinstance(output_size, int):
-        output_size = (int(output_size), int(output_size))
-    image_width, image_height = img.size
-    crop_height, crop_width = output_size
-    crop_top = int(round((image_height - crop_height) / 2.))
-    crop_left = int(round((image_width - crop_width) / 2.))
-    return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))
-
-
-def resize(img, size, interpolation=Image.BILINEAR):
-    if isinstance(size, int):
-        w, h = img.size
-        if (w <= h and w == size) or (h <= w and h == size):
-            return img
-        if w < h:
-            ow = size
-            oh = int(size * h / w)
-            return img.resize((ow, oh), interpolation)
-        else:
-            oh = size
-            ow = int(size * w / h)
-            return img.resize((ow, oh), interpolation)
-    else:
-        return img.resize(size[::-1], interpolation)
-
-
-def gen_input_bin(mode_type, file_batches, batch):
-    i = 0
-    for file in file_batches[batch]:
-        i = i + 1
-        print("batch", batch, file, "===", i)
-
-        # RGBA to RGB
-        image = Image.open(os.path.join(src_path, file)).convert('RGB')
-        image = resize(image, model_config[mode_type]['resize'])  # Resize
-        image = center_crop(image, model_config[mode_type]['centercrop'])  # CenterCrop
-        img = np.array(image, dtype=np.float32)
-        img = img.transpose(2, 0, 1)  # ToTensor: HWC -> CHW
-        img = img / 255.  # ToTensor: div 255
-        img -= np.array(model_config[mode_type]['mean'], dtype=np.float32)[:, None, None]  # Normalize: mean
-        img /= np.array(model_config[mode_type]['std'], dtype=np.float32)[:, None, None]  # Normalize: std
-        img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
-
-
-def preprocess(mode_type, src_path, save_path):
-    files = os.listdir(src_path)
-    file_batches = [files[i:i + 500] for i in range(0, 50000, 500) if files[i:i + 500] != []]
-    thread_pool = multiprocessing.Pool(len(file_batches))
-    for batch in range(len(file_batches)):
-        thread_pool.apply_async(gen_input_bin, args=(mode_type, file_batches, batch))
-    thread_pool.close()
-    thread_pool.join()
-    print("in thread, except will not report! please ensure bin files generated.")
-
-
-if __name__ == '__main__':
-    if len(sys.argv) < 4:
-        raise Exception("usage: python3 xxx.py [model_type] [src_path] [save_path]")
-    mode_type = sys.argv[1]
-    src_path = sys.argv[2]
-    save_path = sys.argv[3]
-    src_path = os.path.realpath(src_path)
-    save_path = os.path.realpath(save_path)
-    if mode_type not in model_config:
-        model_type_help = "model type: "
-        for key in model_config.keys():
-            model_type_help += key
-            model_type_help += ' '
-        raise Exception(model_type_help)
-    if not os.path.isdir(save_path):
-        os.makedirs(os.path.realpath(save_path))
-    preprocess(mode_type, src_path, save_path)
-```
-2. Run the preprocessing script to generate the preprocessed bin files of the dataset (the script requires a model_type argument; ResNeXt50 uses the resnet configuration)
-```
-python3 imagenet_torch_preprocess.py resnet datasets/ImageNet/val_union ./prep_dataset
-```
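-
-A generated bin file can be spot-checked before inference. A minimal sketch (the file name is only an example; ImageNet validation images follow the ILSVRC2012_val_* naming): each bin holds one float32 CHW tensor, and after mean/std normalization the values should be roughly centered on zero.
-```python
-import numpy as np
-
-# One preprocessed sample: 3 * 224 * 224 float32 values.
-arr = np.fromfile("prep_dataset/ILSVRC2012_val_00000001.bin", dtype=np.float32)
-img = arr.reshape(3, 224, 224)
-print(img.min(), img.max(), img.mean())
-```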
-### 4.3 Generating the dataset info file
-1. The script get_info.py generates the dataset info file
-```python
-import os
-import sys
-import cv2
-from glob import glob
-
-
-def get_bin_info(file_path, info_name, width, height):
-    bin_images = glob(os.path.join(file_path, '*.bin'))
-    with open(info_name, 'w') as file:
-        for index, img in enumerate(bin_images):
-            content = ' '.join([str(index), img, width, height])
-            file.write(content)
-            file.write('\n')
-
-
-def get_jpg_info(file_path, info_name):
-    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
-    image_names = []
-    for extension in extensions:
-        image_names.append(glob(os.path.join(file_path, '*.' + extension)))
-    with open(info_name, 'w') as file:
-        for image_name in image_names:
-            if len(image_name) == 0:
-                continue
-            else:
-                for index, img in enumerate(image_name):
-                    img_cv = cv2.imread(img)
-                    shape = img_cv.shape
-                    width, height = shape[1], shape[0]
-                    content = ' '.join([str(index), img, str(width), str(height)])
-                    file.write(content)
-                    file.write('\n')
-
-
-if __name__ == '__main__':
-    file_type = sys.argv[1]
-    file_path = sys.argv[2]
-    info_name = sys.argv[3]
-    if file_type == 'bin':
-        width = sys.argv[4]
-        height = sys.argv[5]
-        assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5'
-        get_bin_info(file_path, info_name, width, height)
-    elif file_type == 'jpg':
-        assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3'
-        get_jpg_info(file_path, info_name)
-```
-2. Run the script to generate the dataset info file
-```
-python3 get_info.py bin ./prep_dataset ./resnext50_prep_bin.info 224 224
-```
-The first argument is the type of the model input, the second is the path of the generated bin files, the third is the output info file, and the remaining ones are the width and height.
-## 5 Offline Inference
-
-- **[benchmark tool overview](#51-benchmark-tool-overview)**
-
-- **[Offline inference](#52-offline-inference)**
-
-### 5.1 benchmark tool overview
-
-The benchmark tool is Huawei's in-house model inference tool. It supports offline inference for many kinds of models, can quickly measure a model's performance on the Ascend 310, and supports both real-data and pure-inference modes; combined with post-processing scripts it covers the end-to-end flow for many models. For obtaining the tool and its usage see [CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373)
-### 5.2 Offline inference
-1. Set the environment variables
-```
-export install_path=/usr/local/Ascend/ascend-toolkit/latest
-export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
-export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
-export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
-export ASCEND_OPP_PATH=${install_path}/opp
-export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
-```
-2. Run offline inference
-```
-./benchmark -model_type=vision -device_id=0 -batch_size=16 -om_path=resnext50_bs16.om -input_text_path=./resnext50_prep_bin.info -input_width=224 -input_height=224 -output_binary=False -useDvpp=False
-```
-By default the results are saved under result/dumpOutput_devicex in the current directory. The model has a single output named class, with shape bs * 1000 and data type FP32, corresponding to the prediction scores for the 1000 classes; the output for each input corresponds to one _x file.
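-
-Before running the full accuracy script in the next section, a single result file can be inspected by hand. A minimal sketch (the exact result file name depends on the input name and the _x suffix described above): with -output_binary=False each result file holds the 1000 scores as space-separated text on one line.
-```python
-import numpy as np
-
-# Read the 1000 class scores of one image and take the top-5 predictions.
-with open("result/dumpOutput_device0/ILSVRC2012_val_00000001_1.txt") as f:
-    scores = np.array(f.readline().split(), dtype=np.float32)
-print("top-5 class ids:", np.argsort(-scores)[:5])
-```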
-
-## 6 精度对比
-
-- **[离线推理TopN精度](#61-离线推理TopN精度)**
-- **[开源TopN精度](#62-开源TopN精度)**
-- **[精度对比](#63-精度对比)**
-
-### 6.1 离线推理TopN精度
-
-后处理脚本vision_metric_ImageNet.py统计TopN精度
-```python
-import os
-import sys
-import json
-import numpy as np
-import time
-
-np.set_printoptions(threshold=sys.maxsize)
-
-LABEL_FILE = "HiAI_label.json"
-
-
-def gen_file_name(img_name):
-    full_name = img_name.split('/')[-1]
-    index = full_name.rfind('.')
-    return full_name[:index]
-
-
-def cre_groundtruth_dict(gtfile_path):
-    """
-    :param gtfile_path: directory of json files that contain the image name and label number
-    :return: dictionary with image name as key and label number as value
-    """
-    img_gt_dict = {}
-    for gtfile in os.listdir(gtfile_path):
-        if (gtfile != LABEL_FILE):
-            with open(os.path.join(gtfile_path, gtfile), 'r') as f:
-                gt = json.load(f)
-                ret = gt["image"]["annotations"][0]["category_id"]
-                img_gt_dict[gen_file_name(gtfile)] = ret
-    return img_gt_dict
-
-
-def cre_groundtruth_dict_fromtxt(gtfile_path):
-    """
-    :param gtfile_path: txt file that contains the image name and label number
-    :return: dictionary with image name as key and label number as value
-    """
-    img_gt_dict = {}
-    with open(gtfile_path, 'r') as f:
-        for line in f.readlines():
-            temp = line.strip().split(" ")
-            imgName = temp[0].split(".")[0]
-            imgLab = temp[1]
-            img_gt_dict[imgName] = imgLab
-    return img_gt_dict
-
-
-def load_statistical_predict_result(filepath):
-    """
-    function:
-        the prediction result file data extraction
-    input:
-        result file: filepath
-    output:
-        n_label: number of labels
-        data_vec: the probabilities of the 1000 classes
-    :return: probabilities, number of labels, in_type, color
-    """
-    with open(filepath, 'r') as f:
-        data = f.readline()
-        temp = data.strip().split(" ")
-        n_label = len(temp)
-        if data == '':
-            n_label = 0
-        data_vec = np.zeros((n_label), dtype=np.float32)
-        in_type = ''
-        color = ''
-        if n_label == 0:
-            in_type = f.readline()
-            color = f.readline()
-        else:
-            for ind, prob in enumerate(temp):
-                data_vec[ind] = np.float32(prob)
-    return data_vec, n_label, in_type, color
-
-
-def create_visualization_statistical_result(prediction_file_path,
-                                            result_store_path, json_file_name,
-                                            img_gt_dict, topn=5):
-    """
-    :param prediction_file_path: directory of benchmark prediction files
-    :param result_store_path: directory to store the result json
-    :param json_file_name: name of the result json file
-    :param img_gt_dict: dictionary of image name to ground truth label
-    :param topn: statistics are collected for Top1 to TopN accuracy
-    :return: None
-    """
-    writer = open(os.path.join(result_store_path, json_file_name), 'w')
-    table_dict = {}
-    table_dict["title"] = "Overall statistical evaluation"
-    table_dict["value"] = []
-
-    count = 0
-    resCnt = 0
-    n_labels = 0
-    count_hit = np.zeros(topn)
-    for tfile_name in os.listdir(prediction_file_path):
-        count += 1
-        temp = tfile_name.split('.')[0]
-        index = temp.rfind('_')
-        img_name = temp[:index]
-        filepath = os.path.join(prediction_file_path, tfile_name)
-        ret = load_statistical_predict_result(filepath)
-        prediction = ret[0]
-        n_labels = ret[1]
-        sort_index = np.argsort(-prediction)
-        gt = img_gt_dict[img_name]
-        if (n_labels == 1000):
-            realLabel = int(gt)
-        elif (n_labels == 1001):
-            realLabel = int(gt) + 1
-        else:
-            realLabel = int(gt)
-
-        resCnt = min(len(sort_index), topn)
-        for i in range(resCnt):
-            if (str(realLabel) == str(sort_index[i])):
-                count_hit[i] += 1
-                break
-
-    if 'value' not in table_dict.keys():
-        print("the item value does not exist!")
-    else:
-        table_dict["value"].extend(
-            [{"key": "Number of images", "value": str(count)},
-             {"key": "Number of classes", "value": str(n_labels)}])
-        if count == 0:
-            accuracy = 0
-        else:
-            accuracy = np.cumsum(count_hit) / count
-        for i in range(resCnt):
-            table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy",
-                                        "value": str(
-                                            round(accuracy[i] * 100, 2)) + '%'})
-    json.dump(table_dict, writer)
-    writer.close()
-
-
-if __name__ == '__main__':
-    start = time.time()
-    try:
-        # txt file path
-        folder_davinci_target = sys.argv[1]
-        # annotation files path, "val_label.txt"
-        annotation_file_path = sys.argv[2]
-        # the path to store the results json path
-        result_json_path = sys.argv[3]
-        # result json file name
-        json_file_name = sys.argv[4]
-    except IndexError:
-        print("Stopped!")
-        exit(1)
-
-    if not (os.path.exists(folder_davinci_target)):
-        print("target file folder does not exist.")
-
-    if not (os.path.exists(annotation_file_path)):
-        print("Ground truth file does not exist.")
-
-    if not (os.path.exists(result_json_path)):
-        print("Result folder doesn't exist.")
-
-    img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path)
-    create_visualization_statistical_result(folder_davinci_target,
-                                            result_json_path, json_file_name,
-                                            img_label_dict, topn=5)
-
-    elapsed = (time.time() - start)
-    print("Time used:", elapsed)
-```
-调用vision_metric_ImageNet.py脚本将推理结果与label比对,可以获得Accuracy Top1~Top5数据,结果保存在result.json中。
-```
-python3 vision_metric_ImageNet.py result/dumpOutput_device0/ dataset/ImageNet/val_label.txt ./ result.json
-```
-第一个参数为benchmark输出目录,第二个为数据集配套标签,第三个是生成文件的保存目录,第四个是生成的文件名。
-查看输出结果:
-```
-{"title": "Overall statistical evaluation", "value": [{"key": "Number of images", "value": "50000"}, {"key": "Number of classes", "value": "1000"}, {"key": "Top1 accuracy", "value": "77.62%"}, {"key": "Top2 accuracy", "value": "87.42%"}, {"key": "Top3 accuracy", "value": "90.79%"}, {"key": "Top4 accuracy", "value": "92.56%"}, {"key": "Top5 accuracy", "value": "93.69%"}]}
-```
-### 6.2 开源TopN精度
-[torchvision官网精度](https://pytorch.org/vision/stable/models.html)
-```
-Model               Acc@1     Acc@5
-ResNeXt-50-32x4d    77.618    93.698
-```
-### 6.3 精度对比
-将得到的om离线模型推理TopN精度与该模型github代码仓上公布的精度对比,精度下降在1%范围之内,故精度达标。
-
-## 7 性能对比
-
-- **[npu性能数据](#71-npu性能数据)**
-- **[T4性能数据](#72-T4性能数据)**
-- **[性能对比](#73-性能对比)**
-
-### 7.1 npu性能数据
-batch1的性能:
-测试npu性能要确保device空闲,使用npu-smi info命令可查看device是否在运行其它推理任务
-```
-./benchmark -round=50 -om_path=resnext50_bs1.om -device_id=0 -batch_size=1
-```
-执行50次纯推理取均值,统计吞吐率与其倒数即时延(benchmark的时延是单个数据的推理时间);npu性能是单个device执行的结果,一张Ascend 310卡包含4个device
-```
-[INFO] Dataset number: 49 finished cost 2.635ms
-[INFO] PureInfer result saved in ./result/PureInfer_perf_of_resnext50_bs1_in_device_0.txt
------------------PureInfer Performance Summary------------------
-[INFO] ave_throughputRate: 374.313samples/s, ave_latency: 2.67914ms
-```
-batch16的性能:
-```
-./benchmark -round=50 -om_path=resnext50_bs16.om -device_id=0 -batch_size=16
-```
-```
-[INFO] Dataset number: 49 finished cost 30.514ms
-[INFO] PureInfer result saved in ./result/PureInfer_perf_of_resnext50_bs16_in_device_0.txt
------------------PureInfer Performance Summary------------------
-[INFO] ave_throughputRate: 524.094samples/s, ave_latency: 1.9101ms
-```
-### 7.2 T4性能数据
-batch1性能:
-在T4机器上安装开源TensorRT,使用trtexec测试性能
-```
-cd /usr/local/TensorRT-7.2.2.3/targets/x86_64-linux-gnu/bin/
-./trtexec --onnx=resnext50.onnx --fp16 --shapes=image:1x3x224x224 --threads
-```
-T4性能数据取GPU Compute的mean值作为时延(tensorrt的时延是一个batch数据的推理时间),即吞吐率的倒数乘以batch
-```
-[03/24/2021-03:54:47] [I] GPU Compute
-[03/24/2021-03:54:47] [I] min: 1.26575 ms
-[03/24/2021-03:54:47] [I] max: 4.41528 ms
-[03/24/2021-03:54:47] [I] mean: 1.31054 ms
-[03/24/2021-03:54:47] [I] median: 1.30151 ms
-[03/24/2021-03:54:47] [I] percentile: 1.40723 ms at 99%
-[03/24/2021-03:54:47] [I] total compute time: 2.9972 s
-```
-batch16性能:
-```
-./trtexec --onnx=resnext50.onnx --fp16 --shapes=image:16x3x224x224 --threads
-```
-```
-[03/24/2021-03:57:22] [I] GPU Compute
-[03/24/2021-03:57:22] [I] min: 12.5645 ms
-[03/24/2021-03:57:22] [I] max: 14.8437 ms
-[03/24/2021-03:57:22] [I] mean: 12.9561 ms
-[03/24/2021-03:57:22] [I] median: 12.8541 ms
-[03/24/2021-03:57:22] [I] percentile: 14.8377 ms at 99%
-[03/24/2021-03:57:22] [I] total compute time: 3.03173 s
-```
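-为便于比较,可将两种工具输出的时延统一换算为吞吐率,以下为换算示意脚本(数值取自上文日志,npu吞吐率乘4对应整卡4个device,与下节的对比口径一致):
-```python
-# 示意:将benchmark与trtexec的结果换算为吞吐率(fps)进行对比
-results = {
-    1:  {"npu_rate": 374.313, "t4_mean_ms": 1.31054},   # batch1,数值取自上文日志
-    16: {"npu_rate": 524.094, "t4_mean_ms": 12.9561},   # batch16
-}
-for bs, r in results.items():
-    npu_fps = r["npu_rate"] * 4              # benchmark统计单device吞吐率,整卡为4个device
-    t4_fps = bs * 1000.0 / r["t4_mean_ms"]   # trtexec的mean是一个batch的时延(ms)
-    print("bs%-2d: npu %.0f fps, T4 %.0f fps, npu/T4 = %.2f" % (bs, npu_fps, t4_fps, npu_fps / t4_fps))
-```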
-### 7.3 性能对比
-batch1:2.67914/4 < 1.31054/1
-batch16:1.9101/4 < 12.9561/16
-npu的吞吐率乘4大于T4的吞吐率,等价地,npu的时延除以4小于T4的时延除以batch,故npu性能高于T4性能,性能达标。
-换算成吞吐率后,batch1时npu约为1497 fps、T4约为763 fps,batch16时npu约为2096 fps、T4约为1235 fps,npu性能均达到T4性能的1.2倍以上,该模型放在benchmark/cv/classification目录下。
-
-
diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/.keep"
deleted file mode 100644
index e69de29..0000000
diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff"
deleted file mode 100644
index 6c8e012..0000000
--- "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/ssd_detection.diff"
+++ /dev/null
@@ -1,140 +0,0 @@
-diff --git a/mmdet/core/anchor/anchor_generator.py b/mmdet/core/anchor/anchor_generator.py
-index 3c2fd5a0..f6d11fa7 100644
---- a/mmdet/core/anchor/anchor_generator.py
-+++ b/mmdet/core/anchor/anchor_generator.py
-@@ -197,6 +197,8 @@ class AnchorGenerator:
-             tuple[torch.Tensor]: The mesh grids of x and y.
- """ - # use shape instead of len to keep tracing while exporting to onnx -+ x = x.to(dtype=torch.int32) -+ y = y.to(dtype=torch.int32) - xx = x.repeat(y.shape[0]) - yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) - if row_major: -diff --git a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -index 98d30906..48bcdae3 100644 ---- a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -+++ b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -@@ -207,10 +207,22 @@ def delta2bbox(rois, - deltas.size(-1) // 4) - stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) - denorm_deltas = deltas * stds + means -- dx = denorm_deltas[..., 0::4] -+ '''dx = denorm_deltas[..., 0::4] - dy = denorm_deltas[..., 1::4] - dw = denorm_deltas[..., 2::4] -- dh = denorm_deltas[..., 3::4] -+ dh = denorm_deltas[..., 3::4]''' -+ if denorm_deltas.shape[2] > 4: -+ #please self fix when shape[2] > 4 -+ denorm_deltas = denorm_deltas.view(-1, 80, 4) -+ dx = denorm_deltas[:, :, 0:1:].view(-1, 80) -+ dy = denorm_deltas[:, :, 1:2:].view(-1, 80) -+ dw = denorm_deltas[:, :, 2:3:].view(-1, 80) -+ dh = denorm_deltas[:, :, 3:4:].view(-1, 80) -+ else: -+ dx = denorm_deltas[..., 0:1:] -+ dy = denorm_deltas[..., 1:2:] -+ dw = denorm_deltas[..., 2:3:] -+ dh = denorm_deltas[..., 3:4:] - - x1, y1 = rois[..., 0], rois[..., 1] - x2, y2 = rois[..., 2], rois[..., 3] -diff --git a/mmdet/models/dense_heads/anchor_head.py b/mmdet/models/dense_heads/anchor_head.py -index e7c975f5..e2d057e9 100644 ---- a/mmdet/models/dense_heads/anchor_head.py -+++ b/mmdet/models/dense_heads/anchor_head.py -@@ -9,6 +9,55 @@ from ..builder import HEADS, build_loss - from .base_dense_head import BaseDenseHead - from .dense_test_mixins import BBoxTestMixin - -+class BatchNMSOp(torch.autograd.Function): -+ @staticmethod -+ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). -+ scores (torch.Tensor): scores in shape (batch, N, C). -+ return: -+ nmsed_boxes: (1, N, 4) -+ nmsed_scores: (1, N) -+ nmsed_classes: (1, N) -+ nmsed_num: (1,) -+ """ -+ -+ # Phony implementation for onnx export -+ nmsed_boxes = bboxes[:, :max_total_size, 0, :] -+ nmsed_scores = scores[:, :max_total_size, 0] -+ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) -+ nmsed_num = torch.Tensor([max_total_size]) -+ -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+ @staticmethod -+ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', -+ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, -+ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (N, 4). -+ scores (torch.Tensor): scores in shape (N, ). 
-+ """ -+ -+ if bboxes.dtype == torch.float32: -+ bboxes = bboxes.reshape(bboxes.size(0), bboxes.shape[1].numpy(), -1, 4).half() -+ scores = scores.reshape(scores.size(0), scores.shape[1].numpy(), -1).half() -+ else: -+ bboxes = bboxes.reshape(bboxes.size(0), bboxes.shape[1].numpy(), -1, 4) -+ scores = scores.reshape(scores.size(0), scores.shape[1].numpy(), -1) -+ -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, -+ score_threshold, iou_threshold, max_size_per_class, max_total_size) -+ nmsed_boxes = nmsed_boxes.float() -+ nmsed_scores = nmsed_scores.float() -+ nmsed_classes = nmsed_classes.long() -+ dets = torch.cat((nmsed_boxes.reshape((bboxes.size(0), max_total_size, 4)), nmsed_scores.reshape((bboxes.size(0), max_total_size, 1))), -1) -+ labels = nmsed_classes.reshape((bboxes.size(0), max_total_size)) -+ return dets, labels - - @HEADS.register_module() - class AnchorHead(BaseDenseHead, BBoxTestMixin): -@@ -653,7 +702,10 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): - anchors = anchors.expand_as(bbox_pred) - # Always keep topk op for dynamic input in onnx - from mmdet.core.export import get_k_for_topk -- nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) -+ #nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) -+ nms_pre = bbox_pred.shape[1] -+ if nms_pre_tensor > 0 and bbox_pred.shape[1] > nms_pre_tensor: -+ nms_pre = nms_pre_tensor - if nms_pre > 0: - # Get maximum scores for foreground classes. - if self.use_sigmoid_cls: -@@ -662,11 +714,14 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class -- max_scores, _ = scores[..., :-1].max(-1) -+ scores_tmp = scores.permute(2, 1, 0) -+ max_scores, _ = scores_tmp[:-1, ...].max(0) -+ max_scores = max_scores.permute(1, 0) - - _, topk_inds = max_scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( -- -1, 1).expand_as(topk_inds) -+ -1, 1).to(dtype=torch.int32).expand_as(topk_inds) -+ batch_inds = batch_inds.to(dtype=torch.int64) - anchors = anchors[batch_inds, topk_inds, :] - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - scores = scores[batch_inds, topk_inds, :] -@@ -694,6 +749,8 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin): - iou_threshold = cfg.nms.get('iou_threshold', 0.5) - score_threshold = cfg.score_thr - nms_pre = cfg.get('deploy_nms_pre', -1) -+ dets, labels = batch_nms_op(batch_mlvl_bboxes, batch_mlvl_scores, score_threshold, iou_threshold, cfg.max_per_img, cfg.max_per_img) -+ return dets, labels - return add_dummy_nms_for_onnx(batch_mlvl_bboxes, batch_mlvl_scores, - max_output_boxes_per_class, - iou_threshold, score_threshold, diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" deleted file mode 100644 index e365eaa..0000000 --- 
"a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216detectron2\350\256\255\347\273\203\347\232\204npu\346\235\203\351\207\215\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" +++ /dev/null @@ -1,1227 +0,0 @@ -# 基于detectron2训练的npu权重的maskrcnn Onnx模型端到端推理指导 -- [1 模型概述](#1-模型概述) - - [1.1 论文地址](#11-论文地址) - - [1.2 代码地址](#12-代码地址) -- [2 环境说明](#2-环境说明) - - [2.1 深度学习框架](#21-深度学习框架) - - [2.2 python第三方库](#22-python第三方库) -- [3 模型转换](#3-模型转换) - - [3.1 pth转onnx模型](#31-pth转onnx模型) - - [3.2 onnx转om模型](#32-onnx转om模型) -- [4 数据集预处理](#4-数据集预处理) - - [4.1 数据集获取](#41-数据集获取) - - [4.2 数据集预处理](#42-数据集预处理) - - [4.3 生成数据集信息文件](#43-生成数据集信息文件) -- [5 离线推理](#5-离线推理) - - [5.1 benchmark工具概述](#51-benchmark工具概述) - - [5.2 离线推理](#52-离线推理) -- [6 精度对比](#6-精度对比) - - [6.1 离线推理精度统计](#61-离线推理精度统计) - - [6.2 开源精度](#62-开源精度) - - [6.3 精度对比](#63-精度对比) -- [7 性能对比](#7-性能对比) - - [7.1 npu性能数据](#71-npu性能数据) - - [7.2 T4性能数据](#72-T4性能数据) - - [7.3 性能对比](#73-性能对比) - - - -## 1 模型概述 - -- **[论文地址](#11-论文地址)** - -- **[代码地址](#12-代码地址)** - -### 1.1 论文地址 -[maskrcnn论文](https://arxiv.org/abs/1703.06870) -论文提出了一个简单、灵活、通用的目标实例分割框架Mask R-CNN。这个框架可同时做目标检测、实例分割。实例分割的实现就是在faster r-cnn的基础上加了一个可以预测目标掩膜(mask)的分支。只比Faster r-cnn慢一点,5fps。很容易拓展到其他任务如:关键点检测。18年在coco的目标检测、实例分割、人体关键点检测都取得了最优成绩。 - -### 1.2 代码地址 -[cpu,gpu版detectron2框架maskrcnn代码](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) - -[npu版detectron2框架maskrcnn代码](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch) - -## 2 环境说明 - -- **[深度学习框架](#21-深度学习框架)** - -- **[python第三方库](#22-python第三方库)** - -### 2.1 深度学习框架 -``` -pytorch == 1.8.0 -torchvision == 0.9.0 -onnx == 1.8.0 -``` - -**注意:** -> 转onnx的环境上pytorch需要安装1.8.0版本 -> - -### 2.2 python第三方库 - -``` -numpy == 1.18.5 -opencv-python == 4.2.0.34 -``` - -**说明:** -> X86架构:opencv,pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装 -> -> Arm架构:opencv,pytorch,torchvision和onnx可以通过源码编译安装,其它可以通过pip3.7 install 包名 安装 - -## 3 模型转换 - -- **[pth转onnx模型](#31-pth转onnx模型)** - -- **[onnx转om模型](#32-onnx转om模型)** - -detectron2暂支持pytorch1.8导出pytorch框架的onnx,npu权重可以使用开源的detectron2加载,因此基于pytorch1.8与开源detectron2导出含npu权重的onnx。atc暂不支持动态shape小算子,可以使用大颗粒算子替换这些小算子规避,这些小算子可以在转onnx时的verbose打印中找到其对应的python代码,从而根据功能用大颗粒算子替换,onnx能推导出变量正确的shape与算子属性正确即可,变量实际的数值无关紧要,因此这些大算子函数的功能实现无关紧要,因包含自定义算子需要去掉对onnx模型的校验。 - -### 3.1 pth转onnx模型 - -1.获取pth权重文件 -[maskrcnn基于detectron2预训练的npu权重文件](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch) -文件md5sum: b95f35f051012a02875220482a568c3b -2.下载detectron2源码并安装 -```shell -git clone https://github.com/facebookresearch/detectron2 -python3.7 -m pip install -e detectron2 -``` - - **说明:** -> 安装所需的依赖说明请参考detectron2/INSTALL.md -> -> 重装pytorch后需要rm -rf detectron2/build/ **/*.so再重装detectron2 - -3.detectron2代码迁移,参见maskrcnn_detectron2.diff: -```diff -diff --git a/detectron2/layers/__init__.py b/detectron2/layers/__init__.py -index c8bd1fb..f5fa9ea 100644 ---- a/detectron2/layers/__init__.py -+++ b/detectron2/layers/__init__.py -@@ -2,7 +2,7 @@ - from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm - from .deform_conv import DeformConv, ModulatedDeformConv - from .mask_ops import paste_masks_in_image --from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated -+from .nms import batched_nms, 
batch_nms_op, batched_nms_rotated, nms, nms_rotated - from .roi_align import ROIAlign, roi_align - from .roi_align_rotated import ROIAlignRotated, roi_align_rotated - from .shape_spec import ShapeSpec -diff --git a/detectron2/layers/nms.py b/detectron2/layers/nms.py -index ac14d45..22efb24 100644 ---- a/detectron2/layers/nms.py -+++ b/detectron2/layers/nms.py -@@ -15,6 +15,56 @@ if TORCH_VERSION < (1, 7): - else: - nms_rotated_func = torch.ops.detectron2.nms_rotated - -+class BatchNMSOp(torch.autograd.Function): -+ @staticmethod -+ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). -+ scores (torch.Tensor): scores in shape (batch, N, C). -+ return: -+ nmsed_boxes: (1, N, 4) -+ nmsed_scores: (1, N) -+ nmsed_classes: (1, N) -+ nmsed_num: (1,) -+ """ -+ -+ # Phony implementation for onnx export -+ nmsed_boxes = bboxes[:, :max_total_size, 0, :] -+ nmsed_scores = scores[:, :max_total_size, 0] -+ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) -+ nmsed_num = torch.Tensor([max_total_size]) -+ -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+ @staticmethod -+ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', -+ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, -+ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (N, 4). -+ scores (torch.Tensor): scores in shape (N, ). 
-+ """ -+ -+ num_classes = bboxes.shape[1].numpy() // 4 -+ if bboxes.dtype == torch.float32: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() -+ else: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1) -+ -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, -+ score_threshold, iou_threshold, max_size_per_class, max_total_size) -+ nmsed_boxes = nmsed_boxes.float() -+ nmsed_scores = nmsed_scores.float() -+ nmsed_classes = nmsed_classes.long() -+ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) -+ labels = nmsed_classes.reshape((max_total_size, )) -+ return dets, labels - - def batched_nms( - boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float -diff --git a/detectron2/modeling/box_regression.py b/detectron2/modeling/box_regression.py -index 12be000..074f3e3 100644 ---- a/detectron2/modeling/box_regression.py -+++ b/detectron2/modeling/box_regression.py -@@ -87,20 +87,33 @@ class Box2BoxTransform(object): - deltas = deltas.float() # ensure fp32 for decoding precision - boxes = boxes.to(deltas.dtype) - -- widths = boxes[:, 2] - boxes[:, 0] -- heights = boxes[:, 3] - boxes[:, 1] -- ctr_x = boxes[:, 0] + 0.5 * widths -- ctr_y = boxes[:, 1] + 0.5 * heights -+ boxes_prof = boxes.permute(1, 0) -+ widths = boxes_prof[2, :] - boxes_prof[0, :] -+ heights = boxes_prof[3, :] - boxes_prof[1, :] -+ ctr_x = boxes_prof[0, :] + 0.5 * widths -+ ctr_y = boxes_prof[1, :] + 0.5 * heights - - wx, wy, ww, wh = self.weights -- dx = deltas[:, 0::4] / wx -+ '''dx = deltas[:, 0::4] / wx - dy = deltas[:, 1::4] / wy - dw = deltas[:, 2::4] / ww -- dh = deltas[:, 3::4] / wh -+ dh = deltas[:, 3::4] / wh''' -+ denorm_deltas = deltas -+ if denorm_deltas.shape[1] > 4: -+ denorm_deltas = denorm_deltas.view(-1, 80, 4) -+ dx = denorm_deltas[:, :, 0:1:].view(-1, 80) / wx -+ dy = denorm_deltas[:, :, 1:2:].view(-1, 80) / wy -+ dw = denorm_deltas[:, :, 2:3:].view(-1, 80) / ww -+ dh = denorm_deltas[:, :, 3:4:].view(-1, 80) / wh -+ else: -+ dx = denorm_deltas[:, 0:1:] / wx -+ dy = denorm_deltas[:, 1:2:] / wy -+ dw = denorm_deltas[:, 2:3:] / ww -+ dh = denorm_deltas[:, 3:4:] / wh - - # Prevent sending too large values into torch.exp() -- dw = torch.clamp(dw, max=self.scale_clamp) -- dh = torch.clamp(dh, max=self.scale_clamp) -+ dw = torch.clamp(dw, min=-float('inf'), max=self.scale_clamp) -+ dh = torch.clamp(dh, min=-float('inf'), max=self.scale_clamp) - - pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] - pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] -diff --git a/detectron2/modeling/meta_arch/rcnn.py b/detectron2/modeling/meta_arch/rcnn.py -index e5f66d1..1bbba71 100644 ---- a/detectron2/modeling/meta_arch/rcnn.py -+++ b/detectron2/modeling/meta_arch/rcnn.py -@@ -199,8 +199,9 @@ class GeneralizedRCNN(nn.Module): - """ - assert not self.training - -- images = self.preprocess_image(batched_inputs) -- features = self.backbone(images.tensor) -+ # images = self.preprocess_image(batched_inputs) -+ images = batched_inputs -+ features = self.backbone(images) - - if detected_instances is None: - if self.proposal_generator is not None: -diff --git a/detectron2/modeling/poolers.py b/detectron2/modeling/poolers.py -index e5d72ab..7c0dd2f 100644 ---- a/detectron2/modeling/poolers.py -+++ b/detectron2/modeling/poolers.py -@@ -94,6 +94,31 @@ def 
convert_boxes_to_pooler_format(box_lists: List[Boxes]): - - return pooler_fmt_boxes - -+import torch.onnx.symbolic_helper as sym_help -+ -+class RoiExtractor(torch.autograd.Function): -+ @staticmethod -+ def forward(self, f0, f1, f2, f3, rois, aligned=0, finest_scale=56, pooled_height=7, pooled_width=7, -+ pool_mode='avg', roi_scale_factor=0, sample_num=0, spatial_scale=[0.25, 0.125, 0.0625, 0.03125]): -+ """ -+ feats (torch.Tensor): feats in shape (batch, 256, H, W). -+ rois (torch.Tensor): rois in shape (k, 5). -+ return: -+ roi_feats (torch.Tensor): (k, 256, pooled_width, pooled_width) -+ """ -+ -+ # phony implementation for shape inference -+ k = rois.size()[0] -+ roi_feats = torch.ones(k, 256, pooled_height, pooled_width) -+ return roi_feats -+ -+ @staticmethod -+ def symbolic(g, f0, f1, f2, f3, rois, aligned=0, finest_scale=56, pooled_height=7, pooled_width=7): -+ # TODO: support tensor list type for feats -+ #f_tensors = sym_help._unpack_list(feats) -+ roi_feats = g.op('RoiExtractor', f0, f1, f2, f3, rois, aligned_i=0, finest_scale_i=56, pooled_height_i=pooled_height, pooled_width_i=pooled_width, -+ pool_mode_s='avg', roi_scale_factor_i=0, sample_num_i=0, spatial_scale_f=[0.25, 0.125, 0.0625, 0.03125], outputs=1) -+ return roi_feats - - class ROIPooler(nn.Module): - """ -@@ -202,6 +227,12 @@ class ROIPooler(nn.Module): - A tensor of shape (M, C, output_size, output_size) where M is the total number of - boxes aggregated over all N batch images and C is the number of channels in `x`. - """ -+ if torch.onnx.is_in_onnx_export(): -+ output_size = self.output_size[0] -+ pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) -+ roi_feats = RoiExtractor.apply(x[0], x[1], x[2], x[3], pooler_fmt_boxes, 0, 56, output_size, output_size) -+ return roi_feats -+ - num_level_assignments = len(self.level_poolers) - - assert isinstance(x, list) and isinstance( -diff --git a/detectron2/modeling/proposal_generator/proposal_utils.py b/detectron2/modeling/proposal_generator/proposal_utils.py -index 9c10436..b3437a7 100644 ---- a/detectron2/modeling/proposal_generator/proposal_utils.py -+++ b/detectron2/modeling/proposal_generator/proposal_utils.py -@@ -4,7 +4,7 @@ import math - from typing import List, Tuple - import torch - --from detectron2.layers import batched_nms, cat -+from detectron2.layers import batch_nms_op, cat - from detectron2.structures import Boxes, Instances - from detectron2.utils.env import TORCH_VERSION - -@@ -68,15 +68,19 @@ def find_top_rpn_proposals( - for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)): - Hi_Wi_A = logits_i.shape[1] - if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing -- num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) -+ num_proposals_i = torch.clamp(Hi_Wi_A, min=0, max=pre_nms_topk) - else: - num_proposals_i = min(Hi_Wi_A, pre_nms_topk) - - # sort is faster than topk: https://github.com/pytorch/pytorch/issues/22812 -- # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) -- logits_i, idx = logits_i.sort(descending=True, dim=1) -+ num_proposals_i = num_proposals_i.item() -+ logits_i = logits_i.reshape(logits_i.size(1)) -+ topk_scores_i, topk_idx = torch.topk(logits_i, num_proposals_i) -+ topk_scores_i = topk_scores_i.reshape(1, topk_scores_i.size(0)) -+ topk_idx = topk_idx.reshape(1, topk_idx.size(0)) -+ '''logits_i, idx = logits_i.sort(descending=True, dim=1) - topk_scores_i = logits_i.narrow(1, 0, num_proposals_i) -- topk_idx = idx.narrow(1, 0, num_proposals_i) -+ topk_idx = 
idx.narrow(1, 0, num_proposals_i)''' - - # each is N x topk - topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 -@@ -108,7 +112,7 @@ def find_top_rpn_proposals( - lvl = lvl[valid_mask] - boxes.clip(image_size) - -- # filter empty boxes -+ '''# filter empty boxes - keep = boxes.nonempty(threshold=min_box_size) - if _is_tracing() or keep.sum().item() != len(boxes): - boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep] -@@ -126,7 +130,14 @@ def find_top_rpn_proposals( - res = Instances(image_size) - res.proposal_boxes = boxes[keep] - res.objectness_logits = scores_per_img[keep] -+ results.append(res)''' -+ -+ dets, labels = batch_nms_op(boxes.tensor, scores_per_img, 0, nms_thresh, post_nms_topk, post_nms_topk) -+ res = Instances(image_size) -+ res.proposal_boxes = Boxes(dets[:, :4]) -+ res.objectness_logits = dets[:, 4] - results.append(res) -+ - return results - - -diff --git a/detectron2/modeling/proposal_generator/rpn.py b/detectron2/modeling/proposal_generator/rpn.py -index 1675377..77d9f26 100644 ---- a/detectron2/modeling/proposal_generator/rpn.py -+++ b/detectron2/modeling/proposal_generator/rpn.py -@@ -434,7 +434,7 @@ class RPN(nn.Module): - else: - losses = {} - proposals = self.predict_proposals( -- anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes -+ anchors, pred_objectness_logits, pred_anchor_deltas, [(1344, 1344)] - ) - return proposals, losses - -@@ -485,7 +485,8 @@ class RPN(nn.Module): - B = anchors_i.tensor.size(1) - pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) - # Expand anchors to shape (N*Hi*Wi*A, B) -- anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) -+ s = torch.zeros(N, anchors_i.tensor.unsqueeze(0).size(1), anchors_i.tensor.unsqueeze(0).size(2)) -+ anchors_i = anchors_i.tensor.unsqueeze(0).expand_as(s).reshape(-1, B) - proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) - # Append feature map proposals with shape (N, Hi*Wi*A, B) - proposals.append(proposals_i.view(N, -1, B)) -diff --git a/detectron2/modeling/roi_heads/fast_rcnn.py b/detectron2/modeling/roi_heads/fast_rcnn.py -index 348f6a0..87c7cd3 100644 ---- a/detectron2/modeling/roi_heads/fast_rcnn.py -+++ b/detectron2/modeling/roi_heads/fast_rcnn.py -@@ -7,7 +7,7 @@ from torch import nn - from torch.nn import functional as F - - from detectron2.config import configurable --from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple -+from detectron2.layers import ShapeSpec, batch_nms_op, cat, cross_entropy, nonzero_tuple - from detectron2.modeling.box_regression import Box2BoxTransform - from detectron2.structures import Boxes, Instances - from detectron2.utils.events import get_event_storage -@@ -144,7 +144,7 @@ def fast_rcnn_inference_single_image( - # Convert to Boxes to use the `clip` function ... - boxes = Boxes(boxes.reshape(-1, 4)) - boxes.clip(image_shape) -- boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 -+ boxes = boxes.tensor.view(-1, num_bbox_reg_classes.item(), 4) # R x C x 4 - - # 1. Filter results based on detection scores. It can make NMS more efficient - # by filtering out low-confidence detections. -@@ -152,7 +152,7 @@ def fast_rcnn_inference_single_image( - # R' x 2. First column contains indices of the R predictions; - # Second column contains indices of classes. 
- filter_inds = filter_mask.nonzero() -- if num_bbox_reg_classes == 1: -+ '''if num_bbox_reg_classes == 1: - boxes = boxes[filter_inds[:, 0], 0] - else: - boxes = boxes[filter_mask] -@@ -167,7 +167,14 @@ def fast_rcnn_inference_single_image( - result = Instances(image_shape) - result.pred_boxes = Boxes(boxes) - result.scores = scores -- result.pred_classes = filter_inds[:, 1] -+ result.pred_classes = filter_inds[:, 1]''' -+ -+ dets, labels = batch_nms_op(boxes, scores, score_thresh, nms_thresh, topk_per_image, topk_per_image) -+ result = Instances(image_shape) -+ result.pred_boxes = Boxes(dets[:, :4]) -+ result.scores = dets.permute(1, 0)[4, :] -+ result.pred_classes = labels -+ - return result, filter_inds[:, 0] - - -diff --git a/detectron2/modeling/roi_heads/mask_head.py b/detectron2/modeling/roi_heads/mask_head.py -index 5ac5c4b..f81b96b 100644 ---- a/detectron2/modeling/roi_heads/mask_head.py -+++ b/detectron2/modeling/roi_heads/mask_head.py -@@ -142,7 +142,9 @@ def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Ins - num_masks = pred_mask_logits.shape[0] - class_pred = cat([i.pred_classes for i in pred_instances]) - indices = torch.arange(num_masks, device=class_pred.device) -- mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() -+ print(indices,class_pred) -+ # mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() -+ mask_probs_pred = pred_mask_logits.sigmoid() - # mask_probs_pred.shape: (B, 1, Hmask, Wmask) - - num_boxes_per_image = [len(i) for i in pred_instances] -diff --git a/detectron2/structures/boxes.py b/detectron2/structures/boxes.py -index 57f862a..bad473b 100644 ---- a/detectron2/structures/boxes.py -+++ b/detectron2/structures/boxes.py -@@ -202,10 +202,11 @@ class Boxes: - """ - assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" - h, w = box_size -- x1 = self.tensor[:, 0].clamp(min=0, max=w) -- y1 = self.tensor[:, 1].clamp(min=0, max=h) -- x2 = self.tensor[:, 2].clamp(min=0, max=w) -- y2 = self.tensor[:, 3].clamp(min=0, max=h) -+ boxes_prof = self.tensor.permute(1, 0) -+ x1 = boxes_prof[0, :].clamp(min=0, max=w) -+ y1 = boxes_prof[1, :].clamp(min=0, max=h) -+ x2 = boxes_prof[2, :].clamp(min=0, max=w) -+ y2 = boxes_prof[3, :].clamp(min=0, max=h) - self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) - - def nonempty(self, threshold: float = 0.0) -> torch.Tensor: -diff --git a/tools/deploy/export_model.py b/tools/deploy/export_model.py -index fe2fe30..22145b7 100755 ---- a/tools/deploy/export_model.py -+++ b/tools/deploy/export_model.py -@@ -77,6 +77,28 @@ def export_scripting(torch_model): - # TODO inference in Python now missing postprocessing glue code - return None - -+from typing import Dict, Tuple -+import numpy -+from detectron2.structures import ImageList -+def preprocess_image(batched_inputs: Tuple[Dict[str, torch.Tensor]]): -+ """ -+ Normalize, pad and batch the input images. 
-+ """ -+ images = [x["image"].to('cpu') for x in batched_inputs] -+ images = [(x - numpy.array([[[103.530]], [[116.280]], [[123.675]]])) / numpy.array([[[1.]], [[1.]], [[1.]]]) for x in images] -+ import torch.nn.functional as F -+ image = torch.zeros(0, 1344, 1344) -+ for i in range(images[0].size(0)): -+ img = images[0][i] -+ img = img.expand((1, 1, img.size(0), img.size(1))) -+ img = img.to(dtype=torch.float32) -+ img = F.interpolate(img, size=(int(1344), int(1344)), mode='bilinear', align_corners=False) -+ img = img[0][0] -+ img = img.unsqueeze(0) -+ image = torch.cat((image, img)) -+ images = [image] -+ images = ImageList.from_tensors(images, 32) -+ return images - - # experimental. API not yet final - def export_tracing(torch_model, inputs): -@@ -84,6 +106,8 @@ def export_tracing(torch_model, inputs): - image = inputs[0]["image"] - inputs = [{"image": image}] # remove other unused keys - -+ inputs = preprocess_image(inputs).tensor.to(torch.float32) -+ image = inputs - if isinstance(torch_model, GeneralizedRCNN): - - def inference(model, inputs): -@@ -104,7 +128,7 @@ def export_tracing(torch_model, inputs): - elif args.format == "onnx": - # NOTE onnx export currently failing in pytorch - with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f: -- torch.onnx.export(traceable_model, (image,), f) -+ torch.onnx.export(traceable_model, (image,), f, opset_version=11, verbose=True) - logger.info("Inputs schema: " + str(traceable_model.inputs_schema)) - logger.info("Outputs schema: " + str(traceable_model.outputs_schema)) - - -``` - **修改依据:** -> 1.slice,topk算子问题导致pre_nms_topk未生效,atc转换报错,修改参见maskrcnn_detectron2.diff -> 2.expand会引入where动态算子因此用expand_as替换 -> 3.slice跑在aicpu有错误,所以改为dx = denorm_deltas[:, :, 0:1:].view(-1, 80) / wx,使其运行在aicore上 -> 4.atc转换时根据日志中报错的算子在转onnx时的verbose打印中找到其对应的python代码,然后找到规避方法解决,具体修改参见maskrcnn_detectron2.diff -> 5.其它地方的修改原因参见精度调试与性能优化 - - -通过打补丁的方式修改detectron2: -```shell -cd detectron2 -patch -p1 < ../maskrcnn_detectron2.diff -cd .. 
-``` -4.修改pytorch代码去除导出onnx时进行检查 -将/usr/local/python3.7.5/lib/python3.7/site-packages/torch/onnx/utils.py文件的_check_onnx_proto(proto)改为pass - -5.准备coco2017验证集,数据集获取参见本文第四章第一节 -在当前目录按结构构造数据集:datasets/coco目录下有annotations与val2017,annotations目录存放coco数据集的instances_val2017.json,val2017目录存放coco数据集的5000张验证图片。 -或者修改detectron2/detectron2/data/datasets/builtin.py为_root = os.getenv("DETECTRON2_DATASETS", "/opt/npu/dataset/")指定coco数据集所在的目录/opt/npu/dataset/。 - -6.运行如下命令,在output目录生成model.onnx -```shell -python3.7 detectron2/tools/deploy/export_model.py --config-file detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --output ./output --export-method tracing --format onnx MODEL.WEIGHTS model_final.pth MODEL.DEVICE cpu - -mv output/model.onnx model_py1.8.onnx -``` - -### 3.2 onnx转om模型 - -1.设置环境变量 -```shell -export install_path=/usr/local/Ascend/ascend-toolkit/latest -export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH -export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH -export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH -export ASCEND_OPP_PATH=${install_path}/opp -export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ -``` -2.使用atc将onnx模型转换为om模型文件,工具使用方法可以参考[CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373),需要指定输出节点以去除无用输出,使用netron开源可视化工具查看具体的输出节点名: -```shell -atc --model=model_py1.8.onnx --framework=5 --output=maskrcnn_detectron2_npu --input_format=NCHW --input_shape="0:1,3,1344,1344" --out_nodes="Cast_1673:0;Gather_1676:0;Reshape_1667:0;Slice_1706:0" --log=debug --soc_version=Ascend310 -``` - -## 4 数据集预处理 - -- **[数据集获取](#41-数据集获取)** - -- **[数据集预处理](#42-数据集预处理)** - -- **[生成数据集信息文件](#43-生成数据集信息文件)** - -### 4.1 数据集获取 -该模型使用[COCO官网](https://cocodataset.org/#download)的coco2017的5千张验证集进行测试,图片与标签分别存放在/opt/npu/dataset/coco/val2017/与/opt/npu/dataset/coco/annotations/instances_val2017.json。 - -### 4.2 数据集预处理 -1.预处理脚本maskrcnn_pth_preprocess_detectron2.py -```python -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
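-
-# 脚本流程说明:
-# 1. 用cv2读取图片(BGR通道序),短边等比例缩放到800,若缩放后长边超过1333则按长边1333再缩放;
-# 2. 减去均值[103.53, 116.28, 123.675](std为1,即不做方差归一化);
-# 3. 在图片右侧与下侧padding到model_input_width x model_input_height(默认1344x1344);
-# 4. HWC转为CHW,以float32保存为bin文件;multiprocessing按每100张图片一个进程并行处理。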
- -import os -import argparse -import numpy as np -import cv2 -import torch -import multiprocessing - -def resize(img, size): - old_h = img.shape[0] - old_w = img.shape[1] - scale_ratio = 800 / min(old_w, old_h) - new_w = int(np.floor(old_w * scale_ratio)) - new_h = int(np.floor(old_h * scale_ratio)) - if max(new_h, new_w) > 1333: - scale = 1333 / max(new_h, new_w) - new_h = new_h * scale - new_w = new_w * scale - new_w = int(new_w + 0.5) - new_h = int(new_h + 0.5) - resized_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR) - return resized_img - -def gen_input_bin(file_batches, batch): - i = 0 - for file in file_batches[batch]: - i = i + 1 - print("batch", batch, file, "===", i) - - image = cv2.imread(os.path.join(flags.image_src_path, file), cv2.IMREAD_COLOR) - image = resize(image, (800, 1333)) - mean = np.array([103.53, 116.28, 123.675], dtype=np.float32) - std = np.array([1., 1., 1.], dtype=np.float32) - img = image.copy().astype(np.float32) - mean = np.float64(mean.reshape(1, -1)) - std = 1 / np.float64(std.reshape(1, -1)) - cv2.subtract(img, mean, img) - cv2.multiply(img, std, img) - img = cv2.copyMakeBorder(img, 0, flags.model_input_height - img.shape[0], 0, flags.model_input_width - img.shape[1], cv2.BORDER_CONSTANT, value=0) - #os.makedirs('./paded_jpg/', exist_ok=True) - #cv2.imwrite('./paded_jpg/' + file.split('.')[0] + '.jpg', img) - img = img.transpose(2, 0, 1) - img.tofile(os.path.join(flags.bin_file_path, file.split('.')[0] + ".bin")) - -def preprocess(src_path, save_path): - files = os.listdir(src_path) - file_batches = [files[i:i + 100] for i in range(0, 5000, 100) if files[i:i + 100] != []] - thread_pool = multiprocessing.Pool(len(file_batches)) - for batch in range(len(file_batches)): - thread_pool.apply_async(gen_input_bin, args=(file_batches, batch)) - thread_pool.close() - thread_pool.join() - print("in thread, except will not report! please ensure bin files generated.") - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='preprocess of MaskRCNN PyTorch model') - parser.add_argument("--image_src_path", default="./coco2017/", help='image of dataset') - parser.add_argument("--bin_file_path", default="./coco2017_bin/", help='Preprocessed image buffer') - parser.add_argument("--model_input_height", default=1344, type=int, help='input tensor height') - parser.add_argument("--model_input_width", default=1344, type=int, help='input tensor width') - flags = parser.parse_args() - if not os.path.exists(flags.bin_file_path): - os.makedirs(flags.bin_file_path) - preprocess(flags.image_src_path, flags.bin_file_path) -``` -2.执行预处理脚本,生成数据集预处理后的bin文件 -```shell -python3.7 maskrcnn_pth_preprocess_detectron2.py --image_src_path=/opt/npu/dataset/coco/val2017 --bin_file_path=val2017_bin --model_input_height=1344 --model_input_width=1344 -``` -### 4.3 生成数据集信息文件 -1.生成数据集信息文件脚本get_info.py -```python -import os -import sys -import cv2 -from glob import glob - - -def get_bin_info(file_path, info_name, width, height): - bin_images = glob(os.path.join(file_path, '*.bin')) - with open(info_name, 'w') as file: - for index, img in enumerate(bin_images): - content = ' '.join([str(index), img, width, height]) - file.write(content) - file.write('\n') - - -def get_jpg_info(file_path, info_name): - extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] - image_names = [] - for extension in extensions: - image_names.append(glob(os.path.join(file_path, '*.' 
+ extension))) - with open(info_name, 'w') as file: - for image_name in image_names: - if len(image_name) == 0: - continue - else: - for index, img in enumerate(image_name): - img_cv = cv2.imread(img) - shape = img_cv.shape - width, height = shape[1], shape[0] - content = ' '.join([str(index), img, str(width), str(height)]) - file.write(content) - file.write('\n') - - -if __name__ == '__main__': - file_type = sys.argv[1] - file_path = sys.argv[2] - info_name = sys.argv[3] - if file_type == 'bin': - width = sys.argv[4] - height = sys.argv[5] - assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5' - get_bin_info(file_path, info_name, width, height) - elif file_type == 'jpg': - assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3' - get_jpg_info(file_path, info_name) -``` -2.执行生成数据集信息脚本,生成数据集信息文件 -```shell -python3.7 get_info.py bin val2017_bin maskrcnn.info 1344 1344 -``` -第一个参数为模型输入的类型,第二个参数为生成的bin文件路径,第三个为输出的info文件,后面为宽高信息 -## 5 离线推理 - -- **[benchmark工具概述](#51-benchmark工具概述)** - -- **[离线推理](#52-离线推理)** - -### 5.1 benchmark工具概述 - -benchmark工具为华为自研的模型推理工具,支持多种模型的离线推理,能够迅速统计出模型在Ascend310上的性能,支持真实数据和纯推理两种模式,配合后处理脚本,可以实现诸多模型的端到端过程,获取工具及使用方法可以参考[CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373) -### 5.2 离线推理 -1.设置环境变量 -```shell -export install_path=/usr/local/Ascend/ascend-toolkit/latest -export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH -export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH -export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH -export ASCEND_OPP_PATH=${install_path}/opp -export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ -``` -2.执行离线推理 -```shell -./benchmark.x86_64 -model_type=vision -om_path=maskrcnn_detectron2_npu.om -device_id=0 -batch_size=1 -input_text_path=maskrcnn.info -input_width=1344 -input_height=1344 -useDvpp=false -output_binary=true -``` -输出结果默认保存在当前目录result/dumpOutput_device0,模型有四个输出,每个输入对应的输出对应四个_x.bin文件 -``` -输出 shape 数据类型 数据含义 -output1 100 * 4 FP32 boxes -output2 100 * 1 FP32 scores -output3 100 * 1 INT64 labels -output4 100 * 80 * 28 * 28 FP32 masks -``` - -## 6 精度对比 - -- **[离线推理精度](#61-离线推理精度)** -- **[开源精度](#62-开源精度)** -- **[精度对比](#63-精度对比)** - -### 6.1 离线推理精度统计 - -后处理统计map精度 -```python -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
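-
-# 脚本流程说明:
-# 1. 读取benchmark输出目录中每个样本的4个bin文件:boxes(100x4, float32)、scores(100x1, float32)、
-#    labels(100x1, int64)、masks(100x80x28x28, float32);
-# 2. 按label选出每个框对应类别的28x28 mask,用paste_masks_in_image贴回1344x1344输入图,
-#    再按预处理的缩放比例还原到原图尺寸;
-# 3. box坐标除以预处理的缩放比例还原到原图尺寸,组装成detectron2的Instances;
-# 4. 调用COCOEvaluator统计map精度,同时将检测结果写入txt(可选将检测框画在图上保存为jpg)。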
- -import os -import argparse -import cv2 -import numpy as np - -def postprocess_bboxes(bboxes, image_size, net_input_width, net_input_height): - org_w = image_size[0] - org_h = image_size[1] - - scale = 800 / min(org_w, org_h) - new_w = int(np.floor(org_w * scale)) - new_h = int(np.floor(org_h * scale)) - if max(new_h, new_w) > 1333: - scale = 1333 / max(new_h, new_w) * scale - - bboxes[:, 0] = (bboxes[:, 0]) / scale - bboxes[:, 1] = (bboxes[:, 1]) / scale - bboxes[:, 2] = (bboxes[:, 2]) / scale - bboxes[:, 3] = (bboxes[:, 3]) / scale - - return bboxes - -def postprocess_masks(masks, image_size, net_input_width, net_input_height): - org_w = image_size[0] - org_h = image_size[1] - - scale = 800 / min(org_w, org_h) - new_w = int(np.floor(org_w * scale)) - new_h = int(np.floor(org_h * scale)) - if max(new_h, new_w) > 1333: - scale = 1333 / max(new_h, new_w) * scale - - pad_w = net_input_width - org_w * scale - pad_h = net_input_height - org_h * scale - top = 0 - left = 0 - hs = int(net_input_height - pad_h) - ws = int(net_input_width - pad_w) - - masks = masks.to(dtype=torch.float32) - res_append = torch.zeros(0, org_h, org_w) - if torch.cuda.is_available(): - res_append = res_append.to(device='cuda') - for i in range(masks.size(0)): - mask = masks[i][0][top:hs, left:ws] - mask = mask.expand((1, 1, mask.size(0), mask.size(1))) - mask = F.interpolate(mask, size=(int(org_h), int(org_w)), mode='bilinear', align_corners=False) - mask = mask[0][0] - mask = mask.unsqueeze(0) - res_append = torch.cat((res_append, mask)) - - return res_append[:, None] - -import pickle -def save_variable(v, filename): - f = open(filename, 'wb') - pickle.dump(v, f) - f.close() -def load_variavle(filename): - f = open(filename, 'rb') - r = pickle.load(f) - f.close() - return r - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--test_annotation", default="./origin_pictures.info") - parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0/") - parser.add_argument("--det_results_path", default="./detection-results/") - parser.add_argument("--net_out_num", type=int, default=4) - parser.add_argument("--net_input_width", type=int, default=1344) - parser.add_argument("--net_input_height", type=int, default=1344) - parser.add_argument("--ifShowDetObj", action="store_true", help="if input the para means True, neither False.") - flags = parser.parse_args() - - img_size_dict = dict() - with open(flags.test_annotation)as f: - for line in f.readlines(): - temp = line.split(" ") - img_file_path = temp[1] - img_name = temp[1].split("/")[-1].split(".")[0] - img_width = int(temp[2]) - img_height = int(temp[3]) - img_size_dict[img_name] = (img_width, img_height, img_file_path) - - bin_path = flags.bin_data_path - det_results_path = flags.det_results_path - os.makedirs(det_results_path, exist_ok=True) - total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name]) - - import torch - from torchvision.models.detection.roi_heads import paste_masks_in_image - import torch.nn.functional as F - from detectron2.evaluation import COCOEvaluator - from detectron2.structures import Boxes, Instances - from detectron2.data import DatasetCatalog, MetadataCatalog - import logging - logging.basicConfig(level=logging.INFO) - evaluator = COCOEvaluator('coco_2017_val') - evaluator.reset() - coco_class_map = {id:name for id, name in enumerate(MetadataCatalog.get('coco_2017_val').thing_classes)} - results = [] - - cnt = 0 - for bin_file in sorted(total_img): - cnt = cnt 
+ 1 - print(cnt - 1, bin_file) - path_base = os.path.join(bin_path, bin_file) - res_buff = [] - for num in range(1, flags.net_out_num + 1): - if os.path.exists(path_base + "_" + str(num) + ".bin"): - if num == 1: - buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") - buf = np.reshape(buf, [100, 4]) - elif num == 2: - buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") - buf = np.reshape(buf, [100, 1]) - elif num == 3: - buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="int64") - buf = np.reshape(buf, [100, 1]) - elif num == 4: - bboxes = np.fromfile(path_base + "_" + str(num - 3) + ".bin", dtype="float32") - bboxes = np.reshape(bboxes, [100, 4]) - bboxes = torch.from_numpy(bboxes) - scores = np.fromfile(path_base + "_" + str(num - 2) + ".bin", dtype="float32") - scores = np.reshape(scores, [100, 1]) - scores = torch.from_numpy(scores) - labels = np.fromfile(path_base + "_" + str(num - 1) + ".bin", dtype="int64") - labels = np.reshape(labels, [100, 1]) - labels = torch.from_numpy(labels) - mask_pred = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") - mask_pred = np.reshape(mask_pred, [100, 80, 28, 28]) - mask_pred = torch.from_numpy(mask_pred) - - org_img_size = img_size_dict[bin_file][:2] - result = Instances((org_img_size[1], org_img_size[0])) - - if torch.cuda.is_available(): - mask_pred = mask_pred.to(device='cuda') - img_shape = (flags.net_input_height, flags.net_input_width) - mask_pred = mask_pred[range(len(mask_pred)), labels[:, 0]][:, None] - masks = paste_masks_in_image(mask_pred, bboxes[:, :4], img_shape) - masks = masks >= 0.5 - masks = postprocess_masks(masks, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height) - if torch.cuda.is_available(): - masks = masks.cpu() - masks = masks.squeeze(1) - result.pred_masks = masks - - '''masks = masks.numpy() - img = masks[0] - from PIL import Image - for j in range(len(masks)): - mask = masks[j] - mask = mask.astype(bool) - img[mask] = img[mask] + 1 - imag = Image.fromarray((img * 255).astype(np.uint8)) - imag.save(os.path.join('.', bin_file + '.png'))''' - - predbox = postprocess_bboxes(bboxes, org_img_size, flags.net_input_height, flags.net_input_width) - result.pred_boxes = Boxes(predbox) - result.scores = scores.reshape([100]) - result.pred_classes = labels.reshape([100]) - - results.append({"instances": result}) - - res_buff.append(buf) - else: - print("[ERROR] file not exist", path_base + "_" + str(num) + ".bin") - - current_img_size = img_size_dict[bin_file] - res_bboxes = np.concatenate(res_buff, axis=1) - predbox = postprocess_bboxes(res_bboxes, current_img_size, flags.net_input_width, flags.net_input_height) - - if flags.ifShowDetObj == True: - imgCur = cv2.imread(current_img_size[2]) - - det_results_str = '' - for idx, class_idx in enumerate(predbox[:, 5]): - if float(predbox[idx][4]) < float(0.05): - #if float(predbox[idx][4]) < float(0): - continue - if class_idx < 0 or class_idx > 80: - continue - - class_name = coco_class_map[int(class_idx)] - det_results_str += "{} {} {} {} {} {}\n".format(class_name, str(predbox[idx][4]), predbox[idx][0], - predbox[idx][1], predbox[idx][2], predbox[idx][3]) - - if flags.ifShowDetObj == True: - imgCur = cv2.rectangle(imgCur, (int(predbox[idx][0]), int(predbox[idx][1])), (int(predbox[idx][2]), int(predbox[idx][3])), (0,255,0), 2) - imgCur = cv2.putText(imgCur, class_name, (int(predbox[idx][0]), int(predbox[idx][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) - #imgCur = cv2.putText(imgCur, 
str(predbox[idx][4]), (int(predbox[idx][0]), int(predbox[idx][1])),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) - - if flags.ifShowDetObj == True: - cv2.imwrite(os.path.join(det_results_path, bin_file +'.jpg'), imgCur, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) - - det_results_file = os.path.join(det_results_path, bin_file + ".txt") - with open(det_results_file, "w") as detf: - detf.write(det_results_str) - - #save_variable(results, './results.txt') - #results = load_variavle('./results.txt') - inputs = DatasetCatalog.get('coco_2017_val')[:5000] - evaluator.process(inputs, results) - evaluator.evaluate() -``` -调用maskrcnn_pth_postprocess_detectron2.py评测map精度: -```shell -python3.7 get_info.py jpg /opt/npu/dataset/coco/val2017 maskrcnn_jpeg.info - -python3.7 maskrcnn_pth_postprocess_detectron2.py --bin_data_path=./result/dumpOutput_device0/ --test_annotation=maskrcnn_jpeg.info --det_results_path=./ret_npuinfer/ --net_out_num=4 --net_input_height=1344 --net_input_width=1344 --ifShowDetObj -``` -第一个参数为benchmark推理结果,第二个为原始图片信息文件,第三个为后处理输出结果,第四个为网络输出个数,第五六个为网络高宽,第七个为是否将box画在图上显示 -执行完后会打印出精度: -``` -INFO:detectron2.data.datasets.coco:Loaded 5000 images in COCO format from /opt/npu/dataset/coco/annotations/instances_val2017.json -INFO:detectron2.evaluation.coco_evaluation:Preparing results for COCO format ... -INFO:detectron2.evaluation.coco_evaluation:Evaluating predictions with unofficial COCO API... -Loading and preparing results... -DONE (t=2.16s) -creating index... -index created! -INFO:detectron2.evaluation.fast_eval_api:Evaluate annotation type *bbox* -INFO:detectron2.evaluation.fast_eval_api:COCOeval_opt.evaluate() finished in 21.80 seconds. -INFO:detectron2.evaluation.fast_eval_api:Accumulating evaluation results... -INFO:detectron2.evaluation.fast_eval_api:COCOeval_opt.accumulate() finished in 2.61 seconds. 
-Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.326 -Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.536 -Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.349 -Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.179 -Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.366 -Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.432 -Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.282 -Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.444 -Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.465 -Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.269 -Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.508 -Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.609 -INFO:detectron2.evaluation.coco_evaluation:Evaluation results for bbox: -| AP | AP50 | AP75 | APs | APm | APl | -|:------:|:------:|:------:|:------:|:------:|:------:| -| 32.586 | 53.634 | 34.852 | 17.862 | 36.613 | 43.174 | -INFO:detectron2.evaluation.coco_evaluation:Per-category bbox AP: -| category | AP | category | AP | category | AP | -|:--------------|:-------|:-------------|:-------|:---------------|:-------| -| person | 48.933 | bicycle | 24.620 | car | 37.483 | -| motorcycle | 33.410 | airplane | 50.975 | bus | 54.898 | -| train | 51.864 | truck | 26.716 | boat | 20.755 | -| traffic light | 20.305 | fire hydrant | 58.144 | stop sign | 58.833 | -| parking meter | 41.813 | bench | 17.210 | bird | 29.444 | -| cat | 57.738 | dog | 52.853 | horse | 51.333 | -| sheep | 40.341 | cow | 41.568 | elephant | 56.160 | -| bear | 63.240 | zebra | 59.121 | giraffe | 57.166 | -| backpack | 11.226 | umbrella | 29.385 | handbag | 8.685 | -| tie | 24.923 | suitcase | 27.242 | frisbee | 53.933 | -| skis | 16.987 | snowboard | 24.268 | sports ball | 40.009 | -| kite | 34.285 | baseball bat | 17.073 | baseball glove | 25.865 | -| skateboard | 39.694 | surfboard | 28.035 | tennis racket | 37.552 | -| bottle | 30.593 | wine glass | 26.470 | cup | 33.779 | -| fork | 19.335 | knife | 11.024 | spoon | 8.761 | -| bowl | 33.928 | banana | 18.034 | apple | 15.394 | -| sandwich | 27.732 | orange | 26.546 | broccoli | 19.022 | -| carrot | 15.449 | hot dog | 25.118 | pizza | 44.402 | -| donut | 35.096 | cake | 23.876 | chair | 18.866 | -| couch | 32.443 | potted plant | 18.701 | bed | 33.585 | -| dining table | 20.164 | toilet | 46.354 | tv | 48.705 | -| laptop | 50.107 | mouse | 47.597 | remote | 20.899 | -| keyboard | 40.454 | cell phone | 28.115 | microwave | 43.190 | -| oven | 25.974 | toaster | 13.432 | sink | 27.114 | -| refrigerator | 42.467 | book | 10.420 | clock | 44.894 | -| vase | 30.559 | scissors | 25.719 | teddy bear | 36.704 | -| hair drier | 0.000 | toothbrush | 11.796 | | | -``` - - **精度调试:** -> 1.根据代码语义RoiExtractor参数finest_scale不是224而是56 -> 2.因gather算子处理-1会导致每张图的第一个score为0,故maskrcnn_detectron2.diff中已将dets[:, -1]改为dets[:, 4] -> 3.单张图调试 -> ``` -> demo.py分数改为0.05,defaults.py MIN_SIZE_TEST与MAX_SIZE_TEST改为1344: -> python3.7 demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --input 000000252219_1344x1344.jpg --opts MODEL.WEIGHTS ../../model_final.pth MODEL.DEVICE cpu -> 说明: -> 比较pth的rpn与om的rpn输出前提是detectron2/config/defaults.py的_C.INPUT.MIN_SIZE_TEST与_C.INPUT.MAX_SIZE_TEST要改为1344,并且注意因为000000252219_1344x1344.jpg 
是等比例缩放四边加pad的处理结果,因此pth推理时等价于先做pad再做标准化,图片tensor的边缘因此是负的均值。起初误认为预处理与mmdetection相同,SIZE_TEST的值与000000252219_1344x1344.jpg的缩放也按上述方式处理;经过本步与后面的调试步骤才发现预处理与mmdetection并不相同。将om各算子的输出与开源pth推理时变量的打印值逐一对比,定位到输出不对的算子,发现前处理的均值方差不同于mmdetection框架,且通道是BGR序。
-> ```
-> 4.精度调试
-> ```
-> 修改开源代码的预处理与参数,使CPU、GPU版的pth推理达到npu版代码的pth推理精度,参见本文第七章第二节T4精度数据的diff文件与执行精度测评的命令。
-> 说明:
-> 1.查看npu固定1344x1344的前处理方式(缩放加pad):
-> from torchvision import utils as vutils
-> vutils.save_image(images.tensor, 'test.jpg')
-> FIX_SHAPE->./detectron2/data/dataset_mapper.py->ResizeShortestEdge,最短边800,最长边1333。
-> 2.CPU与GPU开源代码推理pth的精度与npu代码推理pth相差2到3个点;npu代码(基于detectron2 v0.2.1)将roi_align.py换为开源代码后,pth推理精度同样下降2到3个点,最终定位是aligned参数的问题;注意插件缺陷会导致om中设置该参数未能生效。
-> ```
-
-
-### 6.2 开源精度
-[官网精度](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch)
-
-参考[npu版detectron2框架的maskrcnn](https://gitee.com/ascend/modelzoo/tree/master/built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch),安装依赖PyTorch(NPU版本)并设置环境变量,在npu上执行推理,测得npu精度如下:
-```shell
-python3.7 -m pip install -e Faster_Mask_RCNN_for_PyTorch
-cd Faster_Mask_RCNN_for_PyTorch
-修改eval.sh的配置文件与权重文件分别为mask_rcnn_R_101_FPN_3x.yaml与model_final.pth,删除mask_rcnn_R_101_FPN_3x.yaml的SOLVER和DATALOADER配置,datasets/coco下面放置coco2017验证集图片与标签(参考本文第三章第一节步骤五)
-./eval.sh
-```
-```
-Task: bbox
-AP,AP50,AP75,APs,APm,APl
-33.0103,53.5686,35.5192,17.8069,36.9325,44.0201
-Task: segm
-AP,AP50,AP75,APs,APm,APl
-30.3271,50.4665,31.8223,12.9573,33.0375,44.8537
-```
-### 6.3 精度对比
-om推理box map精度为0.326,npu推理box map精度为0.330;npu输出400个框,精度略高但性能较低。精度下降在1个点之内,因此可视为精度达标
-
-## 7 性能对比
-
-- **[npu性能数据](#71-npu性能数据)**
-- **[T4性能数据](#72-T4性能数据)**
-- **[性能对比](#73-性能对比)**
-
-### 7.1 npu性能数据
-batch1的性能:
- 测试npu性能要确保device空闲,使用npu-smi info命令可查看device是否在运行其它推理任务
-```
-./benchmark.x86_64 -round=20 -om_path=maskrcnn_detectron2_npu.om -device_id=0 -batch_size=1
-```
-执行20次纯推理取均值,统计吞吐率与其倒数时延(benchmark的时延是单个数据的推理时间),npu性能是单个device执行的结果
-```
-[INFO] Dataset number: 19 finished cost 439.142ms
-[INFO] PureInfer result saved in ./result/PureInfer_perf_of_maskrcnn_detectron2_npu_in_device_0.txt
------------------PureInfer Performance Summary------------------
-[INFO] ave_throughputRate: 2.27773samples/s, ave_latency: 440.813ms
-----------------------------------------------------------------
-```
-maskrcnn detectron2不支持多batch
-
- **性能优化:**
-> 查看profiling导出的op_statistic_0_1.csv中的算子总体耗时统计,发现gather算子耗时最多;再查看profiling导出的task_time_0_1.csv,找到具体哪些gather算子耗时最多;通过导出onnx时的verbose打印找到这些算子对应的代码。因gather在最后一个轴上计算会很耗时,故先转置、再在0轴上计算来规避,比如maskrcnn_detectron2.diff文件中的如下修改:
-> ```
-> boxes_prof = boxes.permute(1, 0)
-> widths = boxes_prof[2, :] - boxes_prof[0, :]
-> ```
->
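-
-上述转置规避的等价性可用如下独立小例验证(仅为示意代码,张量名与形状为假设值,非模型源码):
-```python
-import torch
-
-boxes = torch.rand(1000, 4)  # 假设的proposal框张量
-# 直接按列切片,onnx导出后对应在最后一个轴(axis=1)上的Gather,npu上耗时较多
-widths_slow = boxes[:, 2] - boxes[:, 0]
-# 先permute转置,再在0轴上取行,Gather落在0轴,计算结果不变
-boxes_prof = boxes.permute(1, 0)
-widths_fast = boxes_prof[2, :] - boxes_prof[0, :]
-assert torch.equal(widths_slow, widths_fast)
-```
-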
-
-### 7.2 T4性能数据
-batch1性能:
-onnx包含自定义算子,因此不能使用开源TensorRT测试性能数据,故在T4机器上使用pth在线推理测试性能数据
-
-依据npu版代码修改cpu,gpu版detectron2,参见maskrcnn_pth_npu.diff:
-```diff
-diff --git a/detectron2/data/dataset_mapper.py b/detectron2/data/dataset_mapper.py
-index 0e77851..0d03c08 100644
---- a/detectron2/data/dataset_mapper.py
-+++ b/detectron2/data/dataset_mapper.py
-@@ -4,6 +4,7 @@ import logging
- import numpy as np
- from typing import List, Optional, Union
- import torch
-+from torch.nn import functional as F
-
- from detectron2.config import configurable
-
-@@ -133,6 +134,7 @@ class DatasetMapper:
-
-         aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
-         transforms = self.augmentations(aug_input)
-+        print(self.augmentations, transforms)
-         image, sem_seg_gt = aug_input.image, aug_input.sem_seg
-
-         image_shape = image.shape[:2]  # h, w
-@@ -140,6 +142,20 @@ class DatasetMapper:
-         # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
-         # Therefore it's important to use torch.Tensor.
-         dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-+
-+        size_divisibility = 32
-+        pad_value = 0
-+        pixel_mean = torch.Tensor([103.53, 116.28, 123.675]).view(-1, 1, 1)
-+        pixel_std = torch.Tensor([1.0, 1.0, 1.0]).view(-1, 1, 1)
-+        images = (dataset_dict["image"] - pixel_mean) / pixel_std
-+        dataset_dict["image_size"] = tuple(images.shape[-2:])
-+        batch_shape = (3, 1344, 1344)
-+        padding_size = [0, batch_shape[-1] - images.shape[-1],
-+                        0, batch_shape[-2] - images.shape[-2]]
-+        padded = F.pad(images, padding_size, value=pad_value)
-+        batched_imgs = padded.unsqueeze_(0)
-+        dataset_dict["image_preprocess"] = batched_imgs.contiguous()
-+
-         if sem_seg_gt is not None:
-             dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
-
-diff --git a/detectron2/layers/roi_align.py b/detectron2/layers/roi_align.py
-index bcbf5f4..23b138d 100644
---- a/detectron2/layers/roi_align.py
-+++ b/detectron2/layers/roi_align.py
-@@ -38,7 +38,7 @@ class ROIAlign(nn.Module):
-         self.output_size = output_size
-         self.spatial_scale = spatial_scale
-         self.sampling_ratio = sampling_ratio
--        self.aligned = aligned
-+        self.aligned = False
-
-         from torchvision import __version__
-
-diff --git a/detectron2/modeling/meta_arch/rcnn.py b/detectron2/modeling/meta_arch/rcnn.py
-index e5f66d1..b9ffa66 100644
---- a/detectron2/modeling/meta_arch/rcnn.py
-+++ b/detectron2/modeling/meta_arch/rcnn.py
-@@ -202,6 +202,9 @@ class GeneralizedRCNN(nn.Module):
-         images = self.preprocess_image(batched_inputs)
-         features = self.backbone(images.tensor)
-
-+        #from torchvision import utils as vutils
-+        #vutils.save_image(images.tensor, 'test.jpg')
-+        print(features['p2'].shape)
-         if detected_instances is None:
-             if self.proposal_generator is not None:
-                 proposals, _ = self.proposal_generator(images, features, None)
-@@ -224,10 +227,14 @@ class GeneralizedRCNN(nn.Module):
-         """
-         Normalize, pad and batch the input images.
-         """
--        images = [x["image"].to(self.device) for x in batched_inputs]
-+        '''images = [x["image"].to(self.device) for x in batched_inputs]
-         images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-         images = ImageList.from_tensors(images, self.backbone.size_divisibility)
--        return images
-+        return images'''
-+        images = [x["image_preprocess"].to(device=self.device) for x in batched_inputs]
-+        images = torch.cat(images, dim=0)
-+        image_sizes = [x["image_size"] for x in batched_inputs]
-+        return ImageList(images, image_sizes)
-
-     @staticmethod
-     def _postprocess(instances, batched_inputs: Tuple[Dict[str, torch.Tensor]], image_sizes):
-diff --git a/detectron2/modeling/postprocessing.py b/detectron2/modeling/postprocessing.py
-index f42e77c..909923a 100644
---- a/detectron2/modeling/postprocessing.py
-+++ b/detectron2/modeling/postprocessing.py
-@@ -55,6 +55,7 @@ def detector_postprocess(
-         output_boxes = None
-     assert output_boxes is not None, "Predictions must contain boxes!"
- -+ print(scale_x, scale_y) - output_boxes.scale(scale_x, scale_y) - output_boxes.clip(results.image_size) - - -``` -测评T4精度与性能: -```shell -git clone https://github.com/facebookresearch/detectron2 -python3.7 -m pip install -e detectron2 -cd detectron2 -patch -p1 < ../maskrcnn_pth_npu.diff -cd tools -mkdir datasets -cp -rf ../../datasets/coco datasets/(数据集构造参考本文第三章第一节步骤五) -python3.7 train_net.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml --eval-only MODEL.WEIGHTS ../../model_final.pth MODEL.DEVICE cuda:0 -``` -``` -Inference done 4993/5000. 0.2937 s / img. -``` - -### 7.3 性能对比 -310单卡4个device,benchmark测试的是一个device。T4一个设备相当于4个device,测试的是整个设备。benchmark时延是吞吐率的倒数,T4时延是吞吐率的倒数乘以batch。对于batch1,440.73ms / 4 * 1 < 0.2937s,即npu性能超过T4 -对于batch1,npu性能均高于T4性能1.2倍,该模型放在benchmark/cv/segmentation目录下 - - diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" deleted file mode 100644 index dfb1a8f..0000000 --- "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/cv/segmentation/\345\237\272\344\272\216\345\274\200\346\272\220mmdetection\351\242\204\350\256\255\347\273\203\347\232\204maskrcnn_Onnx\346\250\241\345\236\213\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274.md" +++ /dev/null @@ -1,1041 +0,0 @@ -# 基于开源mmdetection预训练的maskrcnn Onnx模型端到端推理指导 -- [1 模型概述](#1-模型概述) - - [1.1 论文地址](#11-论文地址) - - [1.2 代码地址](#12-代码地址) -- [2 环境说明](#2-环境说明) - - [2.1 深度学习框架](#21-深度学习框架) - - [2.2 python第三方库](#22-python第三方库) -- [3 模型转换](#3-模型转换) - - [3.1 pth转onnx模型](#31-pth转onnx模型) - - [3.2 onnx转om模型](#32-onnx转om模型) -- [4 数据集预处理](#4-数据集预处理) - - [4.1 数据集获取](#41-数据集获取) - - [4.2 数据集预处理](#42-数据集预处理) - - [4.3 生成数据集信息文件](#43-生成数据集信息文件) -- [5 离线推理](#5-离线推理) - - [5.1 benchmark工具概述](#51-benchmark工具概述) - - [5.2 离线推理](#52-离线推理) -- [6 精度对比](#6-精度对比) - - [6.1 离线推理精度统计](#61-离线推理精度统计) - - [6.2 开源精度](#62-开源精度) - - [6.3 精度对比](#63-精度对比) -- [7 性能对比](#7-性能对比) - - [7.1 npu性能数据](#71-npu性能数据) - - [7.2 T4性能数据](#72-T4性能数据) - - [7.3 性能对比](#73-性能对比) - - - -## 1 模型概述 - -- **[论文地址](#11-论文地址)** - -- **[代码地址](#12-代码地址)** - -### 1.1 论文地址 -[maskrcnn论文](https://arxiv.org/abs/1703.06870) -论文提出了一个简单、灵活、通用的目标实例分割框架Mask R-CNN。这个框架可同时做目标检测、实例分割。实例分割的实现就是在faster r-cnn的基础上加了一个可以预测目标掩膜(mask)的分支。只比Faster r-cnn慢一点,5fps。很容易拓展到其他任务如:关键点检测。18年在coco的目标检测、实例分割、人体关键点检测都取得了最优成绩。 - -### 1.2 代码地址 -[mmdetection框架maskrcnn代码](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) - -## 2 环境说明 - -- **[深度学习框架](#21-深度学习框架)** - -- **[python第三方库](#22-python第三方库)** - -### 2.1 深度学习框架 -``` -pytorch == 1.8.0 -torchvision == 0.9.0 -onnx == 1.8.0 -``` - -### 2.2 python第三方库 - -``` -numpy == 1.18.5 -opencv-python == 4.2.0.34 -``` - -**说明:** -> X86架构:opencv,pytorch,torchvision和onnx可以通过官方下载whl包安装,其它可以通过pip3.7 install 包名 安装 -> -> Arm架构:opencv,pytorch,torchvision和onnx可以通过源码编译安装,其它可以通过pip3.7 install 包名 安装 - -## 3 模型转换 - -- 
**[pth转onnx模型](#31-pth转onnx模型)** - -- **[onnx转om模型](#32-onnx转om模型)** - -atc暂不支持动态shape小算子,可以使用大颗粒算子替换这些小算子规避,这些小算子可以在转onnx时的verbose打印中找到其对应的python代码,从而根据功能用大颗粒算子替换,onnx能推导出变量正确的shape与算子属性正确即可,变量实际的数值无关紧要,因此这些大算子函数的功能实现无关紧要,因包含自定义算子需要去掉对onnx模型的校验。 - -### 3.1 pth转onnx模型 - -1.获取pth权重文件 -[maskrcnn基于detectron2预训练的npu权重文件](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) -文件md5sum: f4ee3c5911537f454045395d2f708954 -2.mmdetection源码安装 -```shell -git clone https://github.com/open-mmlab/mmcv -cd mmcv -MMCV_WITH_OPS=1 pip3.7 install -e . -cd .. -git clone https://github.com/open-mmlab/mmdetection -cd mmdetection -pip3.7 install -r requirements/build.txt -python3.7 setup.py develop -``` - - **说明:** -> 安装所需的依赖说明请参考mmdetection/docs/get_started.md -> - -3.转原始onnx -```shell -python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img demo/demo.jpg --test-img tests/data/color.jpg --shape 800 1216 --show --verify --simplify -若报错参考:https://github.com/open-mmlab/mmdetection/issues/4548 -``` -4.修改mmdetection代码,参见maskrcnn_mmdetection.diff: -```diff -diff --git a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -index e9eb357..f72cef7 100644 ---- a/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -+++ b/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py -@@ -168,13 +168,31 @@ def delta2bbox(rois, - [0.0000, 0.3161, 4.1945, 0.6839], - [5.0000, 5.0000, 5.0000, 5.0000]]) - """ -- means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4) -- stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4) -+ -+ # fix shape for means and stds when exporting onnx -+ if torch.onnx.is_in_onnx_export(): -+ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1).numpy() // 4) -+ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1).numpy() // 4) -+ else: -+ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4) -+ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4) - denorm_deltas = deltas * stds + means -- dx = denorm_deltas[:, 0::4] -- dy = denorm_deltas[:, 1::4] -- dw = denorm_deltas[:, 2::4] -- dh = denorm_deltas[:, 3::4] -+ # dx = denorm_deltas[:, 0::4] -+ # dy = denorm_deltas[:, 1::4] -+ # dw = denorm_deltas[:, 2::4] -+ # dh = denorm_deltas[:, 3::4] -+ if denorm_deltas.shape[1] > 4: -+ denorm_deltas = denorm_deltas.view(-1, 80, 4) -+ dx = denorm_deltas[:, :, 0:1:].view(-1, 80) -+ dy = denorm_deltas[:, :, 1:2:].view(-1, 80) -+ dw = denorm_deltas[:, :, 2:3:].view(-1, 80) -+ dh = denorm_deltas[:, :, 3:4:].view(-1, 80) -+ else: -+ dx = denorm_deltas[:, 0:1:] -+ dy = denorm_deltas[:, 1:2:] -+ dw = denorm_deltas[:, 2:3:] -+ dh = denorm_deltas[:, 3:4:] -+ - max_ratio = np.abs(np.log(wh_ratio_clip)) - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) -diff --git a/mmdet/core/post_processing/bbox_nms.py b/mmdet/core/post_processing/bbox_nms.py -index c43aea9..e99f5d8 100644 ---- a/mmdet/core/post_processing/bbox_nms.py -+++ b/mmdet/core/post_processing/bbox_nms.py -@@ -4,6 +4,59 @@ from mmcv.ops.nms import batched_nms - from mmdet.core.bbox.iou_calculators import bbox_overlaps - - -+class BatchNMSOp(torch.autograd.Function): -+ @staticmethod -+ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, 
max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (batch, N, C, 4). -+ scores (torch.Tensor): scores in shape (batch, N, C). -+ return: -+ nmsed_boxes: (1, N, 4) -+ nmsed_scores: (1, N) -+ nmsed_classes: (1, N) -+ nmsed_num: (1,) -+ """ -+ -+ # Phony implementation for onnx export -+ nmsed_boxes = bboxes[:, :max_total_size, 0, :] -+ nmsed_scores = scores[:, :max_total_size, 0] -+ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) -+ nmsed_num = torch.Tensor([max_total_size]) -+ -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+ @staticmethod -+ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', -+ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, -+ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (N, 4). -+ scores (torch.Tensor): scores in shape (N, ). -+ """ -+ -+ num_classes = bboxes.shape[1].numpy() // 4 -+ if bboxes.dtype == torch.float32: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() -+ else: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1) -+ -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, -+ score_threshold, iou_threshold, max_size_per_class, max_total_size) -+ nmsed_boxes = nmsed_boxes.float() -+ nmsed_scores = nmsed_scores.float() -+ nmsed_classes = nmsed_classes.long() -+ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) -+ dets = dets.reshape((max_total_size, 5)) -+ labels = nmsed_classes.reshape((max_total_size, )) -+ return dets, labels -+ -+ - def multiclass_nms(multi_bboxes, - multi_scores, - score_thr, -@@ -40,7 +93,17 @@ def multiclass_nms(multi_bboxes, - multi_scores.size(0), num_classes, 4) - - scores = multi_scores[:, :-1] -+ # multiply score_factor after threshold to preserve more bboxes, improve -+ # mAP by 1% for YOLOv3 -+ if score_factors is not None: -+ # expand the shape to match original shape of score -+ score_factors = score_factors.view(-1, 1).expand( -+ multi_scores.size(0), num_classes) -+ score_factors = score_factors.reshape(-1) -+ scores = scores * score_factors - -+ # cpu and gpu -+ ''' - labels = torch.arange(num_classes, dtype=torch.long) - labels = labels.view(1, -1).expand_as(scores) - -@@ -80,7 +143,11 @@ def multiclass_nms(multi_bboxes, - return dets, labels[keep], keep - else: - return dets, labels[keep] -+ ''' - -+ # npu -+ dets, labels = batch_nms_op(bboxes, scores, score_thr, nms_cfg.get("iou_threshold"), max_num, max_num) -+ return dets, labels - - def fast_nms(multi_bboxes, - multi_scores, -diff --git a/mmdet/models/dense_heads/rpn_head.py b/mmdet/models/dense_heads/rpn_head.py -index f565d1a..3c29386 100644 ---- a/mmdet/models/dense_heads/rpn_head.py -+++ b/mmdet/models/dense_heads/rpn_head.py -@@ -9,6 +9,57 @@ from .anchor_head import AnchorHead - from .rpn_test_mixin import RPNTestMixin - - -+class BatchNMSOp(torch.autograd.Function): -+ @staticmethod -+ def forward(ctx, bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ 
boxes (torch.Tensor): boxes in shape (batch, N, C, 4). -+ scores (torch.Tensor): scores in shape (batch, N, C). -+ return: -+ nmsed_boxes: (1, N, 4) -+ nmsed_scores: (1, N) -+ nmsed_classes: (1, N) -+ nmsed_num: (1,) -+ """ -+ -+ # Phony implementation for onnx export -+ nmsed_boxes = bboxes[:, :max_total_size, 0, :] -+ nmsed_scores = scores[:, :max_total_size, 0] -+ nmsed_classes = torch.arange(max_total_size, dtype=torch.long) -+ nmsed_num = torch.Tensor([max_total_size]) -+ -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+ @staticmethod -+ def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class, max_t_size): -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op('BatchMultiClassNMS', -+ bboxes, scores, score_threshold_f=score_thr, iou_threshold_f=iou_thr, -+ max_size_per_class_i=max_size_p_class, max_total_size_i=max_t_size, outputs=4) -+ return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num -+ -+def batch_nms_op(bboxes, scores, score_threshold, iou_threshold, max_size_per_class, max_total_size): -+ """ -+ boxes (torch.Tensor): boxes in shape (N, 4). -+ scores (torch.Tensor): scores in shape (N, ). -+ """ -+ -+ num_classes = bboxes.shape[1].numpy() // 4 -+ if bboxes.dtype == torch.float32: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4).half() -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1).half() -+ else: -+ bboxes = bboxes.reshape(1, bboxes.shape[0].numpy(), -1, 4) -+ scores = scores.reshape(1, scores.shape[0].numpy(), -1) -+ -+ nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = BatchNMSOp.apply(bboxes, scores, -+ score_threshold, iou_threshold, max_size_per_class, max_total_size) -+ nmsed_boxes = nmsed_boxes.float() -+ nmsed_scores = nmsed_scores.float() -+ nmsed_classes = nmsed_classes.long() -+ dets = torch.cat((nmsed_boxes.reshape((max_total_size, 4)), nmsed_scores.reshape((max_total_size, 1))), -1) -+ labels = nmsed_classes.reshape((max_total_size, )) -+ return dets, labels -+ - @HEADS.register_module() - class RPNHead(RPNTestMixin, AnchorHead): - """RPN head. 
-@@ -132,9 +183,14 @@ class RPNHead(RPNTestMixin, AnchorHead): - if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) -- ranked_scores, rank_inds = scores.sort(descending=True) -- topk_inds = rank_inds[:cfg.nms_pre] -- scores = ranked_scores[:cfg.nms_pre] -+ # onnx uses topk to sort, this is simpler for onnx export -+ if torch.onnx.is_in_onnx_export(): -+ scores, topk_inds = torch.topk(scores, cfg.nms_pre) -+ else: -+ ranked_scores, rank_inds = scores.sort(descending=True) -+ topk_inds = rank_inds[:cfg.nms_pre] -+ scores = ranked_scores[:cfg.nms_pre] -+ - rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] - anchors = anchors[topk_inds, :] - mlvl_scores.append(scores) -@@ -164,5 +220,12 @@ class RPNHead(RPNTestMixin, AnchorHead): - - # TODO: remove the hard coded nms type - nms_cfg = dict(type='nms', iou_threshold=cfg.nms_thr) -+ # cpu and gpu return -+ ''' - dets, keep = batched_nms(proposals, scores, ids, nms_cfg) - return dets[:cfg.nms_post] -+ ''' -+ -+ # npu return -+ dets, labels = batch_nms_op(proposals, scores, 0.0, nms_cfg.get("iou_threshold"), cfg.nms_post, cfg.nms_post) -+ return dets -diff --git a/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py -index 0cba3cd..a965e53 100644 ---- a/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py -+++ b/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py -@@ -199,11 +199,11 @@ class FCNMaskHead(nn.Module): - # TODO: Remove after F.grid_sample is supported. - from torchvision.models.detection.roi_heads \ - import paste_masks_in_image -- masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) -+ '''masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) - thr = rcnn_test_cfg.get('mask_thr_binary', 0) - if thr > 0: -- masks = masks >= thr -- return masks -+ masks = masks >= thr''' -+ return mask_pred - - N = len(mask_pred) - # The actual implementation split the input into chunks, -diff --git a/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py -index c0eebc4..63605c5 100644 ---- a/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py -+++ b/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py -@@ -4,6 +4,31 @@ from mmcv.runner import force_fp32 - from mmdet.models.builder import ROI_EXTRACTORS - from .base_roi_extractor import BaseRoIExtractor - -+import torch.onnx.symbolic_helper as sym_help -+ -+class RoiExtractor(torch.autograd.Function): -+ @staticmethod -+ def forward(self, f0, f1, f2, f3, rois, aligned=1, finest_scale=56, pooled_height=7, pooled_width=7, -+ pool_mode='avg', roi_scale_factor=0, sample_num=0, spatial_scale=[0.25, 0.125, 0.0625, 0.03125]): -+ """ -+ feats (torch.Tensor): feats in shape (batch, 256, H, W). -+ rois (torch.Tensor): rois in shape (k, 5). 
-+        return:
-+            roi_feats (torch.Tensor): (k, 256, pooled_width, pooled_width)
-+        """
-+
-+        # phony implementation for shape inference
-+        k = rois.size()[0]
-+        roi_feats = torch.ones(k, 256, pooled_height, pooled_width)
-+        return roi_feats
-+
-+    @staticmethod
-+    def symbolic(g, f0, f1, f2, f3, rois, aligned=1, finest_scale=56, pooled_height=7, pooled_width=7):
-+        # TODO: support tensor list type for feats
-+        #f_tensors = sym_help._unpack_list(feats)
-+        roi_feats = g.op('RoiExtractor', f0, f1, f2, f3, rois, aligned_i=1, finest_scale_i=56, pooled_height_i=pooled_height, pooled_width_i=pooled_width,
-+            pool_mode_s='avg', roi_scale_factor_i=0, sample_num_i=0, spatial_scale_f=[0.25, 0.125, 0.0625, 0.03125], outputs=1)
-+        return roi_feats
-
- @ROI_EXTRACTORS.register_module()
- class SingleRoIExtractor(BaseRoIExtractor):
-@@ -52,6 +77,14 @@ class SingleRoIExtractor(BaseRoIExtractor):
-
-     @force_fp32(apply_to=('feats', ), out_fp16=True)
-     def forward(self, feats, rois, roi_scale_factor=None):
-+        # Work around to export onnx for npu
-+        if torch.onnx.is_in_onnx_export():
-+            out_size = self.roi_layers[0].output_size
-+            roi_feats = RoiExtractor.apply(feats[0], feats[1], feats[2], feats[3], rois, 1, 56, out_size[0], out_size[1])
-+            # roi_feats = RoiExtractor.apply(list(feats), rois)
-+            return roi_feats
-+
-+
-         """Forward function."""
-         out_size = self.roi_layers[0].output_size
-         num_levels = len(feats)
-diff --git a/tools/deployment/pytorch2onnx.py b/tools/deployment/pytorch2onnx.py
-index 1305a79..c79e9fb 100644
---- a/tools/deployment/pytorch2onnx.py
-+++ b/tools/deployment/pytorch2onnx.py
-@@ -48,7 +48,7 @@ def pytorch2onnx(config_path,
-         input_names=['input'],
-         output_names=output_names,
-         export_params=True,
--        keep_initializers_as_inputs=True,
-+        #keep_initializers_as_inputs=True,
-         do_constant_folding=True,
-         verbose=show,
-         opset_version=opset_version)
-
-```
- **修改依据:**
-> 1.atc暂不支持if与nonzero这类动态shape小算子,这两个小算子是bbox_nms.py与single_level_roi_extractor.py中nms与roi这两个大功能引入的(rpn_head.py中的nms虽然没有引入不支持的算子,但也需要替换,否则后面会出现E19014: Op[ReduceMax_505]'s attribute axes is invalid which is empty),因此使用npu的nms与roi大算子代替这部分功能。loop算子暂无合适的替换方法,由于它在网络的最后一部分,因此可将其与后面的部分放到后处理中
-> 2.atc转换报错E11019: Op[Conv_0]'s input[1] is not linked,因此注释掉tools/deployment/pytorch2onnx.py中export函数的keep_initializers_as_inputs=True
-> 3.动态shape算子导致atc转换出现未知错误,atc的debug日志显示Unknown shape op Tile output shape range is unknown, set its size -1。在转onnx时的verbose打印中找到该算子对应的python代码行,利用numpy()将means和stds的shape固定下来,参见maskrcnn_mmdetection.diff
-> 4.slice跑在aicpu上有错误,所以改为dx = denorm_deltas[:, :, 0:1:].view(-1, 80),使其运行在aicore上
-> 5.atc转换Concat一对多算子时会改变其名字,故添加dets = dets.reshape((max_total_size, 5)),使Concat后添加一个冗余的Reshape算子作为输出节点
-> 6.atc转换时计算mask的RoiExtractor算子报错,打开--log=debug输出日志,查看strace -f cmd的打印/root/ascend/log/plog/…找到日志存放路径,发现是(14,14)导致cube内存不够用
-> 7.atc转换时根据日志中报错的算子,在转onnx时的verbose打印中找到其对应的python代码,然后找规避方法解决,具体修改参见maskrcnn_mmdetection.diff
-> 8.其它地方的修改原因参见精度调试
-
-
-通过打补丁的方式修改mmdetection:
-```shell
-patch -p1 < ./maskrcnn_mmdetection.diff
-```
-5.修改pytorch代码,去除导出onnx时的模型检查:
-将/usr/local/python3.7.5/lib/python3.7/site-packages/torch/onnx/utils.py文件中的_check_onnx_proto(proto)改为pass
-
-6.运行如下命令,生成含有npu自定义算子的onnx:
-```shell
-python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img demo/demo.jpg --shape 800 1216
-```
-7.经过修改后导出的onnx由于添加了自定义算子,无法使用onnx的infer shape,因此需要手动固定resize算子的shape:先用未经修改的源代码导出onnx并simplify(添加--simplify参数,参见转原始onnx的命令),再用netron可视化工具查看该onnx中各resize的具体大小
-```python
-import sys
-import onnx
-
-input_model = sys.argv[1]
-output_model = sys.argv[2]
-model = onnx.load(input_model)
-# onnx.checker.check_model(model)
-
-model_nodes = model.graph.node
-def getNodeByName(nodes, name: str):
-    for n in nodes:
-        if n.name == name:
-            return n
-    return -1
-
-# fix shape for resize, 对原始onnx使用simplifier后,使用netron可视化工具可以查看该onnx中resize的大小
-sizes1 = onnx.helper.make_tensor('size1', onnx.TensorProto.INT32, [4], [1, 256, 50, 76])
-sizes2 = onnx.helper.make_tensor('size2', onnx.TensorProto.INT32, [4], [1, 256, 100, 152])
-sizes3 = onnx.helper.make_tensor('size3', onnx.TensorProto.INT32, [4], [1, 256, 200, 304])
-model.graph.initializer.append(sizes1)
-model.graph.initializer.append(sizes2)
-model.graph.initializer.append(sizes3)
-getNodeByName(model_nodes, 'Resize_141').input[3] = "size1"
-getNodeByName(model_nodes, 'Resize_161').input[3] = "size2"
-getNodeByName(model_nodes, 'Resize_181').input[3] = "size3"
-
-print("Mask R-CNN onnx adapted to ATC")
-onnx.save(model, output_model)
-```
-```shell
-python3.7 fix_onnx_shape.py mask_rcnn_r50_fpn_1x_coco.onnx mask_rcnn_r50_fpn_1x_coco_fix.onnx
-```
-
-### 3.2 onnx转om模型
-
-1.设置环境变量
-```shell
-export install_path=/usr/local/Ascend/ascend-toolkit/latest
-export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
-export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
-export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
-export ASCEND_OPP_PATH=${install_path}/opp
-export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
-```
-2.使用atc将onnx模型转换为om模型文件,工具使用方法可以参考[CANN V100R020C10 开发辅助工具指南 (推理) 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164868?idPath=23710424%7C251366513%7C22892968%7C251168373)。需要指定输出节点以去除无用输出,节点序号可能因网络结构不同而不同,可使用netron开源可视化工具查看具体的输出节点名:
-```shell
-atc --framework=5 --model=./mask_rcnn_r50_fpn_1x_coco_fix.onnx --output=mask_rcnn_r50_fpn_1x_coco_bs1 --out_nodes="Reshape_574:0;Reshape_576:0;Sigmoid_604:0" --input_format=NCHW --input_shape="input:1,3,800,1216" --log=debug --soc_version=Ascend310
-```
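-
-若不方便使用netron,也可以用如下脚本粗略打印候选输出节点的名字(仅为辅助示意,节点名与类型以实际导出的模型为准):
-```python
-import onnx
-
-model = onnx.load("mask_rcnn_r50_fpn_1x_coco_fix.onnx")
-# 遍历图中节点,打印Reshape与Sigmoid节点的名字与输出,便于确定atc的--out_nodes参数
-for node in model.graph.node:
-    if node.op_type in ("Reshape", "Sigmoid"):
-        print(node.op_type, node.name, node.output)
-```
-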
--input_shape="input:1,3,800,1216" --log=debug --soc_version=Ascend310 -``` - -## 4 数据集预处理 - -- **[数据集获取](#41-数据集获取)** - -- **[数据集预处理](#42-数据集预处理)** - -- **[生成数据集信息文件](#43-生成数据集信息文件)** - -### 4.1 数据集获取 -该模型使用[COCO官网](https://cocodataset.org/#download)的coco2017的5千张验证集进行测试,图片与标签分别存放在/opt/npu/dataset/coco/val2017/与/opt/npu/dataset/coco/annotations/instances_val2017.json。 - -### 4.2 数据集预处理 -1.预处理脚本maskrcnn_pth_preprocess.py -```python -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import argparse -import numpy as np -import cv2 -import mmcv -import torch -import multiprocessing - -def resize(img, size): - old_h = img.shape[0] - old_w = img.shape[1] - scale_ratio = min(size[0] / old_w, size[1] / old_h) - new_w = int(np.floor(old_w * scale_ratio)) - new_h = int(np.floor(old_h * scale_ratio)) - resized_img = mmcv.imresize(img, (new_w, new_h), backend='cv2') - return resized_img - -def gen_input_bin(file_batches, batch): - i = 0 - for file in file_batches[batch]: - i = i + 1 - print("batch", batch, file, "===", i) - - image = mmcv.imread(os.path.join(flags.image_src_path, file), backend='cv2') - #image = mmcv.imrescale(image, (flags.model_input_width, flags.model_input_height)) - image = resize(image, (flags.model_input_width, flags.model_input_height)) - mean = np.array([123.675, 116.28, 103.53], dtype=np.float32) - std = np.array([58.395, 57.12, 57.375], dtype=np.float32) - image = mmcv.imnormalize(image, mean, std) - h = image.shape[0] - w = image.shape[1] - pad_left = (flags.model_input_width - w) // 2 - pad_top = (flags.model_input_height - h) // 2 - pad_right = flags.model_input_width - pad_left - w - pad_bottom = flags.model_input_height - pad_top - h - image = mmcv.impad(image, padding=(pad_left, pad_top, pad_right, pad_bottom), pad_val=0) - #mmcv.imwrite(image, './paded_jpg/' + file.split('.')[0] + '.jpg') - image = image.transpose(2, 0, 1) - image.tofile(os.path.join(flags.bin_file_path, file.split('.')[0] + ".bin")) - -def preprocess(src_path, save_path): - files = os.listdir(src_path) - file_batches = [files[i:i + 100] for i in range(0, 5000, 100) if files[i:i + 100] != []] - thread_pool = multiprocessing.Pool(len(file_batches)) - for batch in range(len(file_batches)): - thread_pool.apply_async(gen_input_bin, args=(file_batches, batch)) - thread_pool.close() - thread_pool.join() - print("in thread, except will not report! 
please ensure bin files generated.") - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='preprocess of MaskRCNN PyTorch model') - parser.add_argument("--image_src_path", default="./coco2017/", help='image of dataset') - parser.add_argument("--bin_file_path", default="./coco2017_bin/", help='Preprocessed image buffer') - parser.add_argument("--model_input_height", default=800, type=int, help='input tensor height') - parser.add_argument("--model_input_width", default=1216, type=int, help='input tensor width') - flags = parser.parse_args() - if not os.path.exists(flags.bin_file_path): - os.makedirs(flags.bin_file_path) - preprocess(flags.image_src_path, flags.bin_file_path) -``` -2.执行预处理脚本,生成数据集预处理后的bin文件 -```shell -python3.7 maskrcnn_pth_preprocess.py --image_src_path=/opt/npu/dataset/coco/val2017 --bin_file_path=val2017_bin --model_input_height=800 --model_input_width=1216 -``` -### 4.3 生成数据集信息文件 -1.生成数据集信息文件脚本get_info.py -```python -import os -import sys -import cv2 -from glob import glob - - -def get_bin_info(file_path, info_name, width, height): - bin_images = glob(os.path.join(file_path, '*.bin')) - with open(info_name, 'w') as file: - for index, img in enumerate(bin_images): - content = ' '.join([str(index), img, width, height]) - file.write(content) - file.write('\n') - - -def get_jpg_info(file_path, info_name): - extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] - image_names = [] - for extension in extensions: - image_names.append(glob(os.path.join(file_path, '*.' + extension))) - with open(info_name, 'w') as file: - for image_name in image_names: - if len(image_name) == 0: - continue - else: - for index, img in enumerate(image_name): - img_cv = cv2.imread(img) - shape = img_cv.shape - width, height = shape[1], shape[0] - content = ' '.join([str(index), img, str(width), str(height)]) - file.write(content) - file.write('\n') - - -if __name__ == '__main__': - file_type = sys.argv[1] - file_path = sys.argv[2] - info_name = sys.argv[3] - if file_type == 'bin': - width = sys.argv[4] - height = sys.argv[5] - assert len(sys.argv) == 6, 'The number of input parameters must be equal to 5' - get_bin_info(file_path, info_name, width, height) - elif file_type == 'jpg': - assert len(sys.argv) == 4, 'The number of input parameters must be equal to 3' - get_jpg_info(file_path, info_name) -``` -2.执行生成数据集信息脚本,生成数据集信息文件 -```shell -python3.7 get_info.py bin val2017_bin maskrcnn.info 1216 800 -``` -第一个参数为模型输入的类型,第二个参数为生成的bin文件路径,第三个为输出的info文件,后面为宽高信息 -## 5 离线推理 - -- **[benchmark工具概述](#51-benchmark工具概述)** - -- **[离线推理](#52-离线推理)** - -### 5.1 benchmark工具概述 - -benchmark工具为华为自研的模型推理工具,支持多种模型的离线推理,能够迅速统计出模型在Ascend310上的性能,支持真实数据和纯推理两种模式,配合后处理脚本,可以实现诸多模型的端到端过程,获取工具及使用方法可以参考[CANN V100R020C10 推理benchmark工具用户指南 01](https://support.huawei.com/enterprise/zh/doc/EDOC1100164874?idPath=23710424%7C251366513%7C22892968%7C251168373) -### 5.2 离线推理 -1.设置环境变量 -```shell -export install_path=/usr/local/Ascend/ascend-toolkit/latest -export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH -export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH -export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH -export ASCEND_OPP_PATH=${install_path}/opp -export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/ -``` -2.执行离线推理 -```shell -./benchmark.x86_64 -model_type=vision -om_path=mask_rcnn_r50_fpn_1x_coco_bs1.om -device_id=0 -batch_size=1 -input_text_path=maskrcnn.info -input_width=1216 
-input_height=800 -useDvpp=false -output_binary=true -``` - **注意:** -> label是int64,benchmark输出非二进制时会将float转为0 -> - -输出结果默认保存在当前目录result/dumpOutput_device0,模型有三个输出,每个输入对应的输出对应三个_x.bin文件 -``` -输出 shape 数据类型 数据含义 -output1 100 * 5 FP32 boxes and scores -output3 100 * 1 INT64 labels -output4 100 * 80 * 28 * 28 FP32 masks -``` - -## 6 精度对比 - -- **[离线推理精度](#61-离线推理精度)** -- **[开源精度](#62-开源精度)** -- **[精度对比](#63-精度对比)** - -### 6.1 离线推理精度统计 - -后处理统计map精度 -```python -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import argparse -import cv2 -import numpy as np - -def postprocess_bboxes(bboxes, image_size, net_input_width, net_input_height): - w = image_size[0] - h = image_size[1] - scale = min(net_input_width / w, net_input_height / h) - - pad_w = net_input_width - w * scale - pad_h = net_input_height - h * scale - pad_left = pad_w // 2 - pad_top = pad_h // 2 - - bboxes[:, 0] = (bboxes[:, 0] - pad_left) / scale - bboxes[:, 1] = (bboxes[:, 1] - pad_top) / scale - bboxes[:, 2] = (bboxes[:, 2] - pad_left) / scale - bboxes[:, 3] = (bboxes[:, 3] - pad_top) / scale - - return bboxes - -def postprocess_masks(masks, image_size, net_input_width, net_input_height): - w = image_size[0] - h = image_size[1] - scale = min(net_input_width / w, net_input_height / h) - - pad_w = net_input_width - w * scale - pad_h = net_input_height - h * scale - pad_left = pad_w // 2 - pad_top = pad_h // 2 - - if pad_top < 0: - pad_top = 0 - if pad_left < 0: - pad_left = 0 - top = int(pad_top) - left = int(pad_left) - hs = int(pad_top + net_input_height - pad_h) - ws = int(pad_left + net_input_width - pad_w) - masks = masks.to(dtype=torch.float32) - res_append = torch.zeros(0, h, w) - if torch.cuda.is_available(): - res_append = res_append.to(device='cuda') - for i in range(masks.size(0)): - mask = masks[i][0][top:hs, left:ws] - mask = mask.expand((1, 1, mask.size(0), mask.size(1))) - mask = F.interpolate(mask, size=(int(h), int(w)), mode='bilinear', align_corners=False) - mask = mask[0][0] - mask = mask.unsqueeze(0) - res_append = torch.cat((res_append, mask)) - - return res_append[:, None] - -import pickle -def save_variable(v, filename): - f = open(filename, 'wb') - pickle.dump(v, f) - f.close() -def load_variavle(filename): - f = open(filename, 'rb') - r = pickle.load(f) - f.close() - return r - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--test_annotation", default="./origin_pictures.info") - parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0/") - parser.add_argument("--det_results_path", default="./detection-results/") - parser.add_argument("--net_out_num", type=int, default=3) - parser.add_argument("--net_input_width", type=int, default=1216) - parser.add_argument("--net_input_height", type=int, default=800) - parser.add_argument("--ifShowDetObj", action="store_true", help="if input the para means True, neither False.") - flags = parser.parse_args() - - img_size_dict = dict() - with 
open(flags.test_annotation)as f: - for line in f.readlines(): - temp = line.split(" ") - img_file_path = temp[1] - img_name = temp[1].split("/")[-1].split(".")[0] - img_width = int(temp[2]) - img_height = int(temp[3]) - img_size_dict[img_name] = (img_width, img_height, img_file_path) - - bin_path = flags.bin_data_path - det_results_path = flags.det_results_path - os.makedirs(det_results_path, exist_ok=True) - #total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name]) - - import glob - import torch - from torchvision.models.detection.roi_heads import paste_masks_in_image - import torch.nn.functional as F - from mmdet.core import bbox2result - from mmdet.core import encode_mask_results - from mmdet.datasets import CocoDataset - coco_dataset = CocoDataset(ann_file='/opt/npu/dataset/coco/annotations/instances_val2017.json', pipeline=[]) - coco_class_map = {id:name for id, name in enumerate(coco_dataset.CLASSES)} - #print(dir(coco_dataset)) - results = [] - - cnt = 0 - #for bin_file in sorted(total_img): - for ids in coco_dataset.img_ids: - cnt = cnt + 1 - bin_file = glob.glob(bin_path + '/*0' + str(ids) + '_1.bin')[0] - bin_file = bin_file[bin_file.rfind('/') + 1:] - bin_file = bin_file[:bin_file.rfind('_')] - print(cnt - 1, bin_file) - path_base = os.path.join(bin_path, bin_file) - res_buff = [] - bbox_results = [] - cls_segms = [] - for num in range(1, flags.net_out_num + 1): - if os.path.exists(path_base + "_" + str(num) + ".bin"): - if num == 1: - buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") - buf = np.reshape(buf, [100, 5]) - elif num == 2: - buf = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="int64") - buf = np.reshape(buf, [100, 1]) - elif num == 3: - bboxes = np.fromfile(path_base + "_" + str(num - 2) + ".bin", dtype="float32") - bboxes = np.reshape(bboxes, [100, 5]) - bboxes = torch.from_numpy(bboxes) - labels = np.fromfile(path_base + "_" + str(num - 1) + ".bin", dtype="int64") - labels = np.reshape(labels, [100, 1]) - labels = torch.from_numpy(labels) - mask_pred = np.fromfile(path_base + "_" + str(num) + ".bin", dtype="float32") - mask_pred = np.reshape(mask_pred, [100, 80, 28, 28]) - mask_pred = torch.from_numpy(mask_pred) - - if torch.cuda.is_available(): - mask_pred = mask_pred.to(device='cuda') - - img_shape = (flags.net_input_height, flags.net_input_width) - mask_pred = mask_pred[range(len(mask_pred)), labels[:, 0]][:, None] - masks = paste_masks_in_image(mask_pred, bboxes[:, :4], img_shape) - masks = masks >= 0.5 - - masks = postprocess_masks(masks, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height) - if torch.cuda.is_available(): - masks = masks.cpu() - '''masks = masks.numpy() - img = masks[0].squeeze() - from PIL import Image - for j in range(len(masks)): - mask = masks[j].squeeze() - mask = mask.astype(bool) - img[mask] = img[mask] + 1 - imag = Image.fromarray((img * 255).astype(np.uint8)) - imag.save(os.path.join('.', bin_file + '.png'))''' - - cls_segms = [[] for _ in range(80)] - for i in range(len(masks)): - cls_segms[labels[i][0]].append(masks[i][0].numpy()) - - bboxes = postprocess_bboxes(bboxes, img_size_dict[bin_file], flags.net_input_width, flags.net_input_height) - bbox_results = [bbox2result(bboxes, labels[:, 0], 80)] - res_buff.append(buf) - else: - print("[ERROR] file not exist", path_base + "_" + str(num) + ".bin") - - result = list(zip(bbox_results, [cls_segms])) - result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] - 
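-        # 补充注释: 将每张图的bbox结果与按80类整理的mask列表打包,
-        # 用encode_mask_results把二值mask编码为COCO RLE格式,
-        # 得到CocoDataset.evaluate所需的(bbox_results, segm_results)结构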
results.extend(result) - - current_img_size = img_size_dict[bin_file] - res_bboxes = np.concatenate(res_buff, axis=1) - predbox = postprocess_bboxes(res_bboxes, current_img_size, flags.net_input_width, flags.net_input_height) - - if flags.ifShowDetObj == True: - imgCur = cv2.imread(current_img_size[2]) - - det_results_str = '' - for idx, class_idx in enumerate(predbox[:, 5]): - if float(predbox[idx][4]) < float(0.05): - continue - if class_idx < 0 or class_idx > 80: - continue - - class_name = coco_class_map[int(class_idx)] - det_results_str += "{} {} {} {} {} {}\n".format(class_name, str(predbox[idx][4]), predbox[idx][0], - predbox[idx][1], predbox[idx][2], predbox[idx][3]) - if flags.ifShowDetObj == True: - imgCur = cv2.rectangle(imgCur, (int(predbox[idx][0]), int(predbox[idx][1])), (int(predbox[idx][2]), int(predbox[idx][3])), (0,255,0), 2) - imgCur = cv2.putText(imgCur, class_name, (int(predbox[idx][0]), int(predbox[idx][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) - - if flags.ifShowDetObj == True: - cv2.imwrite(os.path.join(det_results_path, bin_file +'.jpg'), imgCur, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) - - det_results_file = os.path.join(det_results_path, bin_file + ".txt") - with open(det_results_file, "w") as detf: - detf.write(det_results_str) - - save_variable(results, './results.txt') - #results = load_variavle('./results.txt') - eval_results = coco_dataset.evaluate(results, metric=['bbox', 'segm'], classwise=True) -``` -调用maskrcnn_pth_postprocess.py评测map精度: -```shell -python3.7 get_info.py jpg /opt/npu/dataset/coco/val2017 maskrcnn_jpeg.info - -python3.7 maskrcnn_pth_postprocess.py --bin_data_path=./result/dumpOutput_device0/ --test_annotation=maskrcnn_jpeg.info --det_results_path=./ret_npuinfer/ --net_out_num=3 --net_input_height=800 --net_input_width=1216 --ifShowDetObj -``` -第一个参数为benchmark推理结果,第二个为原始图片信息文件,第三个为后处理输出结果,第四个为网络输出个数,第五六个为网络高宽,第七个为是否将box画在图上显示 -执行完后会打印出精度: -``` -Evaluating bbox... -Loading and preparing results... -DONE (t=8.57s) -creating index... -index created! -Running per image evaluation... -Evaluate annotation type *bbox* -DONE (t=103.05s). -Accumulating evaluation results... -DONE (t=26.62s). 
-Average Precision (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.377
-Average Precision (AP) @[ IoU=0.50      | area=   all | maxDets=1000 ] = 0.584
-Average Precision (AP) @[ IoU=0.75      | area=   all | maxDets=1000 ] = 0.411
-Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.211
-Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.411
-Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.500
-Average Recall    (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.515
-Average Recall    (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=300 ] = 0.515
-Average Recall    (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=1000 ] = 0.515
-Average Recall    (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.319
-Average Recall    (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.556
-Average Recall    (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.656
-
-+---------------+-------+--------------+-------+----------------+-------+
-| category      | AP    | category     | AP    | category       | AP    |
-+---------------+-------+--------------+-------+----------------+-------+
-| person        | 0.517 | bicycle      | 0.296 | car            | 0.411 |
-| motorcycle    | 0.392 | airplane     | 0.588 | bus            | 0.603 |
-| train         | 0.576 | truck        | 0.332 | boat           | 0.254 |
-| traffic light | 0.253 | fire hydrant | 0.627 | stop sign      | 0.624 |
-| parking meter | 0.431 | bench        | 0.224 | bird           | 0.335 |
-| cat           | 0.588 | dog          | 0.544 | horse          | 0.527 |
-| sheep         | 0.473 | cow          | 0.515 | elephant       | 0.597 |
-| bear          | 0.616 | zebra        | 0.627 | giraffe        | 0.623 |
-| backpack      | 0.132 | umbrella     | 0.347 | handbag        | 0.119 |
-| tie           | 0.306 | suitcase     | 0.368 | frisbee        | 0.634 |
-| skis          | 0.214 | snowboard    | 0.286 | sports ball    | 0.398 |
-| kite          | 0.375 | baseball bat | 0.215 | baseball glove | 0.333 |
-| skateboard    | 0.455 | surfboard    | 0.340 | tennis racket  | 0.417 |
-| bottle        | 0.365 | wine glass   | 0.325 | cup            | 0.400 |
-| fork          | 0.259 | knife        | 0.139 | spoon          | 0.108 |
-| bowl          | 0.395 | banana       | 0.217 | apple          | 0.200 |
-| sandwich      | 0.322 | orange       | 0.289 | broccoli       | 0.214 |
-| carrot        | 0.199 | hot dog      | 0.277 | pizza          | 0.478 |
-| donut         | 0.397 | cake         | 0.353 | chair          | 0.245 |
-| couch         | 0.371 | potted plant | 0.243 | bed            | 0.398 |
-| dining table  | 0.228 | toilet       | 0.557 | tv             | 0.542 |
-| laptop        | 0.547 | mouse        | 0.572 | remote         | 0.260 |
-| keyboard      | 0.491 | cell phone   | 0.325 | microwave      | 0.531 |
-| oven          | 0.300 | toaster      | 0.467 | sink           | 0.330 |
-| refrigerator  | 0.511 | book         | 0.146 | clock          | 0.481 |
-| vase          | 0.336 | scissors     | 0.249 | teddy bear     | 0.431 |
-| hair drier    | 0.013 | toothbrush   | 0.145 | None           | None  |
-+---------------+-------+--------------+-------+----------------+-------+
-```
-
- **精度调试:**
-> 1.因为在线推理前处理的图片是一定格式的动态分辨率,所以onnx将分辨率固定为1216x800会导致精度下降一些;改为1216x1216可以提升精度,使mask的精度与开源相比下降在1%之内
-> 2.单图调试
-> ```
-> python3.7 tools/test.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --eval bbox segm --show
-> python3.7 tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --output-file mask_rcnn_r50_fpn_1x_coco.onnx --input-img 000000397133_1216x800.jpg --shape 800 1216 --show --verify --simplify
-> 说明:
-> 1.参考开源精度测评工具,以精度达标的pth为基准,通过添加打印弄清关键代码的含义。由此可知:导出原始onnx时,paste_masks_in_image前需要添加mask_pred = mask_pred[range(len(mask_pred)), labels][:, None],onnx显示的mask才与pth一致。
-> 2.将图片经过缩放加pad后导出的原始onnx作为精度基准,发现原始onnx的mask_pred作为输出时形状是(100,80,28,28),而更换自定义算子后导出的onnx输出形状是(100,80,14,14);通过添加打印与对比,发现计算mask的RoiExtractor的(pooled_height, pooled_width)配置应是(14,14)而不是默认的(7,7)。将om推理时RoiExtractor的输入变量用pickle模块保存,再在源代码中把这些数据加载到对应变量,查看原始onnx的图片显示结果,即可验证是RoiExtractor的问题
-> 3.800x1216不是pth模型固定的高宽:在build_from_cfg中添加print(obj_cls),发现./mmdet/models/detectors/base.py的BaseDetector,可推断模型的输入大小是变化的
-> 4.至于查看函数调用关系,可以在代码中故意构造错误,python运行出错时会打印调用栈
-> ```
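-
-上面说明1中"按类别取mask通道"的索引操作,其形状变化可用如下最小示例验证(仅为示意,形状为该模型输出的假设值):
-```python
-import torch
-
-mask_pred = torch.rand(100, 80, 28, 28)  # 100个框, 80个类别的mask预测
-labels = torch.randint(0, 80, (100,))    # 每个框的预测类别
-# 每个框只保留其自身类别对应的mask通道: (100, 80, 28, 28) -> (100, 1, 28, 28)
-mask_pred = mask_pred[range(len(mask_pred)), labels][:, None]
-print(mask_pred.shape)  # torch.Size([100, 1, 28, 28])
-```
-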
-
-
-### 6.2 开源精度
-[官网精度](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json)
-
-```
-{"mode": "val", "epoch": 12, "iter": 7330, "lr": 0.0002, "bbox_mAP": 0.382, "bbox_mAP_50": 0.588, "bbox_mAP_75": 0.414, "bbox_mAP_s": 0.219, "bbox_mAP_m": 0.409, "bbox_mAP_l": 0.495, "bbox_mAP_copypaste": "0.382 0.588 0.414 0.219 0.409 0.495", "segm_mAP": 0.347, "segm_mAP_50": 0.557, "segm_mAP_75": 0.372, "segm_mAP_s": 0.183, "segm_mAP_m": 0.374, "segm_mAP_l": 0.472, "segm_mAP_copypaste": "0.347 0.557 0.372 0.183 0.374 0.472"}
-```
-### 6.3 精度对比
-om推理box map50精度为0.584,开源box map50精度为0.588,精度下降在1%之内,因此可视为精度达标
-om推理segm map50精度为0.553,开源segm map50精度为0.557,精度下降在1%之内,因此可视为精度达标
-
-## 7 性能对比
-
-- **[npu性能数据](#71-npu性能数据)**
-- **[T4性能数据](#72-T4性能数据)**
-- **[性能对比](#73-性能对比)**
-
-### 7.1 npu性能数据
-batch1的性能:
- 测试npu性能要确保device空闲,使用npu-smi info命令可查看device是否在运行其它推理任务
-```
-./benchmark.x86_64 -round=20 -om_path=mask_rcnn_r50_fpn_1x_coco_bs1.om -device_id=0 -batch_size=1
-```
-执行20次纯推理取均值,统计吞吐率与其倒数时延(benchmark的时延是单个数据的推理时间),npu性能是单个device执行的结果
-```
-[INFO] Dataset number: 19 finished cost 512.331ms
-[INFO] PureInfer result saved in ./result/PureInfer_perf_of_mask_rcnn_r50_fpn_1x_coco_bs1_in_device_0.txt
------------------PureInfer Performance Summary------------------
-[INFO] ave_throughputRate: 1.95202samples/s, ave_latency: 512.318ms
-----------------------------------------------------------------
-```
-maskrcnn mmdetection不支持多batch
-
- **性能优化:**
-> 生成多batch模型需要修改源码,否则atc转换出的多batch模型推理结果不正确,且多batch性能没有提升
->
-
-
-### 7.2 T4性能数据
-batch1性能:
-onnx包含自定义算子,因此不能使用开源TensorRT测试性能数据,故在T4机器上使用pth在线推理测试性能数据
-
-测评T4精度与性能:
-```shell
-git clone https://github.com/open-mmlab/mmcv
-cd mmcv
-MMCV_WITH_OPS=1 pip3.7 install -e .
-cd ..
-git clone https://github.com/open-mmlab/mmdetection -cd mmdetection -pip3.7 install -r requirements/build.txt -python3.7 setup.py develop -wget http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth -在当前目录按结构构造数据集:data/coco目录下有annotations与val2017,annotations目录存放coco数据集的instances_val2017.json,val2017目录存放coco数据集的5000张验证图片。 -python3.7 tools/test.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ./mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --eval bbox segm -``` -``` -6.0 task/s -``` - -### 7.3 性能对比 -310单卡4个device,benchmark测试的是一个device。T4一个设备相当于4个device,测试的是整个设备。benchmark时延是吞吐率的倒数,T4时延是吞吐率的倒数乘以batch。对于batch1,1.95202 * 4 > 6.0,即npu性能超过T4 -对于batch1,npu性能均高于T4性能1.2倍,该模型放在benchmark/cv/segmentation目录下 - - diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/benchmark/nlp/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/official/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/official/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep" "b/onnx\347\253\257\345\210\260\347\253\257\346\216\250\347\220\206\346\214\207\345\257\274/research/.keep" deleted file mode 100644 index e69de29..0000000 -- Gitee From 0c0995d057f9b184f6e276164c4a471f61019519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=B8=9A=E5=BA=86?= Date: Sat, 17 Jul 2021 08:45:32 +0000 Subject: [PATCH 3/4] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Asce?= =?UTF-8?q?ndPyTorch=E6=A8=A1=E5=9E=8B=E6=8E=A8=E7=90=86=E4=BC=97=E6=99=BA?= =?UTF-8?q?=E9=AA=8C=E6=94=B6=E6=8C=87=E5=8D=97.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...14\346\224\266\346\214\207\345\215\227.md" | 276 ------------------ 1 file changed, 276 deletions(-) delete mode 100644 "AscendPyTorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272\351\252\214\346\224\266\346\214\207\345\215\227.md" diff --git "a/AscendPyTorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272\351\252\214\346\224\266\346\214\207\345\215\227.md" "b/AscendPyTorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272\351\252\214\346\224\266\346\214\207\345\215\227.md" deleted file mode 100644 index e0449aa..0000000 --- "a/AscendPyTorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272\351\252\214\346\224\266\346\214\207\345\215\227.md" +++ /dev/null @@ -1,276 +0,0 @@ -# Ascend PyTorch 模型推理众智验收指南 - -1. 先上gitee管理平台,将验收目标调整至验收状态 -2. 检查PR内容,文件夹路径和文件结构 - - PR模板和文件路径结构都在下面附件里有详细说明,请仔细check - - 参见付件pr检视,请仔细check -3. 
按照验收脚本在交付文件夹下进行验收 - 验收机器:192.168.88.45 - 参考[ResNext50测试说明](https://gitee.com/ascend/modelzoo/blob/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50/test/README.md) - 准备环境: - ``` - 1.拉取modelzoo上提交的模型pr,然后将模型文件夹ResNext50拷贝到验收机器的/home/verify_models,并进入到/home/verify_models/ResNext50 - 2.根据requirements.txt安装必要的依赖 - 3.git clone ResNext50模型结构代码所在的开源代码仓torchvision - 4.如果通过补丁修改了开源模型代码则将补丁打入,如果开源模型代码需要安装则安装 - 5.获取训练的权重文件 - 6.获取数据集存放路径 - 7.获取benchmark工具 - ``` - - - ```shell - #准备环境 - 交付的代码文件夹下获取模型结构的开源代码,安装必要的依赖,获取训练提供的权重文件,获取数据集路径,获取benchmark工具 - - # pth是否能正确转换为om - bash test/pth2om.sh - - # 精度数据是否达标(需要显示官网pth精度与om模型的精度) - # npu性能数据(确保device空闲时测试,如果模型支持多batch,测试bs1与bs16,否则只测试bs1,性能数据以单卡吞吐率为标准),不指定数据集目录时默认/root/datasets - bash test/eval_acc_perf.sh --datasets_path=/root/datasets - - # 在t4环境测试性能数据(确保gpu空闲时测试,如果模型支持多batch,测试bs1与bs16,否则只测试bs1,如果导出的onnx模型因含自定义算子等不能离线推理,则在t4上测试pytorch模型的在线推理性能,性能数据以单卡吞吐率为标准) - bash test/perf_t4.sh - ``` - - - 验收过程中遇到问题,如是一些路径或者打字错误的问题,先修复继续执行 - - 每次验收都需要对验收脚本中的所有未验收脚本进行验收,不要因某一项验收失败而阻塞后续验收工作 -4. 验收反馈 - - 验收后,使用验收报告模板,在评论区反馈验收结果 - ```shell - # 第X次验收测试 - # 验收结果 OK / Failed - # 验收环境: A + K / CANN 5.0.1 - # 关联issue: - - # pth是否能正确转换为om - bash test/pth2om.sh - # 验收结果: OK / Failed - # 备注: 成功生成om,无运行报错,报错日志xx 等 - - # 精度数据是否达标(需要显示官网pth精度与om模型的精度) - # npu性能数据(确保device空闲时测试,如果模型支持多batch,测试bs1与bs16,否则只测试bs1,性能数据以单卡吞吐率为标准) - bash test/eval_acc_perf.sh --datasets_path=/root/datasets - # 验收结果: 是 / 否 - # 备注: 目标pth精度top1:77.62% top5:93.70%;bs1,bs16验收om精度top1:77.62% top5:93.69%;精度下降不超过1%;无运行报错,报错日志xx 等 - # 备注: 验收310测试性能bs1:1497.252FPS bs16:2096.376FPS;无运行报错,报错日志xx 等 - - # 在t4环境测试性能数据(确保gpu空闲时测试,如果模型支持多batch,测试bs1与bs16,否则只测试bs1,如果导出的onnx模型因含自定义算子等不能离线推理,则在t4上测试pytorch模型的在线推理性能,性能数据以单卡吞吐率为标准),该步是验证eval_acc_perf.sh显示的t4性能数据是否正确,该脚本中填写的性能数据与t4实测性能数据要接近 - bash test/perf_t4.sh - # 验收结果: OK / Failed - # 备注: 验收t4测试性能bs1:763.044FPS bs16:1234.940FPS,与eval_acc_perf.sh脚本显示的t4性能数据一致;无运行报错,报错日志xx 等 - - # 310性能是否超过t4: 是 / 否 - bs1:310=(1497.252/763.044)1.96倍t4 - bs16:310=(2096.376/1234.940)1.70倍t4 - ``` - - 示例链接 https://gitee.com/ascend/modelzoo/pulls/836#note_4814643 -5. 验收完成后,需要进行以下几步 - - 在pr评论区按照上文模板反馈验收结果 - - 上gitee管理平台,将验收目标调整至完成状态 - - 上团队空间-测试管理-PyTorch模型众智验收跟踪表 更新模型验收数据 - - 完成验收测试报告文档,归档obs - - 整理验收必要的交付件,归档obs,将/home/verify_models/{模型名}目录归档,归档时需要删除该目录下的占用磁盘空间的无用文件夹预处理后的数据集prep_dataset,result/dumpOutput_device0与result/dumpOutput_device1 -6. 
验收归档与统计 - 1./home/verify_models/modelzoo目录用来拉取modelzoo代码pr - 1./home/verify_models目录下需要保存以上测试后通过的模型 - 3./home/verify_models/models_result.xlsx里填写模型的测试数据,bs4,8,32的性能数据从README.md中获取,如果蓝区版本精度性能不达标,而黄区测试达标在备注里写明黄区版本,如果黄区测试也不能达标则写明黄区测试精度或性能不达标 - 4./home/verify_models仅用来存放测试通过的模型,models_result.xlsx以及modelzoo的代码,不要在该目录存放其它无用的文件 - - - - -- 关联issue模板 (负责人请关联相应的学生,若无法关联,请关联验收者) - ``` - 【Pytorch模型推理众智测试验收】【第x次回归测试】 xxx模型 验收不通过 - - 贴上验收报告 - - ``` - - 在pr提交的内容栏里编辑issue的链接即可关联对应的issue,问题解决后issue将自动关闭 - - 示例链接 https://gitee.com/ascend/modelzoo/issues/I3FI5L?from=project-issue - -### 附: pr检视 - -- pr检视: -1.标题格式:[华为大学昇腾学院][高校贡献][Pytorch离线推理][Cascade_RCNN]-初次提交 -2.包含bs1与bs16权重精度与om精度,包含bs1与bs16的t4与310性能数据,性能数据用fps表示 -3.备注:如果蓝区版本测精度或性能不达标,最新CANN版本测可以达标,这里需要写出原因与最新CANN包版本,用最新版本测。如果是无法规避的算子缺陷导致性能不达标,这里需要添加性能不达标的原因与解决方案。如果onnx因包含自定义算子不支持推理,需要说明性能是在t4上测的在线推理,如果模型不支持batch 16,也需要说明一下 -4.自验报告:CANN包版本与精度性能等数据是否正确 - -- 代码规范: -参考[ResNext50](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50) -1.pipline要通过,缺陷扫描与规范扫描要尽可能改 -2.python脚本文件头需要加License声明 -3.pr不要包括开源模型的代码与权重文件 -注意: -4.python脚本不能包含从网上下载权重的代码,比如函数预训练为true时一般会下载权重 -5.python脚本避免依赖非必要的第三方库 -6.requirements.txt包含服务器上安装的本模型所有必要依赖的开源库的具体版本 - -- 模型README.md检视: -模板参见[README.md](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50/README.md) -1.1.2 代码地址->需要给出使用的模型开源代码地址与其branch,commitid -2.2 环境说明->需要给出服务器上安装的本模型所有必要依赖的开源库的具体版本 -3.3.1 pth转onnx模型->优先使用训练提供的权重文件,如果训练的权重文件网上能获则需给出网址,否则需要给出从哪获取权重文件。如果训练没有提供权重则使用开源代码仓的权重文件。需要给出权重文件名与其md5sum值 -4.3.1 pth转onnx模型->如果需要对模型的开源代码做修改,以打patch的形式修改 -5.3.1 模型转换要点:->对于CANN包算子有问题导致模型转换失败或需要规避才能转换成功,则需要在模型转换要点里写明定位主要过程,原因与措施 -6.6.1 离线推理TopN精度统计->精度测试需要测试bs1与bs16的精度 -7.6.1 精度调试:->对于CANN包算子有问题导致精度不达标或需要规避才能达标,则需要在精度调试里写明定位主要过程,原因与措施 -8.7 性能对比->性能数据需要测bs1,16,4,8,32的性能数据,且需要计算出单卡吞吐率 -9.7 性能优化:->对于CANN包算子有问题导致性能不达标或需要规避才能达标,则需要在性能优化里写明定位主要过程,原因与措施 - -- test/README.md检视: -该文件是验收测试说明,主要是准备环境,pip3.7 install -r requirements.txt可能会重新安装某版本pytorch,验收时根据需要决定是否执行 -参见模板[test/README.md](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50/test/README.md) - -### 附: 模型推理指导中的交付标准与规范 -- 交付标准 - - 精度: - om模型推理的精度与Ascend 910训练出的权重精度或PyTorch预训练模型github代码仓README.md或官网文档公布的精度对比,精度下降不超过1%则认为精度达标 - - 性能: - Ascend benchmark工具在数据集上推理测的NPU 310单颗device吞吐率乘以4颗即单卡吞吐率大于TensorRT工具测的GPU T4单卡吞吐率则认为性能达标 - 如若交付要求中对性能有要求(易模型),310的性能必须高于t4的性能 - 如若交付要求中没有对性能有要求(中难模型),310上推理需尽可能进行性能优化 - 若无法达到,则需要向华为方提交性能已达瓶颈的认证申请,华为方将定期组织专家组对申请模型进行评审,通过评审的模型允许以不高于t4的性能进行交付 - - 脚本: - 代码符合pep8规范; - 脚本命名格式需统一,文件名含模型名时模型名用小写,模型名含多个字符串时用-连接; - xxx_pth2onnx.py中不能使用从网络下载权重pth文件的代码,xxx_pth2onnx.py应有输入输出参数,输入是本地权重pth文件,输出是生成的onnx模型文件名; - xxx_pth_preprocess.py与xxx_pth_postprocess.py尽可能只引用numpy,Pillow,torch,pycocotools等基础库,如不要因mmdetection框架的数据处理与精度评测部分封装了这些基础库的操作,为调用这些简单接口,前后处理脚本就依赖mmdetection; - 不同模型的脚本与代码部分处理流程有相似性,尽量整合成通用的脚本与代码。 - - 推理过程: - 需要提供端到端推理过程中执行的命令等 - - 关键问题总结: - 需要提供端到端推理遇到的关键问题的简要调试过程,至少包含模型转换要点,精度调试,性能优化 - - 说明: - ``` - 1.如果已经有了ascend 910训练提供的权重文件,那么优先使用910训练提供的权重文件做离线推理,精度与910训练出的精度对齐;如果开源代码仓提供了多个权重文件,使用常用的基础的那个配置的权重文件即可;如果开源代码仓没有提供pth权重文件,则需要该模型的训练同学提供pth权重文件,或者使用开源代码仓训练脚本简单训练一个pth权重文件,然后对比om精度与该pth权重文件的精度 - - 2.由于随机数可能不能模拟数据分布,Ascend benchmark工具纯推理功能测的有些模型性能数据可能不太准,所以模型测试脚本与提交代码的描述中的性能数据以Ascend benchmark在数据集上推理时得到性能数据为准 - - 3.如果模型支持多batch,需要测试batch1,4,8,16,32的精度与性能,写在README.md里,模型测试脚本与提交代码的描述只需提供bs1和bs16的精度性能数据 - - 4.如果导出的onnx因包含自定义算子等而不能推理,则在t4上运行开源评测脚本测试pth模型在线推理性能 - - 5.对于性能不达标的模型,需要进行如下工作: - 1)优化修改onnx模型去掉影响性能的冗余pad,用Ascend 
- Deliverables
  - Reference: [ResNeXt50_Onnx模型端到端推理指导.md](https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50)
  - Final deliverables:
    The code meeting the delivery standards above, README.md, and the acceptance scripts
    Non-code deliverables such as weight files and profiling data are zipped together and sent by mail
  - Final delivery form:
    gitee URL: https://gitee.com/ascend/modelzoo/tree/master/contrib/ACL_PyTorch/Research
    Commit message format: 【高校贡献-${学校学院名称}】【Pytorch离线推理-${模型名称}】${PR内容摘要}
    Model names use UpperCamelCase; multi-token model names are joined with a hyphen or an underscore: use an underscore when the surrounding context already uses hyphens, a hyphen otherwise
    Models whose npu performance exceeds 1.2x the T4 performance at both batch 1 and batch 16 go under the Benchmark directory, 1-1.2x under Official, and below 1x under Research; for now everything goes under contrib/ACL_PyTorch/Research (a small directory-classification sketch follows the acceptance-script block below)
- gitee PR contribution flow
  - fork [modelzoo](https://gitee.com/ascend/modelzoo) to a personal repository
  - push the code to the personal repository
  - sign the CLA [link](https://clasign.osinfra.cn/sign/Z2l0ZWUlMkZhc2NlbmQ=)
    - choose Sign Individual CLA
    - if the PR was opened before signing, comment ```/check-cla``` on it after signing the CLA to re-run the check
  - organize the code according to the folder naming and directory rules, finish self-testing, open the PR using the PR content template, and request 王姜奔(wangjiangben_hw) as the reviewer
  - after the PR is opened, Huawei reviews the code and verifies the PR; watch the PR comments and fix findings promptly
  - the PR is merged into the trunk once final acceptance completes
- gitee acceptance scripts (self-test them) and PR content template
  - Acceptance scripts (self-test them)
    >![](public_sys-resources/icon-note.gif)
    **Note:**
    > **Make sure self-testing passes before submitting! The scripts below must run as-is!**

    ```shell
    # Prepare the environment: in the delivered code folder, fetch the open-source model code,
    # install the required dependencies, obtain the trained weight file, the dataset path and the benchmark tool

    # Does the pth convert correctly to om?
    bash test/pth2om.sh

    # Does the accuracy meet the bar? (both the official pth accuracy and the om accuracy must be shown)
    # NPU performance data (test while the device is idle; if the model supports multi-batch, test bs1 and bs16,
    # otherwise bs1 only; single-card throughput); defaults to /root/datasets when no dataset directory is specified
    bash test/eval_acc_perf.sh --datasets_path=/root/datasets

    # Performance on the T4 environment (test while the GPU is idle; if the model supports multi-batch, test bs1
    # and bs16, otherwise bs1 only; if the exported onnx cannot run offline inference, e.g. because it contains
    # custom operators, measure the PyTorch model's online inference on T4 instead; single-card throughput)
    bash test/perf_t4.sh
    ```
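The Benchmark / Official / Research placement rule above is a simple threshold on the npu/T4 throughput ratios. A sketch of that classification, under the assumption that "above the bar at both batches" means taking the worse of the bs1 and bs16 ratios (the helper name is made up):

```python
def target_directory(ratio_bs1: float, ratio_bs16: float) -> str:
    """Map npu/T4 throughput ratios to the modelzoo directory tier
    (currently everything is still delivered under contrib/ACL_PyTorch/Research)."""
    worst = min(ratio_bs1, ratio_bs16)  # both batch sizes must clear the bar
    if worst > 1.2:
        return "Benchmark"
    if worst >= 1.0:
        return "Official"
    return "Research"

# ResNeXt50 numbers from the report template: 1.96x at bs1, 1.70x at bs16 -> Benchmark tier
print(target_directory(1497.252 / 763.044, 2096.376 / 1234.940))
```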
  - PR content template
    - PR example: https://gitee.com/ascend/modelzoo/pulls/887
    - PR title
      - [学校学院名称][高校贡献][Pytorch离线推理][模型名称]-PR内容摘要
      - Example: [华为大学昇腾学院][高校贡献][Pytorch离线推理][ResNeXt50]-初次提交

    ```
    **What type of PR is this?**
    > /kind task

    **What does this PR do / why do we need it**:
    # describe the details of this PR briefly

    | Model | Official accuracy | 310 accuracy | T4 performance | 310 performance |
    | :------: | :------: | :------: | :------: | :------: |
    | ResNeXt50 bs1 | top1:77.62% top5:93.70% | top1:77.62% top5:93.69% | 763.044fps | 1497.252fps |
    | ResNeXt50 bs16 | top1:77.62% top5:93.70% | top1:77.62% top5:93.69% | 1234.940fps | 2096.376fps |
    # if an unavoidable operator defect causes a performance miss, add the cause and the solution here

    Self-test report
    # Acceptance test round X
    # Result: OK / Failed
    # Environment: A + K / CANN 5.0.1
    # Related issue:

    # Does the pth convert correctly to om?
    bash test/pth2om.sh
    # Result: OK / Failed
    # Remarks: om generated successfully, no runtime errors, error log xx, etc.

    # Does the accuracy meet the bar? (both the official pth accuracy and the om accuracy must be shown)
    # NPU performance data (test while the device is idle; if the model supports multi-batch, test bs1 and bs16, otherwise bs1 only; single-card throughput)
    bash test/eval_acc_perf.sh --datasets_path=/root/datasets
    # Result: Yes / No
    # Remarks: target pth accuracy top1:77.62% top5:93.70%; om accuracy at bs1/bs16 top1:77.62% top5:93.69%; accuracy drop within 1%; no runtime errors, error log xx, etc.
    # Remarks: measured 310 performance bs1:1497.252FPS bs16:2096.376FPS; no runtime errors, error log xx, etc.

    # Performance on the T4 environment (test while the GPU is idle; if the model supports multi-batch, test bs1 and bs16,
    # otherwise bs1 only; if the exported onnx cannot run offline inference, measure PyTorch online inference on T4;
    # single-card throughput). This step verifies the T4 numbers shown by eval_acc_perf.sh: the numbers recorded in that
    # script must be close to the T4 numbers actually measured.
    bash test/perf_t4.sh
    # Result: OK / Failed
    # Remarks: measured T4 performance bs1:763.044FPS bs16:1234.940FPS, consistent with the T4 numbers shown by eval_acc_perf.sh; no runtime errors, error log xx, etc.

    # Does the 310 outperform the T4: Yes / No
    bs1: 310 = (1497.252 / 763.044) = 1.96x T4
    bs16: 310 = (2096.376 / 1234.940) = 1.70x T4

    Example: https://gitee.com/ascend/modelzoo/pulls/836#note_4750681

    **Which issue(s) this PR fixes**:
    # the PR that later issues will be associated with

    Fixes #

    **Special notes for your reviewers**:
    # what you want to tell the reviewers
    ```

--
Gitee

From 505cd78b89296e53cd8e2596632d12dee751bf38 Mon Sep 17 00:00:00 2001
From: pengyeqing
Date: Sat, 17 Jul 2021 16:52:50 +0800
Subject: [PATCH 4/4] update

---
 ...47\272\277\346\216\250\347\220\206-FAQ.md" |  29 +-
 ...347\220\206\344\274\227\346\231\272FAQ.md" | 303 ------------------
 2 files changed, 23 insertions(+), 309 deletions(-)
 delete mode 100644 "AscendPytorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272FAQ.md"

diff --git "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md" "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md"
index 09a53c3..9dc2fae 100644
--- "a/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md"
+++ "b/Ascend-PyTorch\347\246\273\347\272\277\346\216\250\347\220\206\346\214\207\345\257\274/PyTorch\347\246\273\347\272\277\346\216\250\347\220\206-FAQ.md"
@@ -1,10 +1,27 @@
 # Ascend PyTorch model inference FAQ
-- [1 Introduction](#1-introduction)
-- [2 Common FAQ](#2-common-faq)
-  - [2.1 Common FAQ: getting NPU models working](#21-common-faq-getting-npu-models-working)
-  - [2.2 Common FAQ: NPU model accuracy debugging](#22-common-faq-npu-model-accuracy-debugging)
-  - [2.3 Common FAQ: NPU model performance optimization](#23-common-faq-npu-model-performance-optimization)
-
+- [Ascend PyTorch model inference FAQ](#ascend-pytorch-model-inference-faq)
+- [1 Introduction](#1-introduction)
+- [2 Common FAQ](#2-common-faq)
+  - [2.1 Common FAQ: getting NPU models working](#21-common-faq-getting-npu-models-working)
+    - [FAQ1. What deliverables are required, and how are they delivered?](#faq1-what-deliverables-are-required-and-how-are-they-delivered)
+    - [FAQ2. How do I use the server fitted with Ascend 310 cards?](#faq2-how-do-i-use-the-server-fitted-with-ascend-310-cards)
+    - [FAQ3. How does inference relate to training?](#faq3-how-does-inference-relate-to-training)
+    - [FAQ4. How much work is inference?](#faq4-how-much-work-is-inference)
+    - [FAQ5. Which inference steps run on the 310 server, which on the T4 server, and which on the CPU?](#faq5-which-inference-steps-run-on-the-310-server-which-on-the-t4-server-and-which-on-the-cpu)
+    - [FAQ6. How do I choose the pretrained weight file?](#faq6-how-do-i-choose-the-pretrained-weight-file)
+    - [FAQ7. Which batch sizes need accuracy and performance tests?](#faq7-which-batch-sizes-need-accuracy-and-performance-tests)
+    - [FAQ8. The onnx cannot run inference; how do I measure T4 performance?](#faq8-the-onnx-cannot-run-inference-how-do-i-measure-t4-performance)
+    - [FAQ9. How do I measure om performance?](#faq9-how-do-i-measure-om-performance)
+    - [FAQ10. What does the -1 in the export script's dynamic_axes and in the onnx input shape (-1,3,224,224) mean?](#faq10-what-does-the--1-in-the-export-scripts-dynamic_axes-and-in-the-onnx-input-shape--13224224-mean)
+    - [FAQ11. How do I check logs when an atc command fails?](#faq11-how-do-i-check-logs-when-an-atc-command-fails)
+    - [FAQ12. How do I handle operators that cannot be exported to onnx - equivalent replacement with a custom operator?](#faq12-how-do-i-handle-operators-that-cannot-be-exported-to-onnx---equivalent-replacement-with-a-custom-operator)
+    - [FAQ13. atc or benchmark commands fail: command not found, or ascend shared libraries missing](#faq13-atc-or-benchmark-commands-fail-command-not-found-or-ascend-shared-libraries-missing)
+    - [FAQ14. Performance misses the bar and profiling shows TransData is expensive; optimize as below](#faq14-performance-misses-the-bar-and-profiling-shows-transdata-is-expensive-optimize-as-below)
+    - [FAQ15. Resolving atc ERROR when converting onnx to om](#faq15-resolving-atc-error-when-converting-onnx-to-om)
+    - [FAQ16. Adapting the offline-inference post-processing script](#faq16-adapting-the-offline-inference-post-processing-script)
+    - [FAQ17. Dataset preprocessing fails](#faq17-dataset-preprocessing-fails)
+  - [2.2 Common FAQ: NPU model accuracy debugging](#22-common-faq-npu-model-accuracy-debugging)
+  - [2.3 Common FAQ: NPU model performance optimization](#23-common-faq-npu-model-performance-optimization)
 # [1 Introduction](#1-introduction)
 
 This document is aimed at Ascend offline model inference developers and guides them to bring inference accuracy and performance up to the bar under the Ascend CANN release. It lists only the common problems and solutions met during offline model inference, and is continuously updated.

diff --git "a/AscendPytorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272FAQ.md" "b/AscendPytorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272FAQ.md"
deleted file mode 100644
index 9dc2fae..0000000
--- "a/AscendPytorch\346\250\241\345\236\213\346\216\250\347\220\206\344\274\227\346\231\272FAQ.md"
+++ /dev/null
@@ -1,303 +0,0 @@
# Ascend PyTorch model inference FAQ
- [Ascend PyTorch model inference FAQ](#ascend-pytorch-model-inference-faq)
- [1 Introduction](#1-introduction)
- [2 Common FAQ](#2-common-faq)
  - [2.1 Common FAQ: getting NPU models working](#21-common-faq-getting-npu-models-working)
    - [FAQ1. What deliverables are required, and how are they delivered?](#faq1-what-deliverables-are-required-and-how-are-they-delivered)
    - [FAQ2. How do I use the server fitted with Ascend 310 cards?](#faq2-how-do-i-use-the-server-fitted-with-ascend-310-cards)
    - [FAQ3. How does inference relate to training?](#faq3-how-does-inference-relate-to-training)
    - [FAQ4. How much work is inference?](#faq4-how-much-work-is-inference)
    - [FAQ5. Which inference steps run on the 310 server, which on the T4 server, and which on the CPU?](#faq5-which-inference-steps-run-on-the-310-server-which-on-the-t4-server-and-which-on-the-cpu)
    - [FAQ6. How do I choose the pretrained weight file?](#faq6-how-do-i-choose-the-pretrained-weight-file)
    - [FAQ7. Which batch sizes need accuracy and performance tests?](#faq7-which-batch-sizes-need-accuracy-and-performance-tests)
    - [FAQ8. The onnx cannot run inference; how do I measure T4 performance?](#faq8-the-onnx-cannot-run-inference-how-do-i-measure-t4-performance)
    - [FAQ9. How do I measure om performance?](#faq9-how-do-i-measure-om-performance)
    - [FAQ10. What does the -1 in the export script's dynamic_axes and in the onnx input shape (-1,3,224,224) mean?](#faq10-what-does-the--1-in-the-export-scripts-dynamic_axes-and-in-the-onnx-input-shape--13224224-mean)
    - [FAQ11. How do I check logs when an atc command fails?](#faq11-how-do-i-check-logs-when-an-atc-command-fails)
    - [FAQ12. How do I handle operators that cannot be exported to onnx - equivalent replacement with a custom operator?](#faq12-how-do-i-handle-operators-that-cannot-be-exported-to-onnx---equivalent-replacement-with-a-custom-operator)
    - [FAQ13. atc or benchmark commands fail: command not found, or ascend shared libraries missing](#faq13-atc-or-benchmark-commands-fail-command-not-found-or-ascend-shared-libraries-missing)
    - [FAQ14. Performance misses the bar and profiling shows TransData is expensive; optimize as below](#faq14-performance-misses-the-bar-and-profiling-shows-transdata-is-expensive-optimize-as-below)
    - [FAQ15. Resolving atc ERROR when converting onnx to om](#faq15-resolving-atc-error-when-converting-onnx-to-om)
    - [FAQ16. Adapting the offline-inference post-processing script](#faq16-adapting-the-offline-inference-post-processing-script)
    - [FAQ17. Dataset preprocessing fails](#faq17-dataset-preprocessing-fails)
  - [2.2 Common FAQ: NPU model accuracy debugging](#22-common-faq-npu-model-accuracy-debugging)
  - [2.3 Common FAQ: NPU model performance optimization](#23-common-faq-npu-model-performance-optimization)

# [1 Introduction](#1-introduction)

This document is aimed at Ascend offline model inference developers and guides them to bring inference accuracy and performance up to the bar under the Ascend CANN release. It lists only the common problems and solutions met during offline model inference, and is continuously updated.

# [2 Common FAQ](#2-common-faq)

## [2.1 Common FAQ: getting NPU models working](#21-common-faq-getting-npu-models-working)

### FAQ1. What deliverables are required, and how are they delivered?
For delivery, see section 6.2 "Delivery standards and specifications" of the Inference Guide (《推理指导》).
Deliverable sample: https://gitee.com/ascend/modelzoo/tree/master/built-in/ACL_PyTorch/Benchmark/cv/classification/ResNext50

### FAQ2. How do I use the server fitted with Ascend 310 cards?
The provided server with Ascend 310 cards already has the Ascend packages installed; the sample under home/common/resnext50 on the server runs out of the box.

### FAQ3. How does inference relate to training?
Model inference and training are independent tasks, and inference is somewhat simpler: it runs the weights trained on Ascend 910, or the weights provided by the model's open-source repository, on the 310. It can usually be done in parallel while waiting for training results.

### FAQ4. How much work is inference?
You first need to get familiar with the related work, then do the model inference itself; if accuracy or performance misses the bar, that costs considerable extra time. Between inference, review fixes, testing and documentation, this is not a three-day job: the whole cycle from start to final acceptance is planned at 1 to 1.5 months.

### FAQ5. Which inference steps run on the 310 server, which on the T4 server, and which on the CPU?
Pre/post-processing and the onnx export can run on the CPU. The om conversion command and the benchmark inference command run on the server fitted with Ascend 310 cards, because both depend on the compilers and npu operator libraries shipped in the Ascend CANN package. GPU performance numbers are measured on a server fitted with a T4 card.

### FAQ6. How do I choose the pretrained weight file?
If weights trained on Ascend 910 are already available, prefer them for offline inference and align accuracy with the 910 training accuracy.
If the open-source repository provides several weight files, use the one for the common, basic configuration; when the model supports several tasks, inference for a single task is enough.
If the repository provides no pth weights, ask the student training this model for one, or train one briefly with the repository's training scripts, then compare the om accuracy against that pth's accuracy.

### FAQ7. Which batch sizes need accuracy and performance tests?
If the model supports multi-batch, test accuracy and performance at batch 1, 4, 8, 16 and 32 and record them in README.md; the model test scripts and the PR description only need the bs1 and bs16 numbers.

### FAQ8. The onnx cannot run inference; how do I measure T4 performance?
If the exported onnx cannot run inference, e.g. because it contains custom operators, run the open-source evaluation script on T4 to measure the pth model's online inference performance (a timing sketch follows FAQ9 below).

### FAQ9. How do I measure om performance?
Make sure the device runs nothing but this test while measuring; check that the device is idle with npu-smi info.
Because random inputs may not reflect the real data distribution, the pure-inference performance numbers of the Ascend benchmark tool can be inaccurate for some models, so the performance numbers in the model test scripts and the PR description are those measured by Ascend benchmark while inferring on the dataset.
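For FAQ8, a minimal sketch of timing pth online inference throughput on T4; the model, batch size and iteration counts are illustrative, not part of any delivered script:

```python
import time
import torch
import torchvision.models as models

# Illustrative model and sizes; in practice load the pth under test.
model = models.resnext50_32x4d().cuda().eval()
batch = torch.randn(16, 3, 224, 224).cuda()
iters = 100

with torch.no_grad():
    for _ in range(10):              # warm-up runs
        model(batch)
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(iters):
        model(batch)
    torch.cuda.synchronize()         # wait for all queued kernels before stopping the clock
    elapsed = time.time() - start

print(f"online inference throughput: {iters * batch.shape[0] / elapsed:.3f} fps")
```

Throughput is reported as fps, i.e. images per second, matching the single-card throughput convention used above.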
### FAQ10. What does the -1 in the export script's dynamic_axes and in the onnx input shape (-1,3,224,224) mean?
An onnx model exported as below has input shape (-1,3,224,224) when viewed with the netron visualizer. The -1 means the onnx model has a dynamic batch dimension: when measuring onnx performance with TensorRT on T4, you can feed input of any batch (batch,3,224,224). dynamic_axes is the dynamic-batch argument: 'image': {0: '-1'} declares dim 0 of the input image, i.e. the batch dim, as dynamic ('-1' is simply the name given to that dynamic dimension).
```
    input_names = ["image"]
    output_names = ["class"]
    dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}}
    dummy_input = torch.randn(1, 3, 224, 224)
    torch.onnx.export(model, dummy_input, output_file, input_names=input_names, dynamic_axes=dynamic_axes, output_names=output_names, opset_version=11, verbose=True)
```
Whatever batch the onnx model has, as long as --input_shape specifies a positive batch when converting onnx to om, you get an om model for that batch. om does support dynamic batch, but we do not use dynamic-batch om models.
```
atc --framework=5 --model=./resnext50.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=resnext50_bs16 --log=debug --soc_version=Ascend310
```
Note that some models, such as shufflenetv1, do not actually support dynamic batch: to convert to a fixed-batch om, you must not only pass that batch in --input_shape but also convert from an onnx exported with the same fixed batch, otherwise conversion fails.

### FAQ11. How do I check logs when an atc command fails?
```
export ASCEND_SLOG_PRINT_TO_STDOUT=1
export ASCEND_GLOBAL_LOG_LEVEL=0   # debug 0 --> info 1 --> warning 2 --> error 3
then run: atc ... > atc.log
```

### FAQ12. How do I handle operators that cannot be exported to onnx - equivalent replacement with a custom operator?
PyTorch's adaptive_avg_pool2d is not yet supported by onnx export, so exporting fails on it. The first idea is to replace adaptive_avg_pool2d with avg_pool2d, but when the last two input dims are not integer multiples of the output dims, the replacement is not fully equivalent. Since the npu does implement an adaptive_avg_pool2d operator, the solution becomes exporting adaptive_avg_pool2d as a custom operator instead. The custom operator needs no concrete implementation code (therefore the exported onnx cannot run under onnxruntime, and pytorch's _check_onnx_proto(proto) must be changed to pass to disable the export-time check); it only has to return an output whose shape matches the original operator's output shape. The onnx then contains just the operator's declaration (its data types and attributes must match the npu operator), and when converting onnx to om, if the atc tool's onnx plugin supports the operator, atc resolves the declaration to the operator's npu implementation (an inspection sketch of the exported graph follows FAQ13 below).
The npu declaration of adaptive_avg_pool2d:
```
REG_OP(AdaptiveAvgPool2d)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(output_size, ListInt)
    .OP_END_FACTORY_REG(AdaptiveAvgPool2d)
```
Modify the model code to route adaptive_avg_pool2d through a custom operator and export the onnx; the _i suffix of output_size_i marks an int64 (ListInt) operator attribute:
```
class AdaptiveAvgPoolOp(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, output_size):
        # the implementation is irrelevant; only the output shape must match the real operator
        out = torch.randn(x.shape[0], x.shape[1], output_size[0], output_size[1]).to(x.dtype)
        return out

    @staticmethod
    def symbolic(g, x, output_size):
        # emits the AdaptiveAvgPool2d node declaration into the onnx graph
        out = g.op('AdaptiveAvgPool2d', x, output_size_i=output_size)
        return out

def adaptive_avg_pool_op(x, output_size):
    out = AdaptiveAvgPoolOp.apply(x, output_size)
    return out

# replace x = F.adaptive_avg_pool2d(input, output_size=bin_size)
# with    x = adaptive_avg_pool_op(input, (bin_size, bin_size))
```

### FAQ13. atc or benchmark commands fail: command not found, or ascend shared libraries missing

* Symptom

  ```
  Command 'atc' not found, but can be installed with:
  or
  ./benchmark.x86_64: error while loading shared libraries: libascendcl.so: cannot open shared object file: No such file or directory
  ```

* Cause

  These errors appear when the environment variables are unset or invalid.

* Fix

  Set the environment variables:
  ```
  export install_path=/usr/local/Ascend/ascend-toolkit/latest
  export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
  export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
  export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
  export ASCEND_OPP_PATH=${install_path}/opp
  export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest
  ```
  If you log in to the server fitted with Ascend 310 cards as a regular user, run the commands with sudo, and in addition:
  ```
  in /etc/sudoers, change Defaults env_reset to Defaults !env_reset
  in /etc/bash.bashrc, add alias sudo='sudo env PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH'
  ```
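Because the custom-operator onnx above cannot run under onnxruntime, the export can instead be checked statically with the onnx package. A minimal inspection sketch (the file name is illustrative):

```python
import onnx

model = onnx.load("model_custom_op.onnx")  # illustrative file name
for node in model.graph.node:
    if node.op_type == "AdaptiveAvgPool2d":
        # the output_size_i keyword from the symbolic function appears as an ints attribute
        attrs = [(a.name, list(a.ints)) for a in node.attribute]
        print(node.op_type, attrs)
```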
### FAQ14. Performance misses the bar and profiling shows TransData is expensive; optimize as below
(1) Modify five_2_four.py
Under the path set by export install_path=/usr/local/Ascend/ascend-toolkit/latest in env.sh, locate the five_2_four.py file, usually at:
```
/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/opp/op_impl/built-in/ai_core/tbe/impl/five_2_four.py
```

Edit five_2_four.py and add the TransData operator's output shape to the shape lists in the five_2_four function, for example:
```
from impl import trans_data_negative_target_ntc

@util.check_input_type(dict, dict, str, str, str)
def five_2_four(src, dst, src_format, dst_format, kernel_name='five_2_four'):
    ...
    elif dst_format.lower() == "nhwc" and dst_shape in [[10000, 63, 63, 1], [10000, 127, 127, 1], [16, 19, 19, 486],
                                                        [16, 10, 10, 486], [16, 38, 38, 324], [16, 5, 5, 486],
                                                        [16, 3, 3, 324], [8, 19, 19, 486], [8, 10, 10, 486],
                                                        [8, 38, 38, 324], [8, 5, 5, 486], [8, 3, 3, 324],
                                                        [100, 28, 28, 91]]:
        trans_data_negative_target_tc.trans_data_negative_target_tc(src, dst, src_format, dst_format, kernel_name)
    elif dst_format.lower() == "nchw" and dst_shape in [[2560, 512, 4, 26], [2560, 512, 1, 26], [2560, 256, 8, 25],
                                                        [16, 240, 7, 7], [16, 120, 14, 14], [1, 19, 1024, 2048], [4, 19, 1024, 2048]]:
        print("=================================")
        print("ntc dst shape:", dst_shape)
        print("=================================")
        trans_data_negative_target_ntc.trans_data_negative_target_ntc(src, dst, src_format, dst_format, kernel_name)
    ...
```
- The shapes to add depend on the batch size; here the shape is [*,19,1024,2048]. Taking one model as an example, only batch 1 and batch 4 were tested, so the shapes to add are [1,19,1024,2048] and [4,19,1024,2048].

After the change, regenerate the om file; the atc conversion prints the added logging, for example:
```
ATC start working now, please wait for a moment.
=================================
ntc dst shape: [1, 19, 1024, 2048]
=================================
=================================
ntc dst shape: [1, 19, 1024, 2048]
=================================
ATC run success, welcome to the next use.
W11001: High-priority service of op[PartitionedCall_AvgPool_45_2] is invalid, low-priority service is used. It can work normally but may affect performance.
W11001: High-priority service of op[PartitionedCall_AvgPool_52_6] is invalid, low-priority service is used. It can work normally but may affect performance.
```
(2) Change the output node type to float16
Specify the output node type as float16 during atc conversion:
```
atc --framework=5 --model=./ICNet.onnx --output=ICNet_bs1 --out_nodes="Resize_317:0" --output_type=FP16 --input_format=NCHW --input_shape="actual_input_1: 1,3,1024,2048" --log=debug --soc_version=Ascend310
```

### FAQ15. Resolving atc ERROR when converting onnx to om
* Symptom
  ```
  ATC run failed, please check the detail log. try 'atc --help'
  E19999: Inter Error!
  Unknown error occurred, please check the log.
  ```
  1. Set the environment variables
     ```
     export install_path=/usr/local/Ascend/ascend-toolkit/latest
     export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
     export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
     export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
     export ASCEND_OPP_PATH=${install_path}/opp
     ```
  2. Update to the latest inference run package

  3. Print the host logs
     ```
     export ASCEND_SLOG_PRINT_TO_STDOUT=1
     [WARNING] TBE(3112,atc.bin):2021-05-25-15:20:33.329.360 [image_ops.cc:2146][OP_PROTO] ResizeNearestInferShape:2146 OpName:[Resize_140] "Get constValue failed of [sizes]"
     [ERROR] TBE(3112,atc.bin):2021-05-25-15:20:33.329.371 [image_ops.cc:2084][OP_PROTO] CalculateSizeOut:2084 OpName:[Resize_140] "length of scale_out after erase must be equal to 2"
     [ERROR] TBE(3112,atc.bin):2021-05-25-15:20:33.329.376 [image_ops.cc:2155][OP_PROTO] ResizeNearestInferShape:2155 OpName:[Resize_140] "calculate size out failed."
     [ERROR] GE(3112,atc.bin):2021-05-25-15:20:33.329.391 [op_desc.cc:1345]3112 CallInferFunc: ErrorNo: -1(failed) [COMP][PRE_OPT]Resize_140 call infer func. ret: 4294967295
     [ERROR] GE(3112,atc.bin):2021-05-25-15:20:33.329.397 [shape_refiner.cc:766]3112 InferShapeAndType: ErrorNo: -1(failed) [COMP][PRE_OPT]Resize_140 call infer function failed.
     ```
  The conclusion: the Resize node's [sizes] input is not a constant value that atc can resolve ("Get constValue failed"), so the onnx must be optimized and converted first.
  Do the optimization with the onnx-simplifier tool.
  Install onnx-simplifier:
  pip3 install onnx-simplifier
  Simplify the onnx model:
  python3 -m onnxsim ./hrnet_w18.onnx ./hrnet_w18_1.onnx --input-shape "16,3,224,224"
  Then run the conversion again:
  ```
  atc --framework=5 --model=./hrnet_w18_1.onnx --input_format=NCHW --input_shape="image:16,3,224,224" --output=hrnet_bs16 --log=debug --soc_version=Ascend310
  ```
  The onnx-to-om conversion then succeeds.
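For models that do run under onnxruntime, it is worth confirming that simplification did not change the numerics. A minimal comparison sketch, reusing the hrnet file names from the commands above:

```python
import numpy as np
import onnxruntime as ort

x = np.random.randn(16, 3, 224, 224).astype(np.float32)

ref = ort.InferenceSession("hrnet_w18.onnx", providers=["CPUExecutionProvider"])    # original model
sim = ort.InferenceSession("hrnet_w18_1.onnx", providers=["CPUExecutionProvider"])  # simplified model

out_ref = ref.run(None, {ref.get_inputs()[0].name: x})[0]
out_sim = sim.run(None, {sim.get_inputs()[0].name: x})[0]
print("max abs diff:", np.abs(out_ref - out_sim).max())  # should be ~0
```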
### FAQ16. Adapting the offline-inference post-processing script
For image-classification models, the post-processing scripts are generic; some models (segmentation models, for example) have no post-processing script, and readers must adapt one themselves.
(1) The source code includes an online-inference script (e.g. evaluate.py) or a test script (e.g. test.py)
Adapt based on these scripts; they generally contain a model call like
```
outputs = model(image)
```
The ./result/dumpOutput_device0/ data produced by benchmark offline inference corresponds to the online-inference model(image) step; adaptation means reading the data back from ./result/dumpOutput_device0/ by the matching names. Reference adaptation:
```
outputs = self.file2tensor(annotation_file)

# reads the bin file produced by benchmark back into a tensor
def file2tensor(self, annotation_file):

    filepath = annotation_file + '_1.bin'
    size = os.path.getsize(filepath)
    res = []
    L = int(size / 4)  # float32 is 4 bytes, so read 4 bytes at a time; adjust the byte width to the actual dtype
    binfile = open(filepath, 'rb')
    for i in range(L):
        data = binfile.read(4)
        num = struct.unpack('f', data)
        res.append(num[0])
    binfile.close()

    dim_res = np.array(res).reshape(1, 19, 1024, 2048)  # reshape to the output shape; obtain it by printing outputs during online inference
    tensor_res = torch.tensor(dim_res, dtype=torch.float32)
    print(filepath, tensor_res.dtype, tensor_res.shape)

    return tensor_res
```
(2) If neither script exists, adapt from the validation step of the training flow in the same way.


### FAQ17. Dataset preprocessing fails
```
python3.7 imagenet_torch_preprocess.py /opt/npu/imagenet/val ./pre_dataset
```
fails with:
```
PIL.UnidentifiedImageError: cannot identify image file '/opt/npu/imagenet/val/xxxx.jpeg'
```
This error means the image file is corrupt.
Fix: switch to an undamaged val dataset.


## [2.2 Common FAQ: NPU model accuracy debugging](#22-common-faq-npu-model-accuracy-debugging)

1. Check that the pre/post-processing and model parameters are exactly those used by the open-source repository's inference
2. Use the repository's pth evaluation script to check whether pth online inference accuracy meets the bar; debug prints of operator outputs can be added
3. If the exported onnx can run inference, check whether the onnx accuracy meets the bar
4. If an om operator causes the accuracy drop, specify that operator as an om output node during model conversion, then compare it against the same operator's output in pth online inference (exporting onnx with verbose on prints the py source line for each operator) and check whether they agree
5. If some operator causes the accuracy drop, try modifying the model to replace that operator with another method and re-check accuracy; for operator problems that truly cannot be worked around, file an issue on modelzoo
See the maskrcnn end-to-end inference guide in section 4.5 of the Inference Guide (《推理指导》)


## [2.3 Common FAQ: NPU model performance optimization](#23-common-faq-npu-model-performance-optimization)

1. Optimize the onnx model to drop redundant pads that hurt performance, try the relevant Ascend atc optimization options, try retraining with nearest-neighbor resize in place of bilinear, lower the input resolution, and so on, to reach the bar.
2. For operator-caused performance problems, use profiling to locate the cause of the slowdown, down to the specific operators; first modify the model code so it selects fast npu operators in place of slow ones, then file an issue on modelzoo, re-measure once the fixed version releases, and keep optimizing.
3. Profiling data must be delivered. For models that reach the bar through the steps above, document the cause and the required actions in the delivery documents; for models that still miss the bar, document the cause and a brief account of the debugging in the delivered README.md.

--
Gitee