From 8e29b5d4478ceb6c45f75dd43a9ff3403d5b7347 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AC=A2=E4=B9=90=E9=A9=AC?= Date: Fri, 9 Aug 2024 12:49:34 +0800 Subject: [PATCH 1/2] =?UTF-8?q?=E9=80=82=E9=85=8D=E5=AE=89=E5=85=A8?= =?UTF-8?q?=E5=B8=BD=E8=AF=86=E5=88=AB310b?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../HelmetIdentification/Models/dy_resize.py | 75 --------- .../Models/modify_yolov5s_slice.py | 146 ------------------ contrib/HelmetIdentification/README.md | 98 +++--------- .../HelmetIdentification/Test/test_select.py | 33 ++++ ...64\346\230\216\346\226\207\346\241\243.md" | 6 +- 5 files changed, 63 insertions(+), 295 deletions(-) delete mode 100644 contrib/HelmetIdentification/Models/dy_resize.py delete mode 100644 contrib/HelmetIdentification/Models/modify_yolov5s_slice.py create mode 100644 contrib/HelmetIdentification/Test/test_select.py diff --git a/contrib/HelmetIdentification/Models/dy_resize.py b/contrib/HelmetIdentification/Models/dy_resize.py deleted file mode 100644 index b3478cfd7..000000000 --- a/contrib/HelmetIdentification/Models/dy_resize.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import onnx - -model_path = sys.argv[1] -model = onnx.load(model_path) - -def remove_node(graph, nodelist): - """ - Remove Node - """ - max_idx = len(graph.node) - rm_cnt = 0 - for i in range(len(graph.node)): - if i < max_idx: - gn = graph.node[i - rm_cnt] - if gn.name in nodelist: - print("remove {} total {}".format(gn.name, len(graph.node))) - graph.node.remove(gn) - max_idx -= 1 - rm_cnt += 1 - - -def replace_scales(ori_list, scales_name): - """ - Replace Scales name: - Leave the first two items of the input attribute of Resize unchanged - and the third item--scales name is modified - param:ori_list is the value of Resize.input - """ - n_list = [] - for j, x in enumerate(ori_list): - if j < 2: - n_list.append(x) - if j == 3: - n_list.append(scales_name) - return n_list - -# Replace Resize node -for k in range(len(model.graph.node)): - n = model.graph.node[k] - if n.op_type == "Resize": - model.graph.initializer.append( - onnx.helper.make_tensor('scales{}'.format(k), onnx.TensorProto.FLOAT, [4], [1, 1, 2, 2]) - ) - newnode = onnx.helper.make_node( - 'Resize', - name=n.name, - inputs=replace_scales(n.input, 'scales{}'.format(k)), - outputs=n.output, - coordinate_transformation_mode='asymmetric', - cubic_coeff_a=-0.75, - mode='nearest', - nearest_mode='floor' - ) - model.graph.node.remove(model.graph.node[k]) - model.graph.node.insert(k, newnode) - print("replace {} index {}".format(n.name, k)) - -node_list = ['Constant_330', 'Constant_375'] -remove_node(model.graph, node_list) -onnx.checker.check_model(model) -onnx.save(model, sys.argv[1].split('.')[0] + "_dbs.onnx") diff --git a/contrib/HelmetIdentification/Models/modify_yolov5s_slice.py b/contrib/HelmetIdentification/Models/modify_yolov5s_slice.py deleted file mode 100644 index 
4984b6756..000000000 --- a/contrib/HelmetIdentification/Models/modify_yolov5s_slice.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import onnx -# Get the largest int value -INT_MAX = sys.maxsize -# Read model save path -model_path = sys.argv[1] -# Load the model -model = onnx.load(model_path) - -def get_node_by_name(nodes, name): - """ - gain node by names - """ - for n in nodes: - if n.name == name: - return n - return -1 - - -# remove node of Slice_24 -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_24")) -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_34")) - -# Set the output size of Transpose after slice4 and slice24,slice9, slice19, slice29, slice39 -prob_info1 = onnx.helper.make_tensor_value_info('to_slice9', onnx.TensorProto.FLOAT, [1, 3, 640, 320]) -prob_info3 = onnx.helper.make_tensor_value_info('to_slice19', onnx.TensorProto.FLOAT, [1, 3, 640, 320]) -prob_info5 = onnx.helper.make_tensor_value_info('from_slice9', onnx.TensorProto.FLOAT, [1, 3, 320, 320]) -prob_info6 = onnx.helper.make_tensor_value_info('from_slice19', onnx.TensorProto.FLOAT, [1, 3, 320, 320]) -prob_info7 = onnx.helper.make_tensor_value_info('from_slice29', onnx.TensorProto.FLOAT, [1, 3, 320, 320]) -prob_info8 = onnx.helper.make_tensor_value_info('from_slice39', onnx.TensorProto.FLOAT, [1, 3, 320, 320]) -# Transpose after slice4 -node1 = onnx.helper.make_node( - 'Transpose', - inputs=['171'], - outputs=['to_slice9'], - perm=[0, 1, 3, 2] -) -# Transpose after slice24 -node3 = onnx.helper.make_node( - 'Transpose', - inputs=['181'], - outputs=['to_slice19'], - perm=[0, 1, 3, 2] -) -# add Transpose after slice9 -node5 = onnx.helper.make_node( - 'Transpose', - inputs=['from_slice9'], - outputs=['176'], - perm=[0, 1, 3, 2] -) -# add Transpose after slice19 -node6 = onnx.helper.make_node( - 'Transpose', - inputs=['from_slice19'], - outputs=['186'], - perm=[0, 1, 3, 2] -) -# add Transpose after slice29 -node7 = onnx.helper.make_node( - 'Transpose', - inputs=['from_slice29'], - outputs=['196'], - perm=[0, 1, 3, 2] -) -# add Transpose after slice39 -node8 = onnx.helper.make_node( - 'Transpose', - inputs=['from_slice39'], - outputs=['206'], - perm=[0, 1, 3, 2] -) -# add the above node -model.graph.node.append(node1) -model.graph.node.append(node3) -model.graph.node.append(node5) -model.graph.node.append(node6) -model.graph.node.append(node7) -model.graph.node.append(node8) - -# slice9 slice19 Change shaft -model.graph.initializer.append(onnx.helper.make_tensor('starts_9', onnx.TensorProto.INT64, [1], [0])) -model.graph.initializer.append(onnx.helper.make_tensor('ends_9', onnx.TensorProto.INT64, [1], [INT_MAX])) -model.graph.initializer.append(onnx.helper.make_tensor('axes_9', onnx.TensorProto.INT64, [1], [2])) -model.graph.initializer.append(onnx.helper.make_tensor('steps_9', onnx.TensorProto.INT64, [1], [2])) -# add Slice_9 -newnode1 = onnx.helper.make_node( - 'Slice', - name='Slice_9', - 
inputs=['to_slice9', 'starts_9', 'ends_9', 'axes_9', 'steps_9'], - outputs=['from_slice9'], -) -# remove Original node Slice_9 -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_9")) -model.graph.node.insert(9, newnode1) -# add Slice_19 -newnode2 = onnx.helper.make_node( - 'Slice', - name='Slice_19', - inputs=['to_slice19', 'starts_9', 'ends_9', 'axes_9', 'steps_9'], - outputs=['from_slice19'], -) -# remove Original node Slice_19 -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_19")) -model.graph.node.insert(19, newnode2) - -# slice29 slice39 Change shaft -model.graph.initializer.append(onnx.helper.make_tensor('starts_29', onnx.TensorProto.INT64, [1], [1])) -model.graph.initializer.append(onnx.helper.make_tensor('ends_29', onnx.TensorProto.INT64, [1], [INT_MAX])) -model.graph.initializer.append(onnx.helper.make_tensor('axes_29', onnx.TensorProto.INT64, [1], [2])) -model.graph.initializer.append(onnx.helper.make_tensor('steps_29', onnx.TensorProto.INT64, [1], [2])) -# add Slice_29 -newnode3 = onnx.helper.make_node( - 'Slice', - name='Slice_29', - inputs=['to_slice9', 'starts_29', 'ends_29', 'axes_29', 'steps_29'], - outputs=['from_slice29'], -) -# remove Original node Slice_29 -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_29")) -model.graph.node.insert(29, newnode3) -# add Slice_39 -newnode4 = onnx.helper.make_node( - 'Slice', - name='Slice_39', - inputs=['to_slice19', 'starts_29', 'ends_29', 'axes_29', 'steps_29'], - outputs=['from_slice39'], -) -# remove Original node Slice_39 -model.graph.node.remove(get_node_by_name(model.graph.node, "Slice_39")) -model.graph.node.insert(39, newnode4) -# Save the modified onnx model -onnx.save(model, sys.argv[1].split('.')[0] + "_t.onnx") diff --git a/contrib/HelmetIdentification/README.md b/contrib/HelmetIdentification/README.md index 7779b786b..d95910c4d 100644 --- a/contrib/HelmetIdentification/README.md +++ b/contrib/HelmetIdentification/README.md @@ -6,11 +6,15 @@ ### 1.1 支持的产品 -本项目以昇腾Atlas 500 A2 / Atlas 200I DK A2为主要的硬件平台。 +本项目基于mxVision SDK进行开发,以Atlas 500 A2/Atlas 200I DK A2为主要的硬件平台。 ### 1.2 支持的版本 -本样例配套的CANN版本为[7.0.RC1](https://www.hiascend.com/software/cann/commercial),MindX SDK版本为[5.0.RC3](https://www.hiascend.com/software/Mindx-sdk)。 +本样例配套的MxVision版本、CANN版本、Driver/Firmware版本如下所示: +| MxVision版本 | CANN版本 | Driver/Firmware版本 | +|--------------- | ---------------------------------- | ----------| +| 5.0.0 | 7.0.0 | 23.0.0| +|6.0.RC2 | 8.0.RC2 | 24.1.RC2| MindX SDK安装前准备可参考《用户指南》,[安装教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/quickStart/1-1安装SDK开发套件.md) @@ -30,8 +34,6 @@ MindX SDK安装前准备可参考《用户指南》,[安装教程](https://git ├──HelmetDetection.pipline # 安全帽识别推理流程pipline ├──imgclass.names # 模型所有可识别类 ├──main.py # 推理运行程序 - ├──modify_yolov5s_slice.py #slice算子修改脚本 - ├──dy_resize.py # resize算子修改 ├──utils.py # 数据处理及可视化脚本 ├── plugins ├──MxpiSelectedFrame # 跳帧插件 @@ -57,11 +59,11 @@ MindX SDK安装前准备可参考《用户指南》,[安装教程](https://git 环境依赖软件和版本如下表: | 软件 | 版本 | 说明 | 获取方式 | -| ------------------- | ------------ | ----------------------------- | ------------------------------------------------------------ | -| mxVision | 5.0.RC3 | mxVision软件包 | [链接](https://www.hiascend.com/software/Mindx-sdk) | -| Ascend-CANN-toolkit | 7.0.RC1 | Ascend-cann-toolkit开发套件包 | [链接](https://www.hiascend.com/software/cann/commercial) | -| 操作系统 | ubuntu 22.04 | 操作系统 | Ubuntu官网获取 | -| opencv-python | 4.5.2.54 | 用于识别结果画框 | python3 -m pip install opencv-python | +| ------------------- | ------------ | 
----------------------------- | ------------------------------------------------------------ | +| opencv-python | 4.10.0.54 | 用于识别结果画框 | python3 -m pip install opencv-python| +| libgl1-mesa-glx |23.0.4-0ubuntu1~22.04.1 |GL库(opencv-python可能会依赖GL)|apt install libgl1-mesa-glx| +| live555|1.10|实现视频转rtsp进行推流|[live555使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md)| +|ffmpeg|2021-08-08-git-ac0408522a | 实现mp4格式视频转为264格式视频 |[ffmpeg使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md)| @@ -78,52 +80,17 @@ MindX SDK安装前准备可参考《用户指南》,[安装教程](https://git ##### 1.1模型与软件依赖 - 所用模型与软件依赖如下表所示。若使用A200I DK A2运行,推荐使用PC转换模型,具体方法可参考A200I DK A2资料。 + 所用模型与软件依赖如下表所示。若使用A200I DK A2运行,推荐使用PC转换模型,具体方法可参考A200I DK A2资料。模型相关信息可参考[原项目链接](https://github.com/PeterH0323/Smart_Construction) | 软件名称 | 版本 | 获取方式 | | ----------------------- | -------- | ------------------------------------------------------------ | -| pytorch | 1.5.1 | [pytorch官网](https://pytorch.org/get-started/previous-versions/) | -| ONNX | 1.7.0 | pip install onnx==1.7.0 | -| helmet_head_person_s.pt | v2.0 | [原项目链接](https://github.com/PeterH0323/Smart_Construction)(选择项目中yolov5s权重文件,权重文件保存在README所述网盘中) | | YOLOv5_s.onnx | YOLOv5_s | [链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/HelmetIdentification/model.zip) | - - -##### 1.2 pt文件转换为onnx文件 +##### 1.2 onnx文件转换为om文件 1. 可直接获取已经转换好的YOLOv5_s.onnx文件,链接如1.1所示。此模型已经完成优化,不再使用dy_resize.py、modify_yolov5s_slice.py进行优化。可直接转换为om模型。 -2. 若尝试pt文件转换为onnx文件,可获取[原项目](https://github.com/PeterH0323/Smart_Construction)代码,下载至本地。安装环境依赖**requirements.txt**在原项目中已给出(原项目使用pytorch 1.5.1框架),pt文件转换为onnx文件所需第三方库**ONNX**如1.1中方式安装。 - -3. 通过上述1.1中链接获取模型文件helmet_head_person_s.pt,下载到本地后保存至原项目weights文件中。使用原项目中的export.py将pt文件转换为onnx格式文件。运行: - -```shell -python3 ./models/export.py --weights ./weights/helmet_head_person_s.pt --img 640 --batch 1 -``` - -其中onnx算子版本为opset_version=11。转换完成后权重文件helmet_head_person_s.onnx改名为YOLOv5_s.onnx上传至服务器任意目录下。 - - - -##### 1.3 onnx文件转换为om文件 - - 转换完成后onnx脚本上传至服务器任意目录后先进行优化。 - -1. 利用附件脚本dy_resize.py修改模型resize算子。该模型含有动态Resize算子(上采样),通过计算维度变化,改为静态算子,不影响模型的精度,运行如下命令: - -```shell -python3 modify_yolov5s_slice.py YOLOv5_s.onnx -``` - -2. 然后利用modify_yolov5s_slice.py脚本修改模型slice算子,运行如下命令: - -```bash -python3 modify_yolov5s_slice.py YOLOv5_s.onnx -``` - -可以得到修改好后的YOLOv5_s.onnx模型 - -3. 最后运行atc-env脚本将onnx转为om模型,运行命令如下。 +2. 修改atc-env脚本中的路径,运行atc-env脚本将onnx转为om模型,运行命令如下。 ```shell sh atc-env.sh ``` 提示 **ATC run success** 说明转换成功。 脚本中包含atc命令: ```shell --model=${Home}/YOLOv5_s.onnx --framework=5 --output=${Home}/YOLOv5_s --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info --soc_version=Ascend310B1 --input_shape="images:1,3,640,640" ``` 其参数如下表所示 | 参数名 | 参数描述 | | ---------------- | :----------------------------------------------------------- | | -- framework | 原始框架类型。当取值为5时,即为ONNX网络模型,仅支持ai.onnx算子域中opset v11版本的算 子。用户也可以将其他opset版本的算子(比如opset v9),通过PyTorch转换成 opset v11版本的onnx算子 | | --model | 原始模型文件路径与文件名 | | --output | 如果是开源框架的网络模型,存放转换后的离线模型的路径以及文件名。 | | --soc_version | 模型转换时指定芯片版本。昇腾AI处理器的版本,可从ATC工具安装路径的“/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/atc/data/platform_config”目录下 查看。 ".ini"文件的文件名即为对应的${soc_version} | | --insert_op_conf | 插入算子的配置文件路径与文件名, 例如aipp预处理算子。 | | --input_shape | 模型输入数据的 shape。 | | --out_nodes | 指定输出节点,如果不指定输出节点(算子名称),则模型的输出默认为最后一层的算子信息,如果 指定,则以指定的为准 | 其中--insert_op_conf参数为aipp预处理算子配置文件路径。该配置文件aipp_YOLOv5.config在输入图像进入模型前进行预处理。该配置文件保存在源码Models目录下。 注:1. [ATC模型转换工具指南](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md) 2.atc-env.sh脚本内 Home 为onnx文件所在路径。 #### 步骤2 模型推理 ##### 2.1 pipline编写 pipline根据1.5节中技术实现流程图编写,该文件**HelmetDetection.pipline**放在源码根目录Models。 注: 1.pipline中mxpi_modelinfer用于加载yolov5安全帽识别模型。该插件包含四个参数,modelPath用于加载om模型文件。labelPath用于加载模型可识别类(imgclass.names)。postProcessLibPath用于加载后处理动态链接库文件,该模块实现NMS等后处理。postProcessConfigPath用于加载后处理所需要的配置文件(Helmet_yolov5.cfg)。本项目使用后处理文件为**libMpYOLOv5PostProcessor.so**。该后处理配置文件内容如下: ```python CLASS_NUM=3 BIASES_NUM=18 BIASES=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326 SCORE_THRESH=0.4 OBJECTNESS_THRESH=0.3 IOU_THRESH=0.5 YOLO_TYPE=3 ANCHOR_DIM=3 MODEL_TYPE=1 RESIZE_FLAG=0 ``` 注:pipline中以上四个参数要修改为相应文件所在绝对路径。 2.pipline中mxpi_selectedframe插件完成视频跳帧。对于输入帧率为24fps输入视频进行每三帧抽一帧进行识别。实现8fps的帧率。 将目录切换至./plugins/MxpiSelectedFrame 输入如下命令编译生成mxpi_selectedframe.so: ```shell mkdir build cd build cmake ..
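# 注:下面的 make -j 将并行编译出 libmxpi_selectedframe.so(生成于 build 目录)
# 编译完成后需将该 so 复制到 SDK 插件库并把权限改为 440,示例命令如下(仅为示意,假设 SDK 安装于 ${MX_SDK_HOME}):
# cp libmxpi_selectedframe.so ${MX_SDK_HOME}/mxVision/lib/plugins/ && chmod 440 ${MX_SDK_HOME}/mxVision/lib/plugins/libmxpi_selectedframe.so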
make -j ``` -编译成功后将产生**libmxpi_selectedframe.so**文件,文件生成位置在build目录下。将其复制至SDK的插件库中(./MindX_SDK/mxVision/lib/plugins) +编译成功后将产生**libmxpi_selectedframe.so**文件,文件生成位置在build目录下。将其复制至SDK的插件库中(./MindX_SDK/mxVision/lib/plugins),并修改权限为440 注:[插件编译生成教程](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md)在《SDK用户手册》深入开发章节 @@ -242,10 +209,10 @@ test.264可替换成任意上传至当前目录的[264格式文件](https://gite 然后切换目录至main.py所在目录下,运行命令: ```shell -python3.9.2 main.py +python3.9 main.py ``` -即可得到输出结果,输出结果将原来的两路视频分为两个文件保存,utils.py中的oringe_imgfile用于设置图像输出路径,用户需手动建立输出文件output,文件路径可自定义设置。本项目文件放置规范如下: +即可得到输出结果,输出结果将原来的两路视频分为两个文件保存,utils.py中的oringe_imgfile用于设置图像输出路径,用户需**手动建立**输出文件output,文件路径可自定义设置。本项目文件放置规范如下: ![image3](https://gitee.com/liu-kai6334/mindxsdk-referenceapps/raw/master/contrib/HelmetIdentification/image/image3.jpg) @@ -264,7 +231,7 @@ python3.9.2 main.py ##### 3.1 性能测试 -性能测试使用脚本performance_test_main.py,该脚本与main.py大体相同,不同之处是在performance_test_main.py中添加了时间戳测试,测试数据为mxpi_rtspsrc拉取的视频流。两路视频尺寸分别取多组不同尺寸的视频做对比。推理三百帧图片后取平均时间值,设置如下环境变量: +性能测试使用脚本Test/performance_test_main.py,该脚本与main.py大体相同,不同之处是在performance_test_main.py中添加了时间戳测试,测试数据为mxpi_rtspsrc拉取的视频流。两路视频尺寸分别取多组不同尺寸的视频做对比。推理三百帧图片后取平均时间值,设置如下环境变量: ```shell export PYTHONPATH=/usr/local/python3.9.2/bin:${MX_SDK_HOME}/python:{path} @@ -275,7 +242,7 @@ export PYTHONPATH=/usr/local/python3.9.2/bin:${MX_SDK_HOME}/python:{path} 运行如下命令得到结果: ```shell -python3 performance_test_main.py +python3.9 performance_test_main.py ``` 注:1.与运行main.py时相同,运行performance_test_main.py时要先使用live555进行推流。**测试视频**上传至[链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/HelmetIdentification/test_video.zip),该视频为不同尺寸不同帧率的同一视频。如test64036830_158s.264为尺寸640×640,帧率30,时长158s的视频。 @@ -304,7 +271,7 @@ python3 performance_test_main.py 依据数据集中ImageSets文件夹中test.txt文件,从原始数据集中筛选出测试数据集,该程序**test_select.py**放在源码根目录Test中,在同目录下创建文件夹TestImages用来存储筛选的数据。在该目录下运行命令: ```shell -python3.9.2 test_select.py +python3.9 test_select.py ``` 程序运行后在根目录Test中会存放筛选出的测试集图片共1517张。 @@ -316,7 +283,7 @@ python3.9.2 test_select.py 运行命令: ```shell -python3.9.2 parse_voc.py +python3.9 parse_voc.py ``` ###### 3.2.4 推理运行 @@ -334,7 +301,7 @@ cls conf x0 y0 x1 y1 运行命令: ```shell -python3.9.2 testmain.py +python3.9 testmain.py ``` 注:testmain.py中直接写入了pipline,其中mxpi_modelinfer插件四个参数的配置与HelmetDetection.pipline完全相同。 @@ -348,29 +315,14 @@ python3.9.2 testmain.py 运行命令: ```shell -python3.9.2 map_calculate.py --label_path ./ground-truth --npu_txt_path ./detection-test-result/ -na -np +python3.9 map_calculate.py --label_path ./ground-truth --npu_txt_path ./detection-test-result/ -na -np ``` 即可得到输出。其中precision、recall和map记录在**output/output.txt**文件中。 +## 4 常见问题 - -## 5 软件依赖说明 - -推理中涉及到第三方软件依赖如下表所示。 - -| 依赖软件 | 版本 | 说明 | -| -------- | ------------------------- | ------------------------------ | -| live555 | 1.09 | 实现视频转rstp进行推流 | -| ffmpeg | 2021-08-08-git-ac0408522a | 实现mp4格式视频转为264格式视频 | - -注:1.[live555使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md) - -​ 2.[ffmpeg使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md) - -## 6 常见问题 - -### 6.1 图片格式问题 +### 4.1 图片格式问题 **问题描述:** diff --git a/contrib/HelmetIdentification/Test/test_select.py b/contrib/HelmetIdentification/Test/test_select.py new file mode 100644 index 000000000..43114d83a --- /dev/null +++ b/contrib/HelmetIdentification/Test/test_select.py @@ -0,0 +1,33 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import cv2 + +with open("ImageSets/Main/test.txt", "r") as f: + data = f.readlines() + text_data = [] + for line in data: + line_new = line.strip('\n') # Remove the newline character of each element in the list + text_data.append(line_new) + print(text_data) + +path = 'JPEGImages' +save_path = 'TestImages' + +for item in os.listdir(path): + file_name = item.split('.')[0] + if file_name in text_data: + img = cv2.imread(path + '/' + item) + cv2.imwrite(save_path + '/' + file_name + ".jpg", img) \ No newline at end of file diff --git "a/docs/\345\217\202\350\200\203\350\265\204\346\226\231/Live555\347\246\273\347\272\277\350\247\206\351\242\221\350\275\254RTSP\350\257\264\346\230\216\346\226\207\346\241\243.md" "b/docs/\345\217\202\350\200\203\350\265\204\346\226\231/Live555\347\246\273\347\272\277\350\247\206\351\242\221\350\275\254RTSP\350\257\264\346\230\216\346\226\207\346\241\243.md" index dab4f2dd6..6aaed2f16 100644 --- "a/docs/\345\217\202\350\200\203\350\265\204\346\226\231/Live555\347\246\273\347\272\277\350\247\206\351\242\221\350\275\254RTSP\350\257\264\346\230\216\346\226\207\346\241\243.md" +++ "b/docs/\345\217\202\350\200\203\350\265\204\346\226\231/Live555\347\246\273\347\272\277\350\247\206\351\242\221\350\275\254RTSP\350\257\264\346\230\216\346\226\207\346\241\243.md" @@ -23,7 +23,11 @@ cd live/ make ``` - +若编译过程中报错:struct std::atomic_flag has no member named 'test' +修改config.linux文件: +``` +CPLUSPLUS_FLAGS = (原有配置项) -std=c++2a +``` 最后就会在当前目录下生成mediaServer 文件夹,有一个live555MediaServer可执行文件 ## 4. 
运行 -- Gitee From 89d7bb6f3d69a8ebbe8eaf89402e826c2e25b1da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AC=A2=E4=B9=90=E9=A9=AC?= Date: Wed, 14 Aug 2024 09:47:19 +0800 Subject: [PATCH 2/2] =?UTF-8?q?=E5=AE=89=E5=85=A8=E5=B8=BD=E8=AF=86?= =?UTF-8?q?=E5=88=AB=E5=88=A0=E9=99=A4310B=E5=88=86=E6=94=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Models/HelmetDetection.pipline | 147 --- .../Models/Helmet_yolov5.cfg | 10 - .../Models/aipp_YOLOv5.config | 37 - .../HelmetIdentification/Models/atc-env.sh | 10 - .../Models/imgclass.names | 3 - contrib/HelmetIdentification/Models/main.py | 81 -- contrib/HelmetIdentification/Models/utils.py | 123 --- contrib/HelmetIdentification/README.md | 334 ------- .../Test/map_calculate.py | 941 ------------------ .../HelmetIdentification/Test/parse_voc.py | 94 -- .../Test/performance_test_main.py | 90 -- .../HelmetIdentification/Test/test_select.py | 33 - contrib/HelmetIdentification/Test/testmain.py | 162 --- contrib/HelmetIdentification/build.sh | 37 - contrib/HelmetIdentification/image/image1.jpg | Bin 6684 -> 0 bytes contrib/HelmetIdentification/image/image2.jpg | Bin 3501 -> 0 bytes contrib/HelmetIdentification/image/image3.jpg | Bin 1223 -> 0 bytes contrib/HelmetIdentification/image/image4.jpg | Bin 44203 -> 0 bytes .../plugins/MxpiSelectedFrame/CMakeLists.txt | 30 - .../MxpiSelectedFrame/MxpiSelectedFrame.cpp | 96 -- .../MxpiSelectedFrame/MxpiSelectedFrame.h | 80 -- .../plugins/MxpiSelectedFrame/build.sh | 36 - 22 files changed, 2344 deletions(-) delete mode 100644 contrib/HelmetIdentification/Models/HelmetDetection.pipline delete mode 100644 contrib/HelmetIdentification/Models/Helmet_yolov5.cfg delete mode 100644 contrib/HelmetIdentification/Models/aipp_YOLOv5.config delete mode 100644 contrib/HelmetIdentification/Models/atc-env.sh delete mode 100644 contrib/HelmetIdentification/Models/imgclass.names delete mode 100644 contrib/HelmetIdentification/Models/main.py delete mode 100644 contrib/HelmetIdentification/Models/utils.py delete mode 100644 contrib/HelmetIdentification/README.md delete mode 100644 contrib/HelmetIdentification/Test/map_calculate.py delete mode 100644 contrib/HelmetIdentification/Test/parse_voc.py delete mode 100644 contrib/HelmetIdentification/Test/performance_test_main.py delete mode 100644 contrib/HelmetIdentification/Test/test_select.py delete mode 100644 contrib/HelmetIdentification/Test/testmain.py delete mode 100644 contrib/HelmetIdentification/build.sh delete mode 100644 contrib/HelmetIdentification/image/image1.jpg delete mode 100644 contrib/HelmetIdentification/image/image2.jpg delete mode 100644 contrib/HelmetIdentification/image/image3.jpg delete mode 100644 contrib/HelmetIdentification/image/image4.jpg delete mode 100644 contrib/HelmetIdentification/plugins/MxpiSelectedFrame/CMakeLists.txt delete mode 100644 contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.cpp delete mode 100644 contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.h delete mode 100644 contrib/HelmetIdentification/plugins/MxpiSelectedFrame/build.sh diff --git a/contrib/HelmetIdentification/Models/HelmetDetection.pipline b/contrib/HelmetIdentification/Models/HelmetDetection.pipline deleted file mode 100644 index 285630d41..000000000 --- a/contrib/HelmetIdentification/Models/HelmetDetection.pipline +++ /dev/null @@ -1,147 +0,0 @@ -{ - "Detection":{ - "stream_config":{ - "deviceId":"1" - }, - "mxpi_rtspsrc0":{ - "factory":"mxpi_rtspsrc", - "props":{ - 
"rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxx.264", - "channelId":"0" - }, - "next":"queuerstp0" - }, - "mxpi_rtspsrc1":{ - "factory":"mxpi_rtspsrc", - "props":{ - "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxx.264", - "channelId":"1" - }, - "next":"queuerstp1" - }, - "queuerstp0":{ - "props":{ - "max-size-buffers":"50" - }, - "factory":"queue", - "next":"mxpi_videodecoder0" - }, - "queuerstp1":{ - "props":{ - "max-size-buffers":"50" - }, - "factory":"queue", - "next":"mxpi_videodecoder1" - }, - "mxpi_videodecoder0":{ - "factory":"mxpi_videodecoder", - "props":{ - "deviceId":"1", - "inputVideoFormat":"H264", - "outputImageFormat":"YUV420SP_NV12", - "vdecChannelId":"0" - }, - "next":"queue01" - }, - "queue01":{ - "props":{ - "max-size-buffers":"500" - }, - "factory":"queue", - "next":"mxpi_selectedframe0" - }, - "mxpi_videodecoder1":{ - "factory":"mxpi_videodecoder", - "props":{ - "deviceId":"1", - "inputVideoFormat":"H264", - "outputImageFormat":"YUV420SP_NV12", - "vdecChannelId":"1" - }, - "next":"queue11" - }, - "queue11":{ - "props":{ - "max-size-buffers":"500" - }, - "factory":"queue", - "next":"mxpi_selectedframe1" - }, - "mxpi_selectedframe0":{ - "factory":"mxpi_selectedframe", - "next":"mxpi_parallel2serial0:0", - "props":{ - "frameNum":"2" - } - }, - "mxpi_selectedframe1":{ - "factory":"mxpi_selectedframe", - "next":"mxpi_parallel2serial0:1", - "props":{ - "frameNum":"2" - } - }, - "mxpi_parallel2serial0":{ - "factory":"mxpi_parallel2serial", - "props":{ - "dataSource":"mxpi_videodecoder0,mxpi_videodecoder1" - }, - "next":"mxpi_imageresize0" - }, - "mxpi_imageresize0":{ - "props":{ - "dataSource":"mxpi_parallel2serial0", - "resizeType": "Resizer_KeepAspectRatio_Fit", - "resizeHeight":"640", - "resizeWidth":"640" - }, - "factory":"mxpi_imageresize", - "next":"queue0" - }, - "queue0":{ - "props":{ - "max-size-buffers":"500" - }, - "factory":"queue", - "next":"mxpi_modelinfer0" - }, - "mxpi_modelinfer0":{ - "props":{ - "dataSource":"mxpi_imageresize0", - "modelPath":"./YOLOv5_s.om", - "postProcessConfigPath":"./Helmet_yolov5.cfg", - "labelPath":"./imgclass.names", - "postProcessLibPath":"./libMpYOLOv5PostProcessor.so" - }, - "factory":"mxpi_modelinfer", - "next":"queue1" - }, - "queue1":{ - "props":{ - "max-size-buffers":"50" - }, - "factory":"queue", - "next":"mxpi_motsimplesort0" - }, - "mxpi_motsimplesort0": { - "props": { - "dataSourceDetection": "mxpi_modelinfer0" - }, - "factory": "mxpi_motsimplesort", - "next": "mxpi_dataserialize0" - }, - "mxpi_dataserialize0": { - "props": { - "outputDataKeys": "ReservedFrameInfo,mxpi_modelinfer0" - }, - "factory": "mxpi_dataserialize", - "next": "appsink0" - }, - "appsink0": { - "props": { - "blocksize": "4096000" - }, - "factory": "appsink" - } - } -} diff --git a/contrib/HelmetIdentification/Models/Helmet_yolov5.cfg b/contrib/HelmetIdentification/Models/Helmet_yolov5.cfg deleted file mode 100644 index 18c08f4fe..000000000 --- a/contrib/HelmetIdentification/Models/Helmet_yolov5.cfg +++ /dev/null @@ -1,10 +0,0 @@ -CLASS_NUM=3 -BIASES_NUM=18 -BIASES=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326 -SCORE_THRESH=0.4 -OBJECTNESS_THRESH=0.3 -IOU_THRESH=0.5 -YOLO_TYPE=3 -ANCHOR_DIM=3 -MODEL_TYPE=1 -RESIZE_FLAG=0 diff --git a/contrib/HelmetIdentification/Models/aipp_YOLOv5.config b/contrib/HelmetIdentification/Models/aipp_YOLOv5.config deleted file mode 100644 index 2ca2703c4..000000000 --- a/contrib/HelmetIdentification/Models/aipp_YOLOv5.config +++ /dev/null @@ -1,37 +0,0 @@ -aipp_op{ - aipp_mode:static - input_format : YUV420SP_U8 - - 
src_image_size_w : 640 - src_image_size_h : 640 - - crop: false - load_start_pos_h : 0 - load_start_pos_w : 0 - crop_size_w : 640 - crop_size_h: 640 - - csc_switch : true - rbuv_swap_switch : false - - # 色域转换 - matrix_r0c0: 256 - matrix_r0c1: 0 - matrix_r0c2: 359 - matrix_r1c0: 256 - matrix_r1c1: -88 - matrix_r1c2: -183 - matrix_r2c0: 256 - matrix_r2c1: 454 - matrix_r2c2: 0 - input_bias_0: 0 - input_bias_1: 128 - input_bias_2: 128 - - # 均值归一化 - min_chn_0 : 0 - min_chn_1 : 0 - min_chn_2 : 0 - var_reci_chn_0: 0.003921568627451 - var_reci_chn_1: 0.003921568627451 - var_reci_chn_2: 0.003921568627451} \ No newline at end of file diff --git a/contrib/HelmetIdentification/Models/atc-env.sh b/contrib/HelmetIdentification/Models/atc-env.sh deleted file mode 100644 index c8415211d..000000000 --- a/contrib/HelmetIdentification/Models/atc-env.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# This is used to convert onnx model file to .om model file. -. /usr/local/Ascend/ascend-toolkit/set_env.sh # The path where Ascend-cann-toolkit is located -export Home="./path/" -# Home is set to the path where the model is located - -# Execute, transform YOLOv5 model. -atc --model="${Home}"/YOLOv5_s.onnx --framework=5 --output="${Home}"/YOLOv5_s --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info --soc_version=Ascend310B1 --input_shape="images:1,3,640,640" -# --model is the path where onnx is located. --output is the path where the output of the converted model is located \ No newline at end of file diff --git a/contrib/HelmetIdentification/Models/imgclass.names b/contrib/HelmetIdentification/Models/imgclass.names deleted file mode 100644 index a7446756b..000000000 --- a/contrib/HelmetIdentification/Models/imgclass.names +++ /dev/null @@ -1,3 +0,0 @@ -person -head -helmet \ No newline at end of file diff --git a/contrib/HelmetIdentification/Models/main.py b/contrib/HelmetIdentification/Models/main.py deleted file mode 100644 index 41b3e97c6..000000000 --- a/contrib/HelmetIdentification/Models/main.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import time -import signal -import cv2 -import numpy as np -import StreamManagerApi -import utils -import MxpiDataType_pb2 as MxpiDataType - - -def my_handler(signum, frame): - """ - :param signum: signum are used to identify the signal - :param frame: When the signal occurs, get the status of the process stack - func:Change flag of stop_stream - """ - global stop_stream - stop_stream = True - - -# exit flag -stop_stream = False -# When about to exit, get the exit signal -signal.signal(signal.SIGINT, my_handler) - -# The following belongs to the SDK Process -# init stream manager -streamManagerApi = StreamManagerApi.StreamManagerApi() -ret = streamManagerApi.InitManager() -if ret != 0: - print("Failed to init Stream manager, ret=%s" % str(ret)) - -# create streams by pipeline config file -#load pipline -with open("HelmetDetection.pipline", 'rb') as f: - pipelineStr = f.read() -ret = streamManagerApi.CreateMultipleStreams(pipelineStr) -# Print error message -if ret != 0: - print("Failed to create Stream, ret=%s" % str(ret)) - -# Obtain the inference result by specifying streamName and keyVec -# The data that needs to be obtained is searched by the plug-in name -# Stream name -streamName = b'Detection' -keyVec0 = StreamManagerApi.StringVector() -keyVec0.push_back(b"ReservedFrameInfo") -keyVec0.push_back(b"mxpi_modelinfer0") -keyVec0.push_back(b"mxpi_motsimplesort0") -keyVec0.push_back(b"mxpi_videodecoder0") -keyVec0.push_back(b"mxpi_videodecoder1") - -while True: - # exit flag - if stop_stream: - break - # Get data through GetProtobuf interface - inferResult0 = streamManagerApi.GetResult(streamName, b'appsink0', keyVec0) - # Determine whether the output is empty - if inferResult0.metadataVec.size() == 0: - print('Object detection result of model infer is null!!!') - continue - - DictStructure = utils.get_inference_data(inferResult0) - # the visualization of the inference result, save the output in the specified folder - utils.cv_visualization(DictStructure[0], DictStructure[1], DictStructure[2], DictStructure[3], DictStructure[4]) - -# Destroy All Streams -streamManagerApi.DestroyAllStreams() diff --git a/contrib/HelmetIdentification/Models/utils.py b/contrib/HelmetIdentification/Models/utils.py deleted file mode 100644 index 7aaa05f51..000000000 --- a/contrib/HelmetIdentification/Models/utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import signal -import cv2 -import numpy as np -import MxpiDataType_pb2 as MxpiDataType - -def cv_visualization(img, infer, shape, frame_id, channel_id): - """ - :param img: Inference image - :param infer: Inference result - :param shape:Size before image padding - :param frame_id:Inference image id - :param channel_id:Channel id of the current inference image - func: the visualization of the inference result, save the output in the specified folder - """ - img_list2 = [] - print(shape) - # The title of the rectangle - title_l = (round(0.0002 * (shape[0] + shape[1])) + 0.35) - title_f = max(title_l - 1, 1) - # Add the inference results of head to img_list2 - for bbox0 in infer: - if bbox0[5] == 'head': - img_list2.append(bbox0) - for bbox1 in img_list2: - # Determine whether it is helmet - bboxes = {'x0': int(bbox1[0]), - 'x1': int(bbox1[1]), - 'y0': int(bbox1[2]), - 'y1': int(bbox1[3]), - 'confidence': round(bbox1[4], 4), - 'trackid': int(bbox1[6]), - 'age': int(bbox1[7]) - } - print(bboxes) - bboxes_list1 = [] - bboxes_list1.append(int(bboxes['x0'])) - bboxes_list1.append(int(bboxes['x1'])) - bboxes_list1.append(int(bboxes['y0'])) - bboxes_list1.append(int(bboxes['y1'])) - bboxes_list1 = np.array(bboxes_list1, dtype=np.int32) - # Draw rectangle - cv2.putText(img, str(bboxes['confidence']), (bboxes_list1[0], bboxes_list1[2]), 0, title_l, [225, 255, 255], thickness=title_f, - lineType=cv2.LINE_AA) - # rectangle color [255,255,255] - cv2.rectangle(img, (bboxes_list1[0], bboxes_list1[2]), (bboxes_list1[1], bboxes_list1[3]), (0, 0, 255), 2) - if bboxes['trackid'] is not None and bboxes['age'] == 1: - print("Warning:Not wearing a helmet,InferenceId:{},FrameId:{}".format(bboxes['trackid'], frame_id)) - - # Save pictures in two ways - if channel_id == 0: - oringe_imgfile = './output/one/image' + str(channel_id) + '-' + str( - frame_id) + '.jpg' - # Warning result save path - cv2.imwrite(oringe_imgfile, img) - else: - # when channel_id equal 1 - oringe_imgfile = './output/two/image' + str(channel_id) + '-' + str( - frame_id) + '.jpg' - cv2.imwrite(oringe_imgfile, img) - - -def get_inference_data(inference): - """ - :param inference:output of sdk stream inference - :return:img0, img_list1, img0_shape, frame_list0.frameId, frame_list0.channelId - """ - - # add inferennce data into DATA structure - # Frame information structure - frame_list0 = MxpiDataType.MxpiFrameInfo() - frame_list0.ParseFromString(inference.metadataVec[0].serializedMetadata) - # Target object structure - object_list = MxpiDataType.MxpiObjectList() - object_list.ParseFromString(inference.metadataVec[1].serializedMetadata) - # Get target box information - objectlist_data = object_list.objectVec - # track structure - tracklet_list = MxpiDataType.MxpiTrackLetList() - tracklet_list.ParseFromString(inference.metadataVec[2].serializedMetadata) - # Obtain tracking information - tracklet_data = tracklet_list.trackLetVec - # image structure - vision_list0 = MxpiDataType.MxpiVisionList() - vision_list0.ParseFromString(inference.metadataVec[3].serializedMetadata) - vision_data0 = vision_list0.visionVec[0].visionData.dataStr - # Get picture information - vision_info0 = vision_list0.visionVec[0].visionInfo - - # cv2 func YUV to BGR - yuv_bytes_nu = 3 - yuv_bytes_de = 2 - img_yuv = np.frombuffer(vision_data0, dtype=np.uint8) - # reshape - img_yuv = img_yuv.reshape(vision_info0.heightAligned * yuv_bytes_nu // yuv_bytes_de, vision_info0.widthAligned) - # Color gamut conversion - img0 = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR_NV12) - # put 
inference into dict, - img_list1 = [] - for k in range(len(object_list.objectVec)): - img_list = [round(objectlist_data[k].x0, 4), round(objectlist_data[k].x1, 4), round(objectlist_data[k].y0, 4), - round(objectlist_data[k].y1, 4), - round(objectlist_data[k].classVec[0].confidence, 4), objectlist_data[k].classVec[0].className, - tracklet_data[k].trackId, tracklet_data[k].age] - img_list1.append(img_list) - - # img0_shape is the original image size - img0_shape = [vision_info0.heightAligned, vision_info0.widthAligned] - # Output the results uniformly through the dictionary - dict_structure = [img0, img_list1, img0_shape, frame_list0.frameId, frame_list0.channelId] - return dict_structure diff --git a/contrib/HelmetIdentification/README.md b/contrib/HelmetIdentification/README.md deleted file mode 100644 index d95910c4d..000000000 --- a/contrib/HelmetIdentification/README.md +++ /dev/null @@ -1,334 +0,0 @@ -# 安全帽识别 - -## 1 介绍 - -安全帽作为工作中一样重要的安全防护用品,主要保护头部,防高空物体坠落,防物体打击、碰撞。通过识别每个人是否戴上安全帽,可以对没戴安全帽的人做出告警。本项目支持2路视频实时分析,其主要流程为:分两路接收外部调用接口的输入视频路径,将视频输入。通过视频解码将264格式视频解码为YUV格式图片。模型推理使用YOLOv5进行安全帽识别,识别结果经过后处理完成NMS得到识别框。对重复检测出的没戴安全帽的对象进行去重。最后将识别结果输出为两路,并对没佩戴安全帽的情况告警。 - -### 1.1 支持的产品 - -本项目基于mxVision SDK进行开发,以Atlas 500 A2/Atlas 200I DK A2为主要的硬件平台。 - -### 1.2 支持的版本 - -本样例配套的MxVision版本、CANN版本、Driver/Firmware版本如下所示: -| MxVision版本 | CANN版本 | Driver/Firmware版本 | -|--------------- | ---------------------------------- | ----------| -| 5.0.0 | 7.0.0 | 23.0.0| -|6.0.RC2 | 8.0.RC2 | 24.1.RC2| - -MindX SDK安装前准备可参考《用户指南》,[安装教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/quickStart/1-1安装SDK开发套件.md) - - - -### 1.3 代码目录结构与说明 - -本sample工程名称为HelmetIdentification,工程目录如下图所示: - -``` -├── Models - ├── aipp_YOLOv5.config # 模型转换配置文件 - ├──atc-env.sh # 模型转换脚本 - ├──YOLOv5_s.om #推理模型om文件 - ├──YOLOv5_s.onnx #推理模型onnx文件 - ├──Helmet_yolov5.cfg #后处理配置文件 - ├──HelmetDetection.pipline # 安全帽识别推理流程pipline - ├──imgclass.names # 模型所有可识别类 - ├──main.py # 推理运行程序 - ├──utils.py # 数据处理及可视化脚本 -├── plugins - ├──MxpiSelectedFrame # 跳帧插件 -├── Test - ├──performance_test_main.py # 性能测试脚本 - ├──test_select.py # 测试集筛选脚本 - ├──parse_voc.py # 测试数据集解析脚本 - ├──testmain.py # 测试主程序 - ├──map_calculate.py # 精度计算程序 -├── build.sh -``` - - - -### 1.5 技术实现流程图 - -image4 - - - -## 2 环境依赖 - -环境依赖软件和版本如下表: - -| 软件 | 版本 | 说明 | 获取方式 | -| ------------------- | ------------ | ----------------------------- | ------------------------------------------------------------ | -| opencv-python | 4.10.0.54 | 用于识别结果画框 | python3 -m pip install opencv-python| -| libgl1-mesa-glx |23.0.4-0ubuntu1~22.04.1 |GL库(opencv-python可能会依赖GL)|apt install libgl1-mesa-glx| -| live555|1.10|实现视频转rstp进行推流|[live555使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md)| -|ffmpeg|2021-08-08-git-ac0408522a | 实现mp4格式视频转为264格式视频 |[ffmpeg使用教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md) - - - -在运行脚本main.py前(2.2章节),需要执行如下两个环境配置脚本设置环境变量,运行命令: - -```shell -. /usr/local/Ascend/ascend-toolkit/set_env.sh # Ascend-cann-toolkit开发套件包默认安装路径,根据实际安装路径修改 -. 
${MX_SDK_HOME}/mxVision/set_env.sh # ${MX_SDK_HOME}替换为用户的SDK安装路径 -``` - -## 3.推理 - -#### 步骤1 模型转换 - -##### 1.1模型与软件依赖 - - 所用模型与软件依赖如下表所示。若使用A200I DK A2运行,推荐使用PC转换模型,具体方法可参考A200I DK A2资料。模型相关信息可参考[原项目链接](https://github.com/PeterH0323/Smart_Construction) - -| 软件名称 | 版本 | 获取方式 | -| ----------------------- | -------- | ------------------------------------------------------------ | -| YOLOv5_s.onnx | YOLOv5_s | [链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/HelmetIdentification/model.zip) | - -##### 1.2 onnx文件转换为om文件 - -1. 可直接获取已经转换好的YOLOv5_s.onnx文件,链接如1.1所示。此模型已经完成优化,不再使用dy_resize.py、modify_yolov5s_slice.py进行优化。可直接转换为om模型。 - -3. 修改atc-env脚本中的路径,运行atc-env脚本将onnx转为om模型,运行命令如下。 - -```shell -sh atc-env.sh -``` - -提示 **ATC run success** 说明转换成功。 - -脚本中包含atc命令: - -```shell ---model=${Home}/YOLOv5_s.onnx --framework=5 --output=${Home}/YOLOv5_s --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info --soc_version=Ascend310B1 --input_shape="images:1,3,640,640" -``` - -其参数如下表所示 - -| 参数名 | 参数描述 | -| ---------------- | :----------------------------------------------------------- | -| -- framework | 原始框架类型。当取值为5时,即为ONNX网络模型,仅支持ai.onnx算子域中opset v11版本的算 子。用户也可以将其他opset版本的算子(比如opset v9),通过PyTorch转换成 opset v11版本的onnx算子 | -| --model | 原始模型文件路径与文件名 | -| --output | 如果是开源框架的网络模型,存放转换后的离线模型的路径以及文件名。 | -| --soc_version | 模型转换时指定芯片版本。昇腾AI处理器的版本,可从ATC工具安装路径的“/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/atc/data/platform_config”目录下 查看。 ".ini"文件的文件名即为对应的${soc_version} | -| --insert_op_conf | 插入算子的配置文件路径与文件名, 例如aipp预处理算子。 | -| --input_shape | 模型输入数据的 shape。 | -| --out_nodes | 指定输出节点,如果不指定输出节点(算子名称),则模型的输出默认为最后一层的算子信息,如果 指定,则以指定的为准 | - -其中--insert_op_conf参数为aipp预处理算子配置文件路径。该配置文件aipp_YOLOv5.config在输入图像进入模型前进行预处理。该配置文件保存在源码Models目录下。 - -注:1. [ATC模型转换工具指南](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md) - 2.atc-env.sh脚本内 Home 为onnx文件所在路径。 - - -#### 步骤2 模型推理 - -##### 2.1 pipline编写 - -pipline根据1.5节中技术实现流程图编写,该文件**HelmetDetection.pipline**放在源码根目录Models。 - - 注: - -1.pipline中mxpi_modelinfer用于加载yolov5安全帽识别模型。该插件包含四个参数,modelPath用于加载om模型文件。labelPath用于加载模型可识别类(imgclass.names)。postProcessLibPath用于加载后处理动态链接库文件,该模块实现NMS等后处理。postProcessConfigPath用于加载后处理所需要的配置文件(Helmet_yolov5.cfg)。本项目使用后处理文件为**libMpYOLOv5PostProcessor.so**。该后处理配置文件内容如下: - -```python -CLASS_NUM=3 -BIASES_NUM=18 -BIASES=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326 -SCORE_THRESH=0.4 -OBJECTNESS_THRESH=0.3 -IOU_THRESH=0.5 -YOLO_TYPE=3 -ANCHOR_DIM=3 -MODEL_TYPE=1 -RESIZE_FLAG=0 -``` - -注:pipline中以上四个参数要修改为相应文件所在绝对路径。 - -2.pipline中mxpi_selectedframe插件完成视频跳帧。对于输入帧率为24fps输入视频进行每三帧抽一帧进行识别。实现8fps的帧率。 - -将目录切换至./plugins/MxpiSelectedFrame - -输入如下命令编译生成mxpi_selectedframe.so: - -```shell -mkdir build -cd build -cmake .. 
-make -j -``` - -编译成功后将产生**libmxpi_selectedframe.so**文件,文件生成位置在build目录下。将其复制至SDK的插件库中(./MindX_SDK/mxVision/lib/plugins),并修改权限为440 - - 注:[插件编译生成教程](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md)在《SDK用户手册》深入开发章节 - -3.pipline中涉及到的**绝对路径**都要修改成用户安装sdk文件相应的路径 - -##### 2.2 运行推理 - -编写完pipline文件后即可运行推理流程进行识别,该程序**main.py**放在源码根目录Models。 - -mian.py通过调用sdk接口创建多个流完成数据接收、处理以及输出,接口调用流程图如下所示: - -image1 - -本项目通过mxpi_rtspsrc拉流输入数据,通过两路GetResult接口输出数据,一路输出带有帧信息的图片数据,一路输出带有帧信息的目标检测框和检测框跟踪信息。推理过程如下: - -首先通过[live555](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md)进行推流,进入到live555安装目录下mediaServer路径,上传要推流的视频在本目录下然后推流。 live555只支持特定几种格式文件,不支持MP4。 所以本地文件先要转成live555支持的格式。选择使用[ffmpeg](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md)进行格式转换。 - -转换命令如下: - -```shell -ffmpeg -i xxx1.mp4 -vcodec h264 -bf 0 -g 25 -r 24 -s 1280*720 -an -f h264 xxx2.264 -``` - -注:参数如下: - -| 参数 | 作用 | -| ------- | ------------------------------------------------------ | -| -i | 表示输入的音视频路径需要转换视频 | -| -f | 强迫采用特定格式输出 | -| -r | 指定帧率输出 | -| -an | 关闭音频 | -| -s | 分辨率控制 | -| -g | 关键帧间隔控制 | -| -vcodec | 设定视频编解码器,未设定时则使用与输入流相同的编解码器 | - -转换完成后上传视频至live555安装目录下mediaServer。输入命令进行推流: - -```shell -./live555MediaServer test.264 -``` - -test.264可替换成任意上传至当前目录的[264格式文件](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md),如要修改相应的也要在pipline中修改mxpi_rtspsrc的拉流路径 - -![image2](https://gitee.com/liu-kai6334/mindxsdk-referenceapps/raw/master/contrib/HelmetIdentification/image/image2.jpg) - -然后切换目录至main.py所在目录下,运行命令: - -```shell -python3.9 main.py -``` - -即可得到输出结果,输出结果将原来的两路视频分为两个文件保存,utils.py中的oringe_imgfile用于设置图像输出路径,用户需**手动建立**输出文件output,文件路径可自定义设置。本项目文件放置规范如下: - -![image3](https://gitee.com/liu-kai6334/mindxsdk-referenceapps/raw/master/contrib/HelmetIdentification/image/image3.jpg) - -所有数据放置于output中,one 、two为两路视频输出文件。 - -输出结果有如下几种情况: -| 序号 | 输入 | 输出 | -| ---- | ------------------------------ | ---------------------------------------------------------- | -| 1 | 两路只有一路输入 | 只打印有输入一路的输出 | -| 2 | 无输入或输入视频中无可识别对象 | 打印:Object detection result of model infer is null!!! 
| -| 3 | 输入视频有识别对象 | 打印每次推理的head的帧信息的尺寸与识别结果 | -| 4 | 识别对象未佩戴安全帽 | 打印:Warning:Not wearing a helmet, InferenceId:FrameId: | - -#### 步骤3 测试性能与精度 - - -##### 3.1 性能测试 - -性能测试使用脚本Test/performance_test_main.py,该脚本与main.py大体相同,不同之处是在performance_test_main.py中添加了时间戳测试,测试数据为mxpi_rtspsrc拉取的视频流。两路视频尺寸分别取多组不同尺寸的视频做对比。推理三百帧图片后取平均时间值,设置如下环境变量: - -```shell -export PYTHONPATH=/usr/local/python3.9.2/bin:${MX_SDK_HOME}/python:{path} -``` - -注:{path}设置为根目录中Models所在路径 - -运行如下命令得到结果: - -```shell -python3.9 performance_test_main.py -``` - -注:1.与运行main.py时相同,运行performance_test_main.py时要先使用live555进行推流。**测试视频**上传至[链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/HelmetIdentification/test_video.zip),该视频为不同尺寸不同帧率的同一视频。如test64036830_158s.264为尺寸640×640,帧率30,时长158s的视频。 - -2.performance_test_main.py中加载pipline文件应写HelmetDetection.pipline的绝对路径 - - -##### 3.2 精度测试 - -###### 3.2.1 数据集说明 - -- 数据集来源: [Safety-Helmet-Wearing-Dataset](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/HelmetIdentification/data.zip) - -- 数据集结构 - - ``` - ├── VOC2028 - ├── Annotations # 图片解释文件,含标签等信息,与JPEGImages中图片一一对应 - ├── ImageSets # 存放txt文件 - ├── JPEGImages # 数据集原图片 - ``` - -注:将数据集中的三个文件放置于项目的根目录Test文件下,与**test_select.py**同目录。 - -###### 3.2.2测试数据集筛选 - -依据数据集中ImageSets文件夹中test.txt文件,从原始数据集中筛选出测试数据集,该程序**test_select.py**放在源码根目录Test中,在同目录下创建文件夹TestImages用来存储筛选的数据。在该目录下运行命令: -```shell -python3.9 test_select.py -``` - -程序运行后在根目录Test中会存放筛选出的测试集图片共1517张。 - -###### 3.2.3 测试数据集解析 - -解析测试数据集,在同级目录下生成类别文件**voc.names**、图片信息文件**VOC2028.info**和真实标签文件夹**ground_truth**, 该程序**parse_voc.py**放在源码根目录Test中。 - -运行命令: - -```shell -python3.9 parse_voc.py -``` - -###### 3.2.4 推理运行 - -依据编写的pipline业务流,对测试数据集进行推理,输出结果保存在同级目录**detection-test-result**文件夹中,该文件需要手动建立。程序**testmain.py**文件放在源码根目录Test中。 - -注:输出推理结果文件txt中数据格式为: - -```shell -cls conf x0 y0 x1 y1 -``` - -其中cls表示识别区域所属类别,conf表示识别置信度,(x0,y0)表示识别区域左上角点的坐标,(x1,y1)表示识别区域右下角点的坐标。 - -运行命令: - -```shell -python3.9 testmain.py -``` - -注:testmain.py中直接写入了pipline,其中mxpi_modelinfer插件四个参数的配置与HelmetDetection.pipline完全相同。 - -###### 3.2.5 精度计算 - -推理完成后,依据图片真实标签和推理结果,计算精度。输出结果保存在同级目录**output**文件夹中,该文件需要手动建立。程序map_calculate.py文件放在源码根目录Test中。 - -注:测试数据集中图片有两类标签"person"(负样本,未佩戴安全帽)和"hat"(正样本,佩戴安全帽)。模型输出标签有三类"person"、"head"、"helmet",其中"head"与真实标签"person"对应,"helmet"与真实标签"hat"对应。在**map_calculate.py**文件中做了对应转换处理。 - -运行命令: - -```shell -python3.9 map_calculate.py --label_path ./ground-truth --npu_txt_path ./detection-test-result/ -na -np -``` - -即可得到输出。其中precision、recall和map记录在**output/output.txt**文件中。 - -## 4 常见问题 - -### 4.1 图片格式问题 - -**问题描述:** - -E0628 10:14:48.309166 8155 DvppImageDecoder.cpp:152] [mxpi_imagedecoder0] [2006] [DVPP:ecode jpeg or jpg fail] - -**解决方案:** - -本项目只支持jpg图片输入 如输入其他格式会报如上错误 - diff --git a/contrib/HelmetIdentification/Test/map_calculate.py b/contrib/HelmetIdentification/Test/map_calculate.py deleted file mode 100644 index a50316b3b..000000000 --- a/contrib/HelmetIdentification/Test/map_calculate.py +++ /dev/null @@ -1,941 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import json -import os -import shutil -import operator -import sys -import argparse -import math -import numpy as np - -MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge) -top_margin = 0.15 # in percentage of the figure height -bottom_margin = 0.05 # in percentage of the figure height -parser = argparse.ArgumentParser() -parser.add_argument('-na', '--no-animation', - help="no animation is shown.", action="store_true") -parser.add_argument('-np', '--no-plot', - help="no plot is shown.", action="store_true") -parser.add_argument( - '-q', '--quiet', help="minimalistic console output.", action="store_true") -parser.add_argument('-i', '--ignore', nargs='+', type=str, - help="ignore a list of classes.") -parser.add_argument('--set-class-iou', nargs='+', type=str, - help="set IoU for a specific class.") - -parser.add_argument('--label_path', default="./ground-truth") -parser.add_argument('--npu_txt_path', default="./detection-results") - -args = parser.parse_args() - - -# if there are no classes to ignore then replace None by empty list -if args.ignore is None: - args.ignore = [] - -specific_iou_flagged = False -if args.set_class_iou is not None: - specific_iou_flagged = True - -# make sure that the cwd() is the location of -# the python script (so that every path makes sense) -os.chdir(os.path.dirname(os.path.abspath(__file__))) - -GT_PATH = args.label_path -DR_PATH = args.npu_txt_path -# if there are no images then no animation can be shown -IMG_PATH = os.path.join(os.getcwd(), 'TestImages_Pre') -if os.path.exists(IMG_PATH): - for dirpath, dirnames, files in os.walk(IMG_PATH): - if not files: - # no image files found - args.no_animation = True -else: - args.no_animation = True - -# try to import OpenCV if the user didn't choose the option --no-animation -show_animation = False -if not args.no_animation: - try: - import cv2 - - show_animation = True - except ImportError: - print("\"cv2\" not found, please install to visualize.") - args.no_animation = True - -# try to import Matplotlib -# if the user didn't choose the option --no-plot -draw_plot = False -if not args.no_plot: - try: - import matplotlib.pyplot as plt - - draw_plot = True - except ImportError: - print("\"matplotlib\" not found,install it to get the plots.") - args.no_plot = True - - -def log_average_miss_rate(precision, fp_cumsum, num_images): - """ - log-average miss rate: - Calculated by averaging miss rates at 9 evenly spaced FPPI points - between 10e-2 and 10e0, in log-space. - output: - Lamr | log-average miss rate - Mr | miss rate - Fppi | false positives per image - - references: - "Pedestrian Detection: An Evaluation of the State of the Art." 
- """ - - # if there were no detections of that class - if precision.size == 0: - Lamr = 0 - Mr = 1 - Fppi = 0 - return Lamr, Mr, Fppi - - Fppi = fp_cumsum / float(num_images) - Mr = (1 - precision) - - fppi_tmp = np.insert(Fppi, 0, -1.0) - mr_tmp = np.insert(Mr, 0, 1.0) - - # Use 9 evenly spaced reference points in log-space - ref = np.logspace(-2.0, 0.0, num=9) - for i0, ref_i in enumerate(ref): - j = np.where(fppi_tmp <= ref_i)[-1][-1] - ref[i0] = mr_tmp[j] - - # log(0) is undefined, so we use the np.maximum(1e-10, ref) - Lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref)))) - - return Lamr, Mr, Fppi - - -def error(msg): - """ - throw error and exit - """ - print(msg) - sys.exit(0) - - -def is_float_between_0_and_1(value): - """ - check if the number is a float between 0.0 and 1.0 - """ - try: - Val = float(value) - if Val > 0.0 and Val < 1.0: - return True - else: - return False - except ValueError: - return False - - -def voc_ap(Rec, Prec): - """ - Calculate the AP given the recall and precision array - 1) We compute a version of the measured - precision/recall curve with precision monotonically decreasing - 2) We compute the AP as the area - under this curve by numerical integration. - --- Official matlab code VOC2012--- - Mrec=[0 ; rec ; 1]; - mpre=[0 ; prec ; 0]; - for i=numel(mpre)-1:-1:1 - mpre(i)=max(mpre(i),mpre(i+1)); - end - i=find(Mrec(2:end)~=Mrec(1:end-1))+1; - Ap=sum((Mrec(i)-Mrec(i-1)).*mpre(i)); - """ - Rec.insert(0, 0.0) # insert 0.0 at begining of list - Rec.append(1.0) # insert 1.0 at end of list - Mrec = Rec[:] - Prec.insert(0, 0.0) # insert 0.0 at begining of list - Prec.append(0.0) # insert 0.0 at end of list - mpre = Prec[:] - # This part makes the precision monotonically decreasing - # (goes from the end to the beginning) - # matlab: for i=numel(mpre)-1:-1:1 - # mpre(i)=max(mpre(i),mpre(i+1)); - - for cnt in range(len(mpre) - 2, -1, -1): - mpre[cnt] = max(mpre[cnt], mpre[cnt + 1]) - # This part creates a list of indexes where the recall changes - # matlab: cnt=find(Mrec(2:end)~=Mrec(1:end-1))+1; - j_list = [] - for j in range(1, len(Mrec)): - if Mrec[j] != Mrec[j - 1]: - j_list.append(j) # if it was matlab would be j + 1 - # The Average Precision (AP) is the area under the curve - # (numerical integration) - # matlab: Ap=sum((Mrec(j)-Mrec(j-1)).*mpre(j)); - Ap = 0.0 - for k in j_list: - Ap += ((Mrec[k] - Mrec[k - 1]) * mpre[k]) - return Ap, Mrec, mpre - - -def file_lines_to_list(path): - """ - Convert the lines of a file to a list - """ - # open txt file lines to a list - with open(path) as F: - content = F.readlines() - # remove whitespace characters like `\n` at the end of each line - content = [con.strip() for con in content] - return content - - -def draw_text_in_image(Img, text0, pos, Color, Line_width): - """ - Draws text in image - """ - Font = cv2.FONT_HERSHEY_PLAIN - fontScale = 1 - lineType = 1 - bottomLeftCornerOfText = pos - cv2.putText(Img, text0, - bottomLeftCornerOfText, - Font, - fontScale, - Color, - lineType) - text_width, _ = cv2.getTextSize(text0, Font, fontScale, lineType)[0] - return Img, (Line_width + text_width) - - -def adjust_axes(r, t, fig0, axes0): - """ - Plot - adjust axes - """ - # get text width for re-scaling - Bb = t.get_window_extent(renderer=r) - text_width_inches = Bb.width / fig0.dpi - # get axis width in inches - current_fig_width = fig0.get_figwidth() - new_fig_width = current_fig_width + text_width_inches - propotion = new_fig_width / current_fig_width - # get axis limit - x_lim = axes0.get_xlim() - 
axes0.set_xlim([x_lim[0], x_lim[1] * propotion]) - - -def draw_plot_func(dictionary0, n_classes0, window_title0, plot_title0, x_label0, output_path0, to_show0, - plot_color0, true_p_bar0): - """ - Draw plot using Matplotlib - """ - # sort the dictionary by decreasing value, into a list of tuples - sorted_dic_by_value = sorted(dictionary0.items(), key=operator.itemgetter(1)) - # unpacking the list of tuples into two lists - sorted_keys, sorted_values = zip(*sorted_dic_by_value) - # - if true_p_bar0 != "": - # Special case to draw in: - # - green -> TP: True Positives - # (object detected and matches ground-truth) - # - red -> FP: False Positives - # (object detected but does not match ground-truth) - # - pink -> FN: False Negatives - # (object not detected but present in the ground-truth) - - fp_sorted = [] - tp_sorted = [] - for key in sorted_keys: - fp_sorted.append(dictionary0[key] - true_p_bar0[key]) - tp_sorted.append(true_p_bar0[key]) - plt.barh(range(n_classes0), fp_sorted, align='center', color='crimson', label='False Positive') - plt.barh(range(n_classes0), tp_sorted, align='center', color='forestgreen', label='True Positive', - left=fp_sorted) - # add legend - plt.legend(loc='lower right') - # Write number on side of bar - Fig = plt.gcf() # gcf - get current figure - r = Fig.canvas.get_renderer() - for i0, val0 in enumerate(sorted_values): - fp_val = fp_sorted[i0] - tp_val = tp_sorted[i0] - fp_str_val = " " + str(fp_val) - tp_str_val = fp_str_val + " " + str(tp_val) - # trick to paint multicolor with offset: - # first paint everything and then repaint the first number - t = plt.text(val0, i0, tp_str_val, color='forestgreen', va='center', fontweight='bold') - plt.text(val0, i0, fp_str_val, color='crimson', va='center', fontweight='bold') - if i0 == (len(sorted_values) - 1): # largest bar - adjust_axes(r, t, Fig, plt.gca()) - else: - plt.barh(range(n_classes0), sorted_values, color=plot_color0) - # Write number on side of bar - Fig = plt.gcf() # gcf - get current figure - r = Fig.canvas.get_renderer() - for i0, val1 in enumerate(sorted_values): - str_val = " " + str(val1) # add a space before - if val1 < 1.0: - str_val = " {0:.2f}".format(val1) - t = plt.text(val1, i0, str_val, color=plot_color0, va='center', fontweight='bold') - # re-set axes to show number inside the figure - if i0 == (len(sorted_values) - 1): # largest bar - adjust_axes(r, t, Fig, plt.gca()) - # set window title - Fig.canvas.set_window_title(window_title0) - # write classes in y axis - plt.yticks(range(n_classes0), sorted_keys, fontsize=12) # Re-scale height accordingly - # comput the matrix height in points and inches - height_pt = n_classes0 * (12 * 1.4) # 1.4 (some spacing) - height_in = height_pt / Fig.dpi - figure_height = height_in / (1 - top_margin - bottom_margin) - # set new height, init_height = Fig.get_figheight - if figure_height > Fig.get_figheight(): - Fig.set_figheight(figure_height) - - # set plot title - plt.title(plot_title0, fontsize=14) - # set axis titles - plt.xlabel(x_label0, fontsize='large') - # adjust size of window - Fig.tight_layout() - # save the plot - Fig.savefig(output_path0) - # show image - if to_show0: - plt.show() - # close the plot - plt.close() - - -# Create a ".temp_files/" and "output/" directory -TEMP_FILES_PATH = ".temp_files" -if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already - os.makedirs(TEMP_FILES_PATH) -output_files_path = "output" -if os.path.exists(output_files_path): # if it exist already - # reset the output directory - 
shutil.rmtree(output_files_path) - -os.makedirs(output_files_path) -if draw_plot: - os.makedirs(os.path.join(output_files_path, "classes")) -if show_animation: - os.makedirs(os.path.join(output_files_path, - "images", "detections_one_by_one")) - -# ground-truth -# Load each of the ground-truth files -# into a temporary ".json" file. -# Create a list of all the class names present -# in the ground-truth (gt_classes). -# get a list with the ground-truth files -ground_truth_files_list = glob.glob(GT_PATH + '/*.txt') -if len(ground_truth_files_list) == 0: - error("Error: No ground-truth files found!") -ground_truth_files_list.sort() -# dictionary with counter per class -gt_counter_per_class = {} -counter_images_per_class = {} - -gt_files = [] -for txt_file in ground_truth_files_list: - file_id = txt_file.split(".txt", 1)[0] - - file_id = os.path.basename(os.path.normpath(file_id)) - # check if there is a correspondent detection-results file - temp_path = os.path.join(DR_PATH, (file_id + ".txt")) - if not os.path.exists(temp_path): - continue - lines_list = file_lines_to_list(txt_file) - # create ground-truth dictionary - bounding_boxes = [] - is_difficult = False - already_seen_classes = [] - for line in lines_list: - try: - if "difficult" in line: - class_name, left, top, right, bottom, _difficult = line.split() - is_difficult = True - else: - class_name, left, top, right, bottom = line.split() - except ValueError: - error_msg = "Error: File " + txt_file + " in the wrong format.\n" - error_msg += " Expected: \n" - error_msg += " Received: " + line - error(error_msg) - # check if class is in the ignore list, if yes skip - if class_name == "hat": - class_name = "helmet" - elif class_name == "person": - class_name = "head" - if class_name in args.ignore: - continue - bbox = left + " " + top + " " + right + " " + bottom - if is_difficult: - bounding_boxes.append( - {"class_name": class_name, "bbox": bbox, - "used": False, "difficult": True}) - is_difficult = False - else: - bounding_boxes.append( - {"class_name": class_name, "bbox": bbox, "used": False}) - # count that object - if class_name in gt_counter_per_class: - gt_counter_per_class[class_name] += 1 - else: - # if class didn't exist yet - gt_counter_per_class[class_name] = 1 - - if class_name not in already_seen_classes: - if class_name in counter_images_per_class: - counter_images_per_class[class_name] += 1 - else: - # if class didn't exist yet - counter_images_per_class[class_name] = 1 - already_seen_classes.append(class_name) - - # dump bounding_boxes into a ".json" file - new_temp_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json" - gt_files.append(new_temp_file) - with open(new_temp_file, 'w') as outfile: - json.dump(bounding_boxes, outfile) - - -gt_classes = list(gt_counter_per_class.keys()) -print(gt_counter_per_class) -# let's sort the classes alphabetically -gt_classes = sorted(gt_classes) -n_classes = len(gt_classes) - -# """ -# Check format of the flag --set-class-iou (if used) -# e.g. check if class exists -# """ -if specific_iou_flagged: - n_args = len(args.set_class_iou) - error_msg = \ - '\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]' - if n_args % 2 != 0: - error('Error, missing arguments. Flag usage:' + error_msg) - # Its format is [class_1] [IoU_1] [class_2] [IoU_2] - specific_iou_classes = args.set_class_iou[::2] # even - iou_list = args.set_class_iou[1::2] # odd - if len(specific_iou_classes) != len(iou_list): - error('Error, missing arguments. 
Flag usage:' + error_msg) - for tmp_class in specific_iou_classes: - if tmp_class not in gt_classes: - error('Error, unknown class \"' + tmp_class + - '\". Flag usage:' + error_msg) - for num in iou_list: - if not is_float_between_0_and_1(num): - error('IoU must be [0.0,1.0].usage:' + error_msg) - -# """ -# detection-results -# Load each of the detection-results files -# into a temporary ".json" file. -# """ -# get a list with the detection-results files -dr_files_list = glob.glob(DR_PATH + '/*.txt') - -dr_files_list.sort() - -for class_index, class_name in enumerate(gt_classes): - bounding_boxes = [] - for txt_file in dr_files_list: - # the first time it checks - # if all the corresponding ground-truth files exist - file_id = txt_file.split(".txt", 1)[0] - - file_id = os.path.basename(os.path.normpath(file_id)) - temp_path = os.path.join(GT_PATH, (file_id + ".txt")) - if class_index == 0: - if not os.path.exists(temp_path): - error_msg = "Error. File not found: {}\n".format(temp_path) - error(error_msg) - lines = file_lines_to_list(txt_file) - for line in lines: - try: - sl = line.split() - tmp_class_name, confidence, left, top, right, bottom = sl - except ValueError: - error_msg = "Error: File " + txt_file + " wrong format.\n" - error_msg += " Expected: \n" - error_msg += " Received: " + line - error(error_msg) - if tmp_class_name == class_name: - bbox = left + " " + top + " " + right + " " + bottom - bounding_boxes.append( - {"confidence": confidence, - "file_id": file_id, "bbox": bbox}) - - # sort detection-results by decreasing confidence - bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True) - with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile: - json.dump(bounding_boxes, outfile) - -# """ -# Calculate the AP for each class -# """ -sum_AP = 0.0 -ap_dictionary = {} -lamr_dictionary = {} -# open file to store the output -with open(output_files_path + "/output.txt", 'w') as output_file: - output_file.write("# AP and precision/recall per class\n") - count_true_positives = {} - for class_index, class_name in enumerate(gt_classes): - count_true_positives[class_name] = 0 - # """ - # Load detection-results of that class - # """ - dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json" - dr_data = json.load(open(dr_file)) - - # """ - # Assign detection-results to ground-truth objects - # """ - nd = len(dr_data) - tp = [0] * nd # creates an array of zeros of size nd - fp = [0] * nd - count = 0 - for idx, detection in enumerate(dr_data): - file_id = detection["file_id"] - if show_animation: - # find ground truth image - ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*") - if len(ground_truth_img) == 0: - error("Error. Image not found with id: " + file_id) - elif len(ground_truth_img) > 1: - error("Error. 
Multiple image with id: " + file_id) - else: # found image - # Load image - img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0]) - # load image with draws of multiple detections - img_cumulative_path = output_files_path - img_cumulative_path += "/images/" + ground_truth_img[0] - if os.path.isfile(img_cumulative_path): - img_cumulative = cv2.imread(img_cumulative_path) - else: - img_cumulative = img.copy() - # Add bottom border to image - bottom_border = 60 - BLACK = [0, 0, 0] - img = cv2.copyMakeBorder( - img, 0, bottom_border, - 0, 0, cv2.BORDER_CONSTANT, value=BLACK) - # assign detection-results to ground truth object if any - # open ground-truth with that file_id - gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json" - ground_truth_data = json.load(open(gt_file)) - ovmax = -1 - gt_match = -1 - # load detected object bounding-box - bb = [float(x) for x in detection["bbox"].split()] - for obj in ground_truth_data: - # look for a class_name match - if obj["class_name"] == class_name: - bbgt = [float(x) for x in obj["bbox"].split()] - bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]), - min(bb[2], bbgt[2]), min(bb[3], bbgt[3])] - iw = bi[2] - bi[0] + 1 - ih = bi[3] - bi[1] + 1 - if iw > 0 and ih > 0: - # compute overlap (IoU) - ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \ - (bbgt[2] - bbgt[0] + 1) * \ - (bbgt[3] - bbgt[1] + 1) - iw * ih - ov = iw * ih / ua - if ov > ovmax: - ovmax = ov - gt_match = obj - - # assign detection as true positive/don't care/false positive - if show_animation: - status = "NO MATCH FOUND!" - # set minimum overlap - min_overlap = MINOVERLAP - if specific_iou_flagged: - if class_name in specific_iou_classes: - index = specific_iou_classes.index(class_name) - min_overlap = float(iou_list[index]) - if ovmax >= min_overlap: - if "difficult" not in gt_match: - if not bool(gt_match["used"]): - # true positive - tp[idx] = 1 - gt_match["used"] = True - count_true_positives[class_name] += 1 - # update the ".json" file - with open(gt_file, 'w') as f: - f.write(json.dumps(ground_truth_data)) - if show_animation: - status = "MATCH!" - else: - # false positive (multiple detection) - fp[idx] = 1 - if show_animation: - status = "REPEATED MATCH!" 
- else: - # false positive - fp[idx] = 1 - count += 1 - if ovmax > 0: - status = "INSUFFICIENT OVERLAP" - - # """ - # Draw image to show animation - # """ - if show_animation: - height, widht = img.shape[:2] - # colors (OpenCV works with BGR) - white = (255, 255, 255) - light_blue = (255, 200, 100) - green = (0, 255, 0) - light_red = (30, 30, 255) - # 1st line - margin = 10 - v_pos = int(height - margin - (bottom_border / 2.0)) - text = "Image: " + ground_truth_img[0] + " " - img, line_width = draw_text_in_image( - img, text, (margin, v_pos), white, 0) - text = "Class [" + str(class_index) + "/" + \ - str(n_classes) + "]: " + class_name + " " - img, line_width = draw_text_in_image( - img, text, - (margin + line_width, v_pos), - light_blue, line_width) - if ovmax != -1: - color = light_red - if status == "INSUFFICIENT OVERLAP": - text = "IoU: {0:.2f}% ".format( - ovmax * 100) - text += "< {0:.2f}% ".format(min_overlap * 100) - else: - text = "IoU: {0:.2f}% ".format( - ovmax * 100) - text += ">= {0:.2f}% ".format(min_overlap * 100) - color = green - img, _ = draw_text_in_image( - img, text, (margin + line_width, v_pos), - color, line_width) - # 2nd line - v_pos += int(bottom_border / 2.0) - rank_pos = str(idx + 1) # rank position (idx starts at 0) - text = "Detection #rank: " + rank_pos - temp_conf = float(detection["confidence"]) - text += " conf: {0:.2f}% ".format(temp_conf * 100) - img, line_width = draw_text_in_image( - img, text, (margin, v_pos), white, 0) - color = light_red - if status == "MATCH!": - color = green - text = "Result: " + status + " " - img, line_width = draw_text_in_image( - img, text, (margin + line_width, v_pos), color, line_width) - - font = cv2.FONT_HERSHEY_SIMPLEX - # if there is intersections between the bounding-boxes - if ovmax > 0: - bbgt = [int(round(float(x))) - for x in gt_match["bbox"].split()] - cv2.rectangle(img, (bbgt[0], bbgt[1]), - (bbgt[2], bbgt[3]), light_blue, 2) - cv2.rectangle(img_cumulative, (bbgt[0], bbgt[1]), - (bbgt[2], bbgt[3]), light_blue, 2) - cv2.putText(img_cumulative, class_name, - (bbgt[0], bbgt[1] - 5), font, 0.6, - light_blue, 1, cv2.LINE_AA) - bb = [int(i) for i in bb] - cv2.rectangle(img, (bb[0], bb[1]), - (bb[2], bb[3]), color, 2) - cv2.rectangle(img_cumulative, - (bb[0], bb[1]), (bb[2], bb[3]), color, 2) - cv2.putText(img_cumulative, class_name, - (bb[0], bb[1] - 5), font, - 0.6, color, 1, cv2.LINE_AA) - # show image - cv2.imwrite("result.jpg", img) - # save image to output - output_img_path = output_files_path - output_img_path += "/images/detections_one_by_one/" - output_img_path += class_name + "_detection" - output_img_path += str(idx) + ".jpg" - cv2.imwrite(output_img_path, img) - # save the image with all the objects drawn to it - cv2.imwrite(img_cumulative_path, img_cumulative) - - print("f_count:", count) - # compute precision/recall - cumsum = 0 - for idx, val in enumerate(fp): - fp[idx] += cumsum - cumsum += val - cumsum = 0 - for idx, val in enumerate(tp): - tp[idx] += cumsum - cumsum += val - rec = tp[:] - for idx, val in enumerate(tp): - rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name] - prec = tp[:] - for idx, val in enumerate(tp): - prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx]) - - ap, mrec, mprec = voc_ap(rec[:], prec[:]) - sum_AP += ap - text = "{0:.2f}%".format(ap * 100) + " = " + class_name + " AP " - # """ - # Write to output.txt - # """ - rounded_prec = ['%.2f' % elem for elem in prec] - rounded_rec = ['%.2f' % elem for elem in rec] - output_file.write(text + "\n Precision: " + str(rounded_prec) 
+ - "\n Recall :" + str(rounded_rec) + "\n\n") - if not args.quiet: - print(text) - ap_dictionary[class_name] = ap - - n_images = counter_images_per_class[class_name] - lamr, mr, fppi = log_average_miss_rate( - np.array(rec), np.array(fp), n_images) - lamr_dictionary[class_name] = lamr - - # """ - # Draw plot - # """ - if draw_plot: - plt.plot(rec, prec, '-o') - # add a new penultimate point to the list (mrec[-2], 0.0) - # since the last line segment - # (and respective area) do not affect the AP value - area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]] - area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]] - plt.fill_between(area_under_curve_x, 0, - area_under_curve_y, alpha=0.2, edgecolor='r') - # set window title - fig = plt.gcf() # gcf - get current figure - fig.canvas.set_window_title('AP ' + class_name) - # set plot title - plt.title('class: ' + text) - # set axis titles - plt.xlabel('Recall') - plt.ylabel('Precision') - # optional - set axes - axes = plt.gca() # gca - get current axes - axes.set_xlim([0.0, 1.0]) - axes.set_ylim([0.0, 1.05]) # .05 to give some extra space - # Alternative option -> wait for button to be pressed - # Alternative option -> normal display - # save the plot - fig.savefig(output_files_path + "/classes/" + class_name + ".png") - plt.cla() # clear axes for next plot - - if show_animation: - cv2.destroyAllWindows() - - output_file.write("\n# mAP of all classes\n") - mAP = sum_AP / n_classes - text = "mAP = {0:.2f}%".format(mAP * 100) - output_file.write(text + "\n") - print(text) - -# """ -# Draw false negatives -# """ -if show_animation: - pink = (203, 192, 255) - for tmp_file in gt_files: - ground_truth_data = json.load(open(tmp_file)) - # get name of corresponding image - start = TEMP_FILES_PATH + '/' - img_id = tmp_file[tmp_file.find( - start) + len(start):tmp_file.rfind('_ground_truth.json')] - img_cumulative_path = output_files_path + "/images/" + img_id + ".jpg" - img = cv2.imread(img_cumulative_path) - if img is None: - img_path = IMG_PATH + '/' + img_id + ".jpg" - img = cv2.imread(img_path) - # draw false negatives - for obj in ground_truth_data: - if not obj['used']: - bbgt = [int(round(float(x))) for x in obj["bbox"].split()] - cv2.rectangle(img, (bbgt[0], bbgt[1]), - (bbgt[2], bbgt[3]), pink, 2) - cv2.imwrite(img_cumulative_path, img) - -# remove the temp_files directory -shutil.rmtree(TEMP_FILES_PATH) - -# """ -# Count total of detection-results -# """ -# iterate through all the files -det_counter_per_class = {} -for txt_file in dr_files_list: - file_id = txt_file.split(".txt", 1)[0] - - # get lines to list - lines_list = file_lines_to_list(txt_file) - for line in lines_list: - class_name = line.split()[0] - # check if class is in the ignore list, if yes skip - if class_name in args.ignore: - continue - # count that object - if class_name in det_counter_per_class: - det_counter_per_class[class_name] += 1 - else: - # if class didn't exist yet - det_counter_per_class[class_name] = 1 - -dr_classes = list(det_counter_per_class.keys()) -print("dr_class:", dr_classes) - -# """ -# Plot the total number of occurences of each class in the ground-truth -# """ -if draw_plot: - window_title = "ground-truth-info" - plot_title = "ground-truth\n" - plot_title += "(" + str(len(ground_truth_files_list)) + \ - " files and " + str(n_classes) + " classes)" - x_label = "Number of objects per class" - output_path = output_files_path + "/ground-truth-info.png" - to_show = False - plot_color = 'forestgreen' - draw_plot_func( - gt_counter_per_class, - n_classes, - 
window_title, - plot_title, - x_label, - output_path, - to_show, - plot_color, - '', - ) - -# """ -# Write number of ground-truth objects per class to results.txt -# """ -with open(output_files_path + "/output.txt", 'a') as output_file: - output_file.write("\n# Number of ground-truth objects per class\n") - for class_name in sorted(gt_counter_per_class): - output_file.write(class_name + ": " + - str(gt_counter_per_class[class_name]) + "\n") - -# """ -# Finish counting true positives -# if class exists in detection-result -# but not in ground-truth -# then there are no true positives in that class -# """ -for class_name in dr_classes: - if class_name not in gt_classes: - count_true_positives[class_name] = 0 -print("count_true_p:", count_true_positives) -# """ -# Plot the total number of occurences of -# each class in the "detection-results" folder -# """ -if draw_plot: - window_title = "detection-results-info" - # Plot title - plot_title = "detection-results\n" - plot_title += "(" + str(len(dr_files_list)) + " files and " - count_non_zero_values_in_dictionary = sum( - int(x) > 0 for x in list(det_counter_per_class.values())) - plot_title += str(count_non_zero_values_in_dictionary) - plot_title += " detected classes)" - # end Plot title - x_label = "Number of objects per class" - output_path = output_files_path + "/detection-results-info.png" - to_show = False - plot_color = 'forestgreen' - true_p_bar = count_true_positives - draw_plot_func( - det_counter_per_class, - len(det_counter_per_class), - window_title, - plot_title, - x_label, - output_path, - to_show, - plot_color, - true_p_bar - ) - -# """ -# Write number of detected objects per class to output.txt -# """ -with open(output_files_path + "/output.txt", 'a') as output_file: - output_file.write("\n# Number of detected objects per class\n") - for class_name in sorted(dr_classes): - n_det = det_counter_per_class[class_name] - text = class_name + ": " + str(n_det) - text += " (tp:" + str(count_true_positives[class_name]) + "" - text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n" - output_file.write(text) - -# """ -# Draw log-average miss rate plot (Show lamr of all classes in decreasing order) -# """ -if draw_plot: - window_title = "lamr" - plot_title = "log-average miss rate" - x_label = "log-average miss rate" - output_path = output_files_path + "/lamr.png" - to_show = False - plot_color = 'royalblue' - draw_plot_func( - lamr_dictionary, - n_classes, - window_title, - plot_title, - x_label, - output_path, - to_show, - plot_color, - "" - ) - -# """ -# Draw mAP plot (Show AP's of all classes in decreasing order) -# """ -if draw_plot: - window_title = "mAP" - plot_title = "mAP = {0:.2f}%".format(mAP * 100) - x_label = "Average Precision" - output_path = output_files_path + "/mAP.png" - to_show = True - plot_color = 'royalblue' - draw_plot_func( - ap_dictionary, - n_classes, - window_title, - plot_title, - x_label, - output_path, - to_show, - plot_color, - "" - ) diff --git a/contrib/HelmetIdentification/Test/parse_voc.py b/contrib/HelmetIdentification/Test/parse_voc.py deleted file mode 100644 index 587027762..000000000 --- a/contrib/HelmetIdentification/Test/parse_voc.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import argparse -import xml.etree.ElementTree as ET - -CLASSES = ('person', 'hat') - -def main(arg): - """ - gain ground-truth - """ - info = open('./VOC2028.info', 'w') - cls = open('./voc.names', 'w') - for i in CLASSES: - cls.write(i) - cls.write('\n') - - for idx, jpg_name in enumerate(os.listdir(arg.img_path)): - key_name = jpg_name.split('.')[0] - xml_name = os.path.join(arg.ann_path, key_name + '.xml') - #parse xml - tree = ET.parse(xml_name) - root = tree.getroot() - size = root.find('size') - width = size.find('width').text - height = size.find('height').text - info.write('{} {} {} {}'.format(idx, os.path.join(arg.img_path, jpg_name), width, height)) - info.write('\n') - - with open('{}/{}'.format(arg.gtp, key_name + '.txt'), 'w') as f: - for obj in root.iter('object'): - difficult = int(obj.find('difficult').text) - cls_name = obj.find('name').text.strip().lower() - if cls_name not in CLASSES: - continue - xml_box = obj.find('bndbox') - xmin = (float(xml_box.find('xmin').text)) - ymin = (float(xml_box.find('ymin').text)) - xmax = (float(xml_box.find('xmax').text)) - ymax = (float(xml_box.find('ymax').text)) - - if difficult: - comment = '{} {} {} {} {} {}'.format(cls_name, xmin, ymin, xmax, ymax, 'difficult') - else: - comment = '{} {} {} {} {}'.format(cls_name, xmin, ymin, xmax, ymax) - f.write(comment) - f.write('\n') - - -def err_msg(msg): - """ - print error message - """ - print('-' * 55) - print("The specified '{}' file does not exist".format(msg)) - print('You can get the correct parameter information from -h') - print('-' * 55) - exit() - - -def check_args(param): - """ - check input args - """ - if not os.path.exists(param.img_path): - err_msg(param.img_path) - if not os.path.exists(param.ann_path): - err_msg(param.ann_path) - if not os.path.exists(param.gtp): - os.makedirs(param.gtp) - return param - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Parse the VOC2028 dataset label') - parser.add_argument("--img_path", default="TestImages", help='The image path') - parser.add_argument("--ann_path", default="Annotations", help='Origin xml path') - parser.add_argument("--gtp", default="ground-truth/", help='The ground true file path') - args = parser.parse_args() - args = check_args(args) - main(args) - \ No newline at end of file diff --git a/contrib/HelmetIdentification/Test/performance_test_main.py b/contrib/HelmetIdentification/Test/performance_test_main.py deleted file mode 100644 index 7350d8ddf..000000000 --- a/contrib/HelmetIdentification/Test/performance_test_main.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import time -import signal -import cv2 -import numpy as np -import StreamManagerApi -import MxpiDataType_pb2 as MxpiDataType -import utils - - -def my_handler(signum, frame): - """ - :param signum: signum are used to identify the signal - :param frame: When the signal occurs, get the status of the process stack - func:Change flag of stop_stream - """ - global stop_stream - stop_stream = True - - -# exit flag -stop_stream = False -# When about to exit, get the exit signal -signal.signal(signal.SIGINT, my_handler) - -# The following belongs to the SDK Process -# init stream manager -streamManagerApi = StreamManagerApi.StreamManagerApi() -ret = streamManagerApi.InitManager() -if ret != 0: - print("Failed to init Stream manager, ret=%s" % str(ret)) - -# create streams by pipeline config file -#load pipline -with open("HelmetDetection.pipline", 'rb') as f: - pipelineStr = f.read() -ret = streamManagerApi.CreateMultipleStreams(pipelineStr) -# Print error message -if ret != 0: - print("Failed to create Stream, ret=%s" % str(ret)) - -# Obtain the inference result by specifying streamName and keyVec -# The data that needs to be obtained is searched by the plug-in name -# Stream name -streamName = b'Detection' -keyVec0 = StreamManagerApi.StringVector() -keyVec0.push_back(b"ReservedFrameInfo") -keyVec0.push_back(b"mxpi_modelinfer0") -keyVec0.push_back(b"mxpi_motsimplesort0") -keyVec0.push_back(b"mxpi_videodecoder0") -keyVec0.push_back(b"mxpi_videodecoder1") - -i = 0 -t_all = 0 -while True: - # exit flag - if stop_stream: - break - i += 1 - if i > 300: - break - t1 = time.time() - inferResult0 = streamManagerApi.GetResult(streamName, b'appsink0', keyVec0) - if inferResult0.metadataVec.size() == 0: - print('Object detection result of model infer is null!!!') - continue - - DictStructure = utils.get_inference_data(inferResult0) - # the visualization of the inference result, save the output in the specified folder - utils.cv_visualization(DictStructure[0], DictStructure[1], DictStructure[2], DictStructure[3], DictStructure[4]) - t2 = time.time() - t_diff = t2-t1 - t_all += t_diff -t_ave = t_all/i -print("e2e Time:{}".format(t_ave)) - -# Destroy All Streams -streamManagerApi.DestroyAllStreams() diff --git a/contrib/HelmetIdentification/Test/test_select.py b/contrib/HelmetIdentification/Test/test_select.py deleted file mode 100644 index 43114d83a..000000000 --- a/contrib/HelmetIdentification/Test/test_select.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import shutil -import cv2 - -with open("ImageSets/Main/test.txt", "r") as f: - data = f.readlines() - text_data = [] - for line in data: - line_new = line.strip('\n') # Remove the newline character of each element in the list - text_data.append(line_new) - print(text_data) - -path = 'JPEGImages' -save_path = 'TestImages' - -for item in os.listdir(path): - file_name = item.split('.')[0] - if file_name in text_data: - img = cv2.imread(path + '/' + item) - cv2.imwrite(save_path + '/' + file_name + ".jpg", img) \ No newline at end of file diff --git a/contrib/HelmetIdentification/Test/testmain.py b/contrib/HelmetIdentification/Test/testmain.py deleted file mode 100644 index df6c1bb34..000000000 --- a/contrib/HelmetIdentification/Test/testmain.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os -import random -import cv2 -import numpy as np -import StreamManagerApi -import MxpiDataType_pb2 as MxpiDataType - -if __name__ == '__main__': - # init stream manager - streamManagerApi = StreamManagerApi.StreamManagerApi() - ret = streamManagerApi.InitManager() - if ret != 0: - print("Failed to init Stream manager, ret=%s" % str(ret)) - exit() - - # create streams by pipeline config file - pipeline = { - "detection": { - "stream_config": { - "deviceId": "1" - }, - "appsrc0": { - "props": { - "blocksize": "409600" - }, - "factory": "appsrc", - "next": "mxpi_imagedecoder0" - }, - "mxpi_imagedecoder0": { - "factory": "mxpi_imagedecoder", - "next": "mxpi_imageresize0" - }, - "mxpi_imageresize0": { - "factory": "mxpi_imageresize", - "props": { - "dataSource": "mxpi_imagedecoder0", - "resizeType": "Resizer_KeepAspectRatio_Fit", - "resizeHeight": "640", - "resizeWidth": "640" - }, - "next": "queue0" - }, - "queue0": { - "props": { - "max-size-buffers": "500" - }, - "factory": "queue", - "next": "mxpi_modelinfer0" - }, - "mxpi_modelinfer0": { - "props":{ - "dataSource":"mxpi_imageresize0", - "modelPath":"./YOLOv5_s.om", - "postProcessConfigPath":"./Helmet_yolov5.cfg", - "labelPath":"./imgclass.names", - "postProcessLibPath":"./libMpYOLOv5PostProcessor.so" - }, - "factory": "mxpi_modelinfer", - "next": "mxpi_dataserialize0" - }, - "mxpi_dataserialize0": { - "props": { - "outputDataKeys": "mxpi_modelinfer0" - }, - "factory": "mxpi_dataserialize", - "next": "appsink0" - }, - "appsink0": { - "props": { - "blocksize": "4096000" - }, - "factory": "appsink" - } - } - } - - pipelineStr = json.dumps(pipeline).encode() - ret = streamManagerApi.CreateMultipleStreams(pipelineStr) - if ret != 0: - print("Failed to create Stream, ret=%s" % str(ret)) - exit() - - path = "./TestImages/" - for item in os.listdir(path): - img_path = os.path.join(path,item) - print("file_path:",img_path) - img_name = item.split(".")[0] - img_txt = "./detection-test-result/" + img_name + ".txt" - if os.path.exists(img_txt): - os.remove(img_txt) - dataInput = StreamManagerApi.MxDataInput() - if os.path.exists(img_path) != 1: - print("The 
test image does not exist.") - - with open(img_path, 'rb') as f: - dataInput.data = f.read() - - - # Inputs data to a specified stream based on streamName. - streamName = b'detection' - ret = streamManagerApi.SendData(streamName, 0, dataInput) - - if ret < 0: - print("Failed to send data to stream.") - - - # Obtain the inference result by specifying streamName and uniqueId. - infer_result = streamManagerApi.GetResult(streamName, 0) - if infer_result.errorCode != 0: - print("GetResult error. errorCode=%d, errorMsg=%s" % ( - infer_result.errorCode, infer_result.data.decode())) - - # print the infer result - - results = json.loads(infer_result.data.decode()) - img = cv2.imread(img_path) - img_shape = img.shape - print(img_shape) - bboxes = [] - key = "MxpiObject" - if key not in results.keys(): - continue - for bbox in results['MxpiObject']: - bboxes = {'x0': int(bbox['x0']), - 'x1': int(bbox['x1']), - 'y0': int(bbox['y0']), - 'y1': int(bbox['y1']), - 'confidence': round(bbox['classVec'][0]['confidence'], 4), - 'text': bbox['classVec'][0]['className']} - text = "{}{}".format(str(bboxes['confidence']), " ") - print(bboxes) - L1 = [] - L1.append(int(bboxes['x0'])) - L1.append(int(bboxes['x1'])) - L1.append(int(bboxes['y0'])) - L1.append(int(bboxes['y1'])) - L1.append(bboxes['confidence']) - L1.append(bboxes['text']) - print(L1) - - with open(img_txt,"a+") as f: - content = '{} {} {} {} {} {}'.format(L1[5], L1[4], L1[0], L1[2], L1[1], L1[3]) - f.write(content) - f.write('\n') - - # destroy streams - streamManagerApi.DestroyAllStreams() diff --git a/contrib/HelmetIdentification/build.sh b/contrib/HelmetIdentification/build.sh deleted file mode 100644 index 4d14c15a3..000000000 --- a/contrib/HelmetIdentification/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -current_folder="$( cd "$(dirname "$0")" ;pwd -P )" - - -SAMPLE_FOLDER=( -/plugins/MxpiSelectedFrame/ -) - - -err_flag=0 -for sample in "${SAMPLE_FOLDER[@]}";do - cd "${current_folder}/${sample}" - bash build.sh || { - echo -e "Failed to build ${sample}" - err_flag=1 - } -done - -if [ ${err_flag} -eq 1 ]; then - exit 1 -fi -exit 0 \ No newline at end of file diff --git a/contrib/HelmetIdentification/image/image1.jpg b/contrib/HelmetIdentification/image/image1.jpg deleted file mode 100644 index 028ebb00c0214731032701e4632f366e2bd713a3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6684 zcmdU!d0f(2zsE5vb3am!IL zQ$#cYb3vvoEzLv>QB182O$pb)4e(+;Gtcw7Gxxsc-sg4ieeNH;ew^Qr-+BEwhx0z~ z?}ytiPBw~i+vNZNfFjh^%1v@4008SOH*J)>i%H1*S#pqybhEJlRKMCWA!)1+Hg_}! z0BY0Zg#jBRZP^R9ry>CWVC&jTsy+O{SpeX>TBwz|#}$8GA=TN#k92c-3`PN(;8(*_`6i^yx7}T0>PsXV<*|V4WaZ z#mxDk!T6^%GVPv`g2e}{HQ>bwxUCGs;&{3W;~FU)h)cSpu7{IGN-vEUVcVq8=Ij&c z6eU~GCWb%?DQ(U+Q%9@Y4$9yZ*CSW1lCG(v)zZPrIG!|P<8>0|#Q*w-{jMe^Cic_? 
[... binary JPEG data omitted ...]
diff --git a/contrib/HelmetIdentification/image/image2.jpg b/contrib/HelmetIdentification/image/image2.jpg
deleted file mode 100644
index 03f200f44d1ac38b7fb8b99c6c26a3c518420ac2..0000000000000000000000000000000000000000
GIT binary patch
[... binary JPEG data omitted ...]
diff --git a/contrib/HelmetIdentification/image/image3.jpg b/contrib/HelmetIdentification/image/image3.jpg
deleted file mode 100644
index a0dcd8d7ff543a9947c27f9abc2908d6e87c188a..0000000000000000000000000000000000000000
GIT binary patch
[... binary JPEG data omitted ...]
diff --git a/contrib/HelmetIdentification/image/image4.jpg b/contrib/HelmetIdentification/image/image4.jpg
deleted file mode 100644
index 2b6f84d48a092d4a4b36b7e4c4332cffb8c0fcae..0000000000000000000000000000000000000000
GIT binary patch
[... binary JPEG data omitted ...]
z5$wyWnh!xP=Mp$eKe&1cF9>qnb2@R~=2Ok6HLw~T%Ww>oA2G)y>6cF*zzBZEp!~){ zDDCw4bqv51&03yQAZOFgCFSW1fFqe8c0DHVmdeegNCY z;6K-XocVdEFOPfv8hP6(d;^`u{MUkw?Wd=@6Dtc67tF;bnh$#(XOw@;MS7~{vp+Qi$ zWA(!GU|%@JKmJ;on`=jprlayGviw*G1A}b^sk^S^a>h)nh*d^45H&K>hY5$x6Jxhv zKHtq=v;nnTsn}lqMq6se8xN4Udh+R>&21<;L)F3!UC3uX=f6>BXd>(AX4sz6JhN$z z;M%bqP{`2OC)!!QJ_3nS%)>miC8C#$K2%$gIOS@BB%W|XDqqm0zs%Qt52eoZJ~ncE zQO}-!BIXFnyP;<%-i*nZ>e zl8kGXPRVzu4InOo^|g{}y1W9xrxly7BJ%KLKUiRkr#NuA*ftsscUL?$X>^_4b~^lY z0C%@LRMRlm(&gq1XhXj6Xdo464ww)fZCIMW%??z~pqx~YtyV02JQwcGcX~)j;e%h^ z&1RTAhMrQmi{%mgOuB8|FATroj_~mE;E%?SNUR^jJ#GETez6m97o||98ra#|>JWjE z`-6cWC4|i|61RXRxZ<_8m*qmM(2OcESq-w1_el-!Neqb30vBWcY*~gz(j-n}Sj=zZ z7R65_Ey#*aKaTo~tM{)0s~3kaUc)y(PUfKqOPRYn78ex1{cvx%!_fyY{%oe1ZEBhH zJdYTUSo--|i8qPfgRXM)5l6>JE+pM(yz>JN5yzCoGn`ljY7S3R7q(Mlr&B*48KV>L zk$TUVJcQDhtHxH#XklTYwHeEds||X6wsRfYF=z6CiV|wlu5%+w^Gap4Jr=PFrX7XVUnADu7mQ&N}wx{V&qNDR;+Z1l7XB!K2C37NX zx6|u{c_NCB?$dNXb|roi?2*^)u|Eq1R^=YW9^aO=J3(K)V9M1nvNc?{mkD$TbFo^r zv6fK)b`$mt57r=9j;+j=M0#X42@@z+|sI+z6+@El=x&(fT%55M~*NF}?=rSY2HlW5^Ul~sAS&rpx) z9l#KCZZmERZcFu@4PuBW&}6qgX@U9>n$sJjtUB+$ClXb?l#5!pxcH&?o6Py7xtEhI z`3u`9UqP?l=B%k~F^TP=MA>$i8%|ZMRS60u^JAL&c`JClSW7>1XlgPyOfYG_IJHGedpLP4_PL!JH+N)wy zDB|n*OiiYiFX(#ZMFzlr$S0G_+YCpkrB;!fRzXp8RW?f39P(1a*s zT0GOUZ+{T|lg~d`Blx9FN}4dUDMRqQe^ZidcVL*u*pEab%>YIPmluit&Abu;KUj5K zhuQZIxvHJsODxkJbv;O?`>AwzS~XPHgvA|3irpuw@azYgKA8J!70zOaw|b2se#REX z?!1rq?i0xqoV4;cfC35)7&p!&YXAS5yZWdk?>!D9LvfmgxM`A#=F8Gbb6XHr7SdAV zN}=Y!>vqGndl}neUAf@6RNP^MEl8>XgH*?wH35Y0Fhk|C8WQ za|@hnzN3L?%#%hh8)B42r37k~xM4baCxzDajn4i-`;DaQ2{WfM1O+n&%;Amcq>G76 z&03x^mSJR{v@S?U5G6{fhkZ+U)^lz9%WdWAG)zP{-`FNxwesfco%OS1Yz6f;G|c|w z^?8;bEN0a+lv-L?2@0M!aOOqBu0~f<=z2*x%3emKI61F)S5b9`CLVWo>pyS>?Y>g< zVu$%C`bknj@Av09GY(d!_RjZ)4v=om9?Qi2;6vO4x7F7m(r4>GTtU=h>&JO_toVwf zDuJ!nJ}i|x6r+84e?dLsLAy3f)=)BcbQsUPDTUY*jyM)I302W{>!~Y!u z`|ly&8@$^S*mci3@u{`4Boq#6O~L`yZs>9F1phIfwxJXDo#HwEO|rP5rlIFMyH(U_ zVexRTQPIFLK*!A!-ZDjD0#8}=$Hg3qOsQ?^uDIua#ky%Ut(dUA1iZ~iS$geOO-*-K zlkGphUF0ZwydL_$g8Ebh#fn?bJWZ8naSlggQzy;`k9dB`9Ibwr7v3F2 z;4|u?q66d~Xry9Ek^a2PJ7V_@ucUbQw&j^eoG)9iOa+}33nd{knX)?kD`*WbHki6{ z11(8nricoPg15j}McqOmRuk%v_-VWHLw03it@3%Qw+uKXKt-iq~$zPf1=vfl>@Bh}so>q#<2b_mtfj5ZP08BNXw(wBY-vLRA$ z5-_zZ`+z>|^ZrP8E4&>NqI`aL363|GJsxtMtWTa|Be9vkfW({=Z}a3YBP1UR!Hf2G zi$LJEOlq~TuSG)R;?Ff8A>nkHxDk(5lAWMjf?IzZazT-3`bS%{Z(~n^(-wqM)rZ<% z69z^6LRe^p003AgbwUpFG6u6hVxiOi>=58=B*`r>>5*f9UDyxG$!{_gaf7v;Y3dVH z$k+gZ*M+9ofesJ@pQoIQ3I=)C>3Kok8=cQO>igoe;d5@H)XyyFh*Nw7cL~wwW;MFv z&<}BWEIp7e36Kdd>fWvNG*hP`vpb>=qLdE?8}=f;P9y*|c~UmNDHzCxbz)H?GP}0& zfSmf*_2)*18@_O71HeT|D)rFyi0uc<<5L(9_Q%ZWWaQsK&cc0s9Ib*&F9|@^TYGA6 z9o4oqh=wE}6q*Wa*{z|d+*rz=2g4Mgt>ipaR~rexwcOGXI2IBl&StA#t94P{sx6qi zz(7#)6!XJzwY%-*xrjev9x9-Qw@uw1A7&c#&WK<7^B%&eZ*7@>5_K))de!sRn5`_; zakVZ6JQSFFFp9LWmtZ*W@b>t0?MFwr!YC0wUZBg`)JE1c`rz~XE57@c?i(Kg{AsO; zxe$m|heVsixkg*v_g6QrA-bPA`OH)7Pbuv;{s)2G7jVB=j(8)jKe0T3+o|nZRXt77 zybb~WAhOQuaVK&}0B1$fZ%EK>fh#0ye=6BA3LcI1aecBZM4gP-YY?g#hMi3wt>LY1 zA`4;8Cuj{sBtIiL$X~v5m_y8%1u<4~>ok%&@&%=N5t;o5=)=GXB4$05#OdWW!Ev0N zozezZi?n~0<}y${bqhK1Dow)<3tCjjg-4OztP4@i*8k}X`38Pok1ao!S<_tM@g533 Ndwm$R+FcPDe*+WoWfTAa diff --git a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/CMakeLists.txt b/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/CMakeLists.txt deleted file mode 100644 index d0e93415e..000000000 --- a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -cmake_minimum_required(VERSION 3.5.2) - -add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) -add_definitions(-Dgoogle=mindxsdk_private) -set(PLUGIN_NAME 
"mxpi_selectedframe") -set(TARGET_LIBRARY ${PLUGIN_NAME}) -set(MX_SDK_HOME "$ENV{MX_SDK_HOME}") - -include_directories(${MX_SDK_HOME}/include) -include_directories(${MX_SDK_HOME}/opensource/include) -include_directories(${MX_SDK_HOME}/opensource/include/gstreamer-1.0) -include_directories(${MX_SDK_HOME}/opensource/include/glib-2.0) -include_directories(${MX_SDK_HOME}/opensource/lib/glib-2.0/include) -include_directories(${MX_SDK_HOME}/include/MxTools/PluginToolkit) -include_directories(${MX_SDK_HOME}/lib) - -link_directories(${MX_SDK_HOME}/lib) -link_directories(${MX_SDK_HOME}/opensource/lib) - -add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations) -add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}") -add_library(${TARGET_LIBRARY} SHARED MxpiSelectedFrame.cpp) - -target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0) -target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxbase mxpidatatype) -target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s) - -install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_SOURCE_DIR}/dist/lib) - -# ${MX_SDK_HOME}设置为远程SDK安装路径 \ No newline at end of file diff --git a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.cpp b/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.cpp deleted file mode 100644 index bcf707e82..000000000 --- a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "MxpiSelectedFrame.h" -#include "MxBase/Log/Log.h" -using namespace MxBase; -using namespace MxTools; -using namespace MxPlugins; - -APP_ERROR MxpiSelectedFrame::Init(std::map> &configParamMap) -{ - LogInfo << "Begin to initialize MxpiSelectedFrame(" << pluginName_ << ")."; - // get parameters from website. 
diff --git a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.h b/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.h
deleted file mode 100644
index de76e850c..000000000
--- a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/MxpiSelectedFrame.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MXPLUGINS_MXPISKIPFRAME_H
-#define MXPLUGINS_MXPISKIPFRAME_H
-
-#include "MxBase/ErrorCode/ErrorCode.h"
-#include "MxTools/PluginToolkit/base/MxPluginGenerator.h"
-#include "MxTools/PluginToolkit/buffer/MxpiBufferManager.h"
-#include "MxTools/PluginToolkit/metadata/MxpiMetadataManager.h"
-#include "MxTools/Proto/MxpiDataType.pb.h"
-
-/**
- * This plugin is used to skip frames.
- */
-namespace MxPlugins {
-    class MxpiSelectedFrame : public MxTools::MxPluginBase {
-    public:
-        /**
-         * @description: Init configs.
-         * @param configParamMap: config.
-         * @return: Error code.
-         */
-        APP_ERROR Init(std::map<std::string, std::shared_ptr<void>> &configParamMap) override;
-
-        /**
-         * @description: DeInit device.
-         * @return: Error code.
-         */
-        APP_ERROR DeInit() override;
-
-        /**
-         * @description: MxpiSelectedFrame plugin process.
-         * @param mxpiBuffer: data received from the previous plugin.
-         * @return: Error code.
-         */
-        APP_ERROR Process(std::vector<MxTools::MxpiBuffer*> &mxpiBuffer) override;
-
-        /**
-         * @description: MxpiSelectedFrame plugin define properties.
-         * @return: properties.
-         */
-        static std::vector<std::shared_ptr<void>> DefineProperties();
-
-        /**
-         * @api
-         * @brief Define the number and data type of input ports.
-         * @return MxTools::MxpiPortInfo.
-         */
-        static MxTools::MxpiPortInfo DefineInputPorts();
-
-        /**
-         * @api
-         * @brief Define the number and data type of output ports.
-         * @return MxTools::MxpiPortInfo.
-         */
-        static MxTools::MxpiPortInfo DefineOutputPorts();
-
-    private:
-        uint32_t SelectedFrameNum_ = 0;
-        uint32_t count = 0;
-    };
-}
-
-#endif
\ No newline at end of file
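The positional initializer behind the frameNum property declared above (see DefineProperties() in the deleted .cpp) is hard to read on its own. The mock below is my reading of what the positions mean, inferred from how the plugin uses frameNum; it is NOT the SDK's real ElementProperty type, and the field names are guesses:

#include <cstdint>
#include <iostream>
#include <string>

// Illustrative stand-in for the SDK's ElementProperty<uint>, mirroring
// { UINT, "frameNum", "frameNum", "...", 0, 0, 100 } from DefineProperties().
struct MockElementProperty {
    std::string type;         // UINT: the property carries an unsigned integer
    std::string name;         // key looked up in configParamMap ("frameNum")
    std::string displayName;  // human-readable name (same string here)
    std::string description;  // short help text
    uint32_t defaultValue;    // 0: forward every frame unless configured
    uint32_t minValue;        // 0
    uint32_t maxValue;        // 100: drop at most 100 frames per forwarded one
};

int main()
{
    const MockElementProperty frameNum {
        "UINT", "frameNum", "frameNum",
        "the number of frames to skip between two selected frames",
        0, 0, 100
    };
    std::cout << frameNum.name << " in [" << frameNum.minValue << ", "
              << frameNum.maxValue << "], default " << frameNum.defaultValue << "\n";
    return 0;
}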
diff --git a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/build.sh b/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/build.sh
deleted file mode 100644
index 58cbd3f76..000000000
--- a/contrib/HelmetIdentification/plugins/MxpiSelectedFrame/build.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-current_folder="$( cd "$(dirname "$0")"; pwd -P )"
-
-function build_plugin() {
-    build_path=$current_folder/build
-    if [ -d "$build_path" ]; then
-        rm -rf "$build_path"
-    else
-        echo "directory $build_path does not exist."
-    fi
-    mkdir -p "$build_path"
-    cd "$build_path"
-    cmake ..
-    make -j
-    cd ..
-    exit 0
-}
-
-build_plugin
-exit 0
\ No newline at end of file
--
Gitee