From bdc777351556bab3841db290b5ed7d66bdd13367 Mon Sep 17 00:00:00 2001 From: "wenbo.x" Date: Sat, 8 May 2021 15:08:37 +0800 Subject: [PATCH] add reference apps --- License.md | 89 + README.md | 47 +- README.zh.md | 10 + contrib/.gitkeep | 0 mxVision/AllObjectsStructuring/CMakeLists.txt | 32 + .../Proto/CMakeLists.txt | 46 + .../MxpiAllObjectsStructuringDataType.proto | 31 + mxVision/AllObjectsStructuring/README.md | 194 ++ mxVision/AllObjectsStructuring/build.sh | 49 + mxVision/AllObjectsStructuring/main.py | 20 + .../main_pipeline/__init__.py | 14 + .../main_pipeline/main_pipeline.py | 61 + .../pipeline/AllObjectsStructuring.pipeline | 1956 +++++++++++++++++ .../pipeline/face_registry.pipeline | 127 ++ .../plugins/MpObjectSelection/CMakeLists.txt | 14 + .../MpObjectSelection/MpObjectSelection.cpp | 592 +++++ .../MpObjectSelection/MpObjectSelection.h | 119 + .../plugins/MxpiFaceSelection/CMakeLists.txt | 14 + .../MxpiFaceSelection/MxpiFaceSelection.cpp | 507 +++++ .../MxpiFaceSelection/MxpiFaceSelection.h | 129 ++ .../plugins/MxpiFrameAlign/BlockingMap.h | 82 + .../plugins/MxpiFrameAlign/CMakeLists.txt | 17 + .../plugins/MxpiFrameAlign/MxpiFrameAlign.cpp | 385 ++++ .../plugins/MxpiFrameAlign/MxpiFrameAlign.h | 119 + .../plugins/MxpiSkipFrame/CMakeLists.txt | 14 + .../plugins/MxpiSkipFrame/MxpiSkipFrame.cpp | 96 + .../plugins/MxpiSkipFrame/MxpiSkipFrame.h | 80 + .../AllObjectsStructuring/requirements.txt | 5 + .../retrieval/__init__.py | 14 + .../retrieval/feature_retrieval.py | 280 +++ .../retrieval/register.py | 159 ++ mxVision/AllObjectsStructuring/run.sh | 54 + .../AllObjectsStructuring/util/__init__.py | 14 + .../AllObjectsStructuring/util/arguments.py | 166 ++ .../util/channel_status.py | 31 + .../AllObjectsStructuring/util/checker.py | 57 + .../AllObjectsStructuring/util/display.py | 100 + .../AllObjectsStructuring/util/main_entry.py | 167 ++ .../util/multi_process.py | 48 + .../AllObjectsStructuring/util/pipeline.py | 316 +++ mxVision/AllObjectsStructuring/util/yuv.py | 49 + .../InferOfflineVideo/regular/README.zh.md | 52 + mxVision/InferOfflineVideo/regular/main.cpp | 71 + .../regular/pipeline/regular.pipeline | 60 + mxVision/InferOfflineVideo/regular/run.sh | 30 + mxVision/MediaCodec/CMakeLists.txt | 29 + mxVision/MediaCodec/README.zh.md | 155 ++ mxVision/MediaCodec/dist/.gitkeep | 0 mxVision/MediaCodec/logs/.gitkeep | 0 mxVision/MediaCodec/main.cpp | 65 + mxVision/MediaCodec/pipeline/test.pipeline | 90 + mxVision/MediaCodec/script/build.sh | 47 + mxVision/MediaCodec/script/create_pipeline.sh | 69 + mxVision/MediaCodec/script/run.sh | 35 + mxVision/MediaCodec/script/show.sh | 33 + mxVision/MediaCodec/script/stop.sh | 20 + mxVision/MultiThread/C++/CMakeLists.txt | 30 + mxVision/MultiThread/C++/EasyStream.pipeline | 140 ++ .../C++/EasyStream_protobuf.pipeline | 255 +++ mxVision/MultiThread/C++/README.zh.md | 125 ++ mxVision/MultiThread/C++/build.sh | 47 + mxVision/MultiThread/C++/dist/.gitkeep | 0 mxVision/MultiThread/C++/main.cpp | 329 +++ mxVision/MultiThread/C++/run.sh | 27 + mxVision/MultiThread/picture/.gitkeep | 0 .../MultiThread/python/EasyStream.pipeline | 190 ++ .../python/EasyStream_protobuf.pipeline | 254 +++ mxVision/MultiThread/python/README.zh.md | 99 + mxVision/MultiThread/python/main.py | 98 + .../MultiThread/python/main_sendprotobuf.py | 316 +++ mxVision/MultiThread/python/run.sh | 33 + mxVision/VideoQualityDetection/README.zh.md | 87 + mxVision/VideoQualityDetection/main.cpp | 65 + .../pipeline/VideoQualityDetection.pipeline | 70 + 
mxVision/VideoQualityDetection/run.sh | 30 + tools/precision_analysis/README.md | 135 ++ tools/precision_analysis/executor/__init__.py | 14 + .../executor/data/__init__.py | 14 + .../executor/data/dataloader.py | 34 + .../executor/data/image_loader.py | 75 + tools/precision_analysis/executor/element.py | 33 + .../precision_analysis/executor/inference.py | 128 ++ .../executor/model/__init__.py | 14 + .../precision_analysis/executor/model/base.py | 36 + .../executor/model/mindspore_model.py | 49 + .../executor/model/onnx_model.py | 59 + .../executor/model/pb_model.py | 102 + .../executor/model/pipeline.py | 117 + .../precision_analysis/indicator/__init__.py | 14 + .../precision_analysis/indicator/criterion.py | 205 ++ tools/precision_analysis/indicator/metrics.py | 252 +++ .../precision_analysis/interface/__init__.py | 14 + tools/precision_analysis/interface/eval.py | 64 + tools/precision_analysis/main.py | 72 + tools/precision_analysis/requirements.txt | 9 + tools/precision_analysis/test/__init__.py | 17 + tools/precision_analysis/test/build.py | 23 + tools/precision_analysis/test/test_CRNN.py | 91 + tools/precision_analysis/test/test_ctpn.py | 53 + .../test/test_ssd_mobilenet_fpn.py | 101 + tools/precision_analysis/utils/__init__.py | 14 + tools/precision_analysis/utils/arguments.py | 47 + tools/precision_analysis/utils/checker.py | 74 + .../utils/coding_conversion.py | 38 + tools/precision_analysis/utils/collection.py | 65 + tools/precision_analysis/utils/constants.py | 33 + tools/precision_analysis/utils/parser.py | 119 + 107 files changed, 11331 insertions(+), 39 deletions(-) create mode 100644 License.md create mode 100644 README.zh.md create mode 100644 contrib/.gitkeep create mode 100644 mxVision/AllObjectsStructuring/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/Proto/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/Proto/MxpiAllObjectsStructuringDataType.proto create mode 100644 mxVision/AllObjectsStructuring/README.md create mode 100644 mxVision/AllObjectsStructuring/build.sh create mode 100644 mxVision/AllObjectsStructuring/main.py create mode 100644 mxVision/AllObjectsStructuring/main_pipeline/__init__.py create mode 100644 mxVision/AllObjectsStructuring/main_pipeline/main_pipeline.py create mode 100644 mxVision/AllObjectsStructuring/pipeline/AllObjectsStructuring.pipeline create mode 100644 mxVision/AllObjectsStructuring/pipeline/face_registry.pipeline create mode 100644 mxVision/AllObjectsStructuring/plugins/MpObjectSelection/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.cpp create mode 100644 mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.h create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/MxpiFaceSelection.cpp create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/MxpiFaceSelection.h create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/BlockingMap.h create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.cpp create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.h create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/CMakeLists.txt create mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.cpp create 
mode 100644 mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.h create mode 100644 mxVision/AllObjectsStructuring/requirements.txt create mode 100644 mxVision/AllObjectsStructuring/retrieval/__init__.py create mode 100644 mxVision/AllObjectsStructuring/retrieval/feature_retrieval.py create mode 100644 mxVision/AllObjectsStructuring/retrieval/register.py create mode 100644 mxVision/AllObjectsStructuring/run.sh create mode 100644 mxVision/AllObjectsStructuring/util/__init__.py create mode 100644 mxVision/AllObjectsStructuring/util/arguments.py create mode 100644 mxVision/AllObjectsStructuring/util/channel_status.py create mode 100644 mxVision/AllObjectsStructuring/util/checker.py create mode 100644 mxVision/AllObjectsStructuring/util/display.py create mode 100644 mxVision/AllObjectsStructuring/util/main_entry.py create mode 100644 mxVision/AllObjectsStructuring/util/multi_process.py create mode 100644 mxVision/AllObjectsStructuring/util/pipeline.py create mode 100644 mxVision/AllObjectsStructuring/util/yuv.py create mode 100644 mxVision/InferOfflineVideo/regular/README.zh.md create mode 100644 mxVision/InferOfflineVideo/regular/main.cpp create mode 100644 mxVision/InferOfflineVideo/regular/pipeline/regular.pipeline create mode 100644 mxVision/InferOfflineVideo/regular/run.sh create mode 100644 mxVision/MediaCodec/CMakeLists.txt create mode 100644 mxVision/MediaCodec/README.zh.md create mode 100644 mxVision/MediaCodec/dist/.gitkeep create mode 100644 mxVision/MediaCodec/logs/.gitkeep create mode 100644 mxVision/MediaCodec/main.cpp create mode 100644 mxVision/MediaCodec/pipeline/test.pipeline create mode 100644 mxVision/MediaCodec/script/build.sh create mode 100644 mxVision/MediaCodec/script/create_pipeline.sh create mode 100644 mxVision/MediaCodec/script/run.sh create mode 100644 mxVision/MediaCodec/script/show.sh create mode 100644 mxVision/MediaCodec/script/stop.sh create mode 100644 mxVision/MultiThread/C++/CMakeLists.txt create mode 100644 mxVision/MultiThread/C++/EasyStream.pipeline create mode 100644 mxVision/MultiThread/C++/EasyStream_protobuf.pipeline create mode 100644 mxVision/MultiThread/C++/README.zh.md create mode 100644 mxVision/MultiThread/C++/build.sh create mode 100644 mxVision/MultiThread/C++/dist/.gitkeep create mode 100644 mxVision/MultiThread/C++/main.cpp create mode 100644 mxVision/MultiThread/C++/run.sh create mode 100644 mxVision/MultiThread/picture/.gitkeep create mode 100644 mxVision/MultiThread/python/EasyStream.pipeline create mode 100644 mxVision/MultiThread/python/EasyStream_protobuf.pipeline create mode 100644 mxVision/MultiThread/python/README.zh.md create mode 100644 mxVision/MultiThread/python/main.py create mode 100644 mxVision/MultiThread/python/main_sendprotobuf.py create mode 100644 mxVision/MultiThread/python/run.sh create mode 100644 mxVision/VideoQualityDetection/README.zh.md create mode 100644 mxVision/VideoQualityDetection/main.cpp create mode 100644 mxVision/VideoQualityDetection/pipeline/VideoQualityDetection.pipeline create mode 100644 mxVision/VideoQualityDetection/run.sh create mode 100644 tools/precision_analysis/README.md create mode 100644 tools/precision_analysis/executor/__init__.py create mode 100644 tools/precision_analysis/executor/data/__init__.py create mode 100644 tools/precision_analysis/executor/data/dataloader.py create mode 100644 tools/precision_analysis/executor/data/image_loader.py create mode 100644 tools/precision_analysis/executor/element.py create mode 100644 tools/precision_analysis/executor/inference.py 
create mode 100644 tools/precision_analysis/executor/model/__init__.py create mode 100644 tools/precision_analysis/executor/model/base.py create mode 100644 tools/precision_analysis/executor/model/mindspore_model.py create mode 100644 tools/precision_analysis/executor/model/onnx_model.py create mode 100644 tools/precision_analysis/executor/model/pb_model.py create mode 100644 tools/precision_analysis/executor/model/pipeline.py create mode 100644 tools/precision_analysis/indicator/__init__.py create mode 100644 tools/precision_analysis/indicator/criterion.py create mode 100644 tools/precision_analysis/indicator/metrics.py create mode 100644 tools/precision_analysis/interface/__init__.py create mode 100644 tools/precision_analysis/interface/eval.py create mode 100644 tools/precision_analysis/main.py create mode 100644 tools/precision_analysis/requirements.txt create mode 100644 tools/precision_analysis/test/__init__.py create mode 100644 tools/precision_analysis/test/build.py create mode 100644 tools/precision_analysis/test/test_CRNN.py create mode 100644 tools/precision_analysis/test/test_ctpn.py create mode 100644 tools/precision_analysis/test/test_ssd_mobilenet_fpn.py create mode 100644 tools/precision_analysis/utils/__init__.py create mode 100644 tools/precision_analysis/utils/arguments.py create mode 100644 tools/precision_analysis/utils/checker.py create mode 100644 tools/precision_analysis/utils/coding_conversion.py create mode 100644 tools/precision_analysis/utils/collection.py create mode 100644 tools/precision_analysis/utils/constants.py create mode 100644 tools/precision_analysis/utils/parser.py diff --git a/License.md b/License.md new file mode 100644 index 000000000..496857aba --- /dev/null +++ b/License.md @@ -0,0 +1,89 @@ +**Apache License** + +**Version 2.0, January 2004** + +**http://www.apache.org/licenses/** + + + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +**1. Definitions**. + +"**License**" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"**Licensor**" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"**Legal Entity**" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "**control**" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"**You**" (or "**Your**") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"**Source**" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"**Object**" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"**Work**" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+ +"**Derivative Works**" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"**Contribution**" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "**submitted**" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "**Not a Contribution.**" + +"**Contributor**" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +**2. Grant of Copyright License**. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +**3. Grant of Patent License**. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +**4. Redistribution**. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + +2. You must cause any modified files to carry prominent notices stating that You changed the files; and + +3. 
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +4. If the Work includes a "**NOTICE**" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +**5. Submission of Contributions**. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +**6. Trademarks**. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +**7. Disclaimer of Warranty**. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +**8. Limitation of Liability**. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +**9. Accepting Warranty or Additional Liability**. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + + + +END OF TERMS AND CONDITIONS + + + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + + + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + + + +http://www.apache.org/licenses/LICENSE-2.0 + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index c75d40a7c..0856fea45 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,8 @@ -# mindxsdk-referenceapps - -#### 介绍 -{**以下是 Gitee 平台说明,您可以替换此简介** -Gitee 是 OSCHINA 推出的基于 Git 的代码托管平台(同时支持 SVN)。专为开发者提供稳定、高效、安全的云端软件开发协作平台 -无论是个人、团队、或是企业,都能够用 Gitee 实现代码托管、项目管理、协作开发。企业项目请看 [https://gitee.com/enterprises](https://gitee.com/enterprises)} - -#### 软件架构 -软件架构说明 - - -#### 安装教程 - -1. xxxx -2. xxxx -3. xxxx - -#### 使用说明 - -1. xxxx -2. xxxx -3. xxxx - -#### 参与贡献 - -1. Fork 本仓库 -2. 新建 Feat_xxx 分支 -3. 提交代码 -4. 新建 Pull Request - - -#### 特技 - -1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md -2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) -3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 -4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 -5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) -6. 
Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
+EN|[CN](README.zh.md)
+# MindX SDK Reference Apps
+
+[MindX SDK](https://www.huaweicloud.com/intl/en-us/ascend/mindx-sdk) is a software development kit (SDK) launched by Huawei. It provides simple, high-performance APIs and tools that enable applications on Ascend AI Processors.
+
+## Copyright Description
+
+Refer to [License.md](License.md)
\ No newline at end of file
diff --git a/README.zh.md b/README.zh.md
new file mode 100644
index 000000000..4322dd4d5
--- /dev/null
+++ b/README.zh.md
@@ -0,0 +1,10 @@
+中文|[英文](README.md)
+# MindX SDK Reference Apps
+
+[MindX SDK](https://www.huaweicloud.com/ascend/mindx-sdk) 是华为推出的软件开发套件(SDK),提供极简易用、高性能的API和工具,助力昇腾AI处理器赋能各应用场景。
+
+mxSdkReferenceApps是基于MindX SDK开发的参考样例。
+
+## 版权说明
+
+请参阅 [License.md](License.md)
\ No newline at end of file
diff --git a/contrib/.gitkeep b/contrib/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/mxVision/AllObjectsStructuring/CMakeLists.txt b/mxVision/AllObjectsStructuring/CMakeLists.txt
new file mode 100644
index 000000000..8205309aa
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/CMakeLists.txt
@@ -0,0 +1,32 @@
+cmake_minimum_required(VERSION 3.5.1)
+project(AllObjectsStructuring)
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+set(CMAKE_CXX_STANDARD 11)
+set(PROJECT_DIR ${PROJECT_SOURCE_DIR})
+
+if(NOT DEFINED ENV{MX_SDK_HOME})
+    message(FATAL_ERROR "MX_SDK_HOME is not defined, please set it first.")
+else()
+    set(MX_SDK_HOME $ENV{MX_SDK_HOME})
+    message("MX_SDK_HOME=$ENV{MX_SDK_HOME}")
+endif()
+
+set(ENV{LD_LIBRARY_PATH} ${MX_SDK_HOME}/opensource/lib:$ENV{LD_LIBRARY_PATH})
+
+include_directories(${MX_SDK_HOME}/include)
+include_directories(${MX_SDK_HOME}/opensource/include)
+include_directories(${MX_SDK_HOME}/opensource/include/opencv4)
+include_directories(${MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories(${MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories(${MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories(${MX_SDK_HOME}/lib)
+link_directories(${MX_SDK_HOME}/opensource/lib)
+
+add_subdirectory(Proto)
+add_subdirectory(plugins/MpObjectSelection)
+add_subdirectory(plugins/MxpiFaceSelection)
+add_subdirectory(plugins/MxpiSkipFrame)
+add_subdirectory(plugins/MxpiFrameAlign)
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/Proto/CMakeLists.txt b/mxVision/AllObjectsStructuring/Proto/CMakeLists.txt
new file mode 100644
index 000000000..0e3d56c4b
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/Proto/CMakeLists.txt
@@ -0,0 +1,46 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(ProtoFile)
+
+set(TARGET_LIBRARY mxpiallobjectsstructuringdatatype)
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+
+if(EXISTS ${MX_SDK_HOME})
+    set(PROTOBUF_FOUND TRUE)
+    set(PROTOBUF_PROTOC_EXECUTABLE ${MX_SDK_HOME}/opensource/bin/protoc)
+    set(PROTOBUF_INCLUDE_DIRS ${MX_SDK_HOME}/opensource/include)
+    set(PROTOBUF_PROTOC_LIBRARIES ${MX_SDK_HOME}/opensource/lib/libprotoc.so -lpthread)
+    set(PROTOBUF_LITE_LIBRARIES ${MX_SDK_HOME}/opensource/lib/libprotobuf-lite.so -lpthread)
+    include_directories(${PROTOBUF_INCLUDE_DIRS})
+    link_directories(${MX_SDK_HOME}/opensource/lib)
+    find_library(PROTOBUF_LIB NAMES libprotobuf.so.22 PATHS ${MX_SDK_HOME}/opensource/lib)
+
+endif()
+if (PROTOBUF_FOUND)
+    message(STATUS "protobuf library found")
+
+else ()
+    message(FATAL_ERROR "protobuf library is needed but can't be found")
+endif ()
+
+LIST(APPEND PROTO_FLAGS -I${PROJECT_SOURCE_DIR})
+
+EXECUTE_PROCESS(
+        COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} ${PROTO_FLAGS} --cpp_out=${PROJECT_SOURCE_DIR} ${PROJECT_SOURCE_DIR}/MxpiAllObjectsStructuringDataType.proto
+        COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} ${PROTO_FLAGS} --python_out=${PROJECT_SOURCE_DIR} ${PROJECT_SOURCE_DIR}/MxpiAllObjectsStructuringDataType.proto
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+)
+
+file(GLOB_RECURSE SOURCE_FILES ${PROJECT_SOURCE_DIR}/*.cc)
+add_library(${TARGET_LIBRARY} SHARED ${SOURCE_FILES})
+target_link_libraries(${TARGET_LIBRARY} mindxsdk_protobuf)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+if(ENABLE_TEST)
+    target_link_libraries(${TARGET_LIBRARY} gcov)
+endif()
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_DIR}/dist/lib)
+install(FILES ${PROJECT_SOURCE_DIR}/MxpiAllObjectsStructuringDataType.pb.h DESTINATION ${PROJECT_DIR}/dist/include)
+install(FILES ${PROJECT_SOURCE_DIR}/MxpiAllObjectsStructuringDataType_pb2.py DESTINATION ${PROJECT_DIR}/dist/python)
diff --git a/mxVision/AllObjectsStructuring/Proto/MxpiAllObjectsStructuringDataType.proto b/mxVision/AllObjectsStructuring/Proto/MxpiAllObjectsStructuringDataType.proto
new file mode 100644
index 000000000..dfb01f880
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/Proto/MxpiAllObjectsStructuringDataType.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+message MxpiMetaHeader
+{
+    string parentName = 1;
+    int32 memberId = 2;
+    string dataSource = 3;
+}
+
+message MxpiWebDisplayDataList
+{
+    repeated MxpiWebDisplayData webDisplayDataVec = 1;
+}
+
+message MxpiWebDisplayData
+{
+    repeated MxpiMetaHeader headerVec = 1;
+    bytes h264_data = 2;
+    uint32 h264_size = 3;
+    repeated MxpiBBox bbox_vec = 4;
+    string channel_id = 5;
+    uint32 frame_index = 6;
+}
+
+message MxpiBBox
+{
+    float x0 = 1;
+    float y0 = 2;
+    float x1 = 3;
+    float y1 = 4;
+}
\ No newline at end of file
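As a quick orientation to the message types above, here is a minimal, hypothetical usage sketch of the Python bindings that the `--python_out` step in Proto/CMakeLists.txt generates. It assumes the generated `MxpiAllObjectsStructuringDataType_pb2.py` is importable; all field values below are placeholders, not values the pipeline actually produces.

```python
# Hypothetical sketch: build, serialize and re-parse one web-display record
# using the bindings generated from MxpiAllObjectsStructuringDataType.proto.
import MxpiAllObjectsStructuringDataType_pb2 as pb

display_list = pb.MxpiWebDisplayDataList()
frame = display_list.webDisplayDataVec.add()   # one MxpiWebDisplayData entry
frame.channel_id = "0"
frame.frame_index = 42
frame.h264_data = b"\x00\x00\x00\x01"          # placeholder H.264 bytes
frame.h264_size = len(frame.h264_data)
bbox = frame.bbox_vec.add()                    # one MxpiBBox entry
bbox.x0, bbox.y0, bbox.x1, bbox.y1 = 10.0, 20.0, 110.0, 220.0

payload = display_list.SerializeToString()     # wire-format bytes

decoded = pb.MxpiWebDisplayDataList()
decoded.ParseFromString(payload)
assert decoded.webDisplayDataVec[0].channel_id == "0"
```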
diff --git a/mxVision/AllObjectsStructuring/README.md b/mxVision/AllObjectsStructuring/README.md
new file mode 100644
index 000000000..504dfee6d
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/README.md
@@ -0,0 +1,194 @@
+# All Objects Structuring
+
+## 1 Introduction
+
+The all-objects-structuring sample is developed on the mxVision SDK, with the Ascend Atlas 300 card as the main hardware platform. It mainly supports the following features:
+
+1. Object detection: detects targets in the video streams; this sample uses Yolov4-tiny based detection to achieve fast and accurate detection.
+2. Dynamic target recognition and attribute analysis: recognizes the class of each detected target and analyzes its attributes.
+3. Body attribute classification + PersonReID: classifies targets by body attributes and PersonReID.
+4. Face attribute classification + FaceReID: classifies targets by face attributes and FaceReID.
+5. Vehicle attribute classification: classifies the attributes of vehicles.
+
+
+## 2 Environment Dependencies
+
+- Supported hardware and operating system versions
+
+| Hardware | OS version |
+| ----------------------------------- | -------------- |
+| x86_64 + Atlas 300I inference card (model 3010) | Ubuntu 18.04.1 |
+| x86_64 + Atlas 300I inference card (model 3010) | CentOS 7.6 |
+| ARM + Atlas 300I inference card (model 3000) | Ubuntu 18.04.1 |
+| ARM + Atlas 300I inference card (model 3000) | CentOS 7.6 |
+
+- Software dependencies
+
+| Software | Version |
+| -------- | ------ |
+| cmake | 3.5.1+ |
+| mxVision | 0.2 |
+| Python | 3.7.5 |
+
+
+
+## 3 Main Directories
+
+This repository is named mxSdkReferenceApps; the project tree is shown below:
+
+```
+├── mxVision
+│   ├── AllObjectsStructuring
+│   │   ├── pipeline
+│   │   │   └── AllObjectsStructuring.pipeline
+│   │   ├── plugins
+│   │   │   ├── MpObjectSelection
+│   │   │   │   ├── CMakeLists.txt
+│   │   │   │   ├── MpObjectSelection.cpp
+│   │   │   │   └── MpObjectSelection.h
+│   │   │   └── MxpiFaceSelection
+│   │   │       ├── CMakeLists.txt
+│   │   │       ├── MxpiFaceSelection.cpp
+│   │   │       └── MxpiFaceSelection.h
+│   │   ├── models
+│   │   ├── CMakeLists.txt
+│   │   ├── README.zh.md
+│   │   ├── build.sh
+│   │   ├── main.py
+│   │   └── run.sh
+```
+
+
+
+## 4 Preparation
+
+**Step 1:** Install the mxVision SDK by following the installation guide in the *mxVision User Guide*.
+
+**Step 2:** Configure the mxVision SDK environment variable:
+
+`export MX_SDK_HOME=${install_path}/mxVision `
+
+Note: in this sample the mxVision SDK is installed under /root/MindX_SDK.
+
+**Step 3:** Create a models directory under the project root AllObjectsStructuring/ (`mkdir models`), contact us to obtain the latest models, and put them under AllObjectsStructuring/models/.
+
+**Step 4:** Create a faces_to_register directory under the project root AllObjectsStructuring/ (`mkdir faces_to_register`) and put the face photos to be registered under AllObjectsStructuring/faces_to_register/. The faces_to_register directory may contain subfolders; photos must be .jpg files, and subfolder names must consist of English characters. Skip this step if the feature retrieval function is not needed.
+
+**Step 5:** Edit the AllObjectsStructuring/pipeline/AllObjectsStructuring.pipeline file under the project root:
+
+①: Replace every "rtspUrl" value with a reachable rtsp stream address (you must provide the video streams yourself; only H.264 rtsp streams are supported, with resolutions between 128 * 128 and 4096 * 4096; local video files are not supported). Reference format:
+```bash
+rtsp stream format: rtsp://${ip_address}:${port}/${h264_file}
+e.g. rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264
+```
+
+②: Replace every "deviceId" value with the id of the device actually in use; available device ids can be listed with:
+
+`npu-smi info`
+
+**Step 6:** Edit the AllObjectsStructuring/pipeline/face_registry.pipeline file under the project root:
+
+①: Replace every "deviceId" value with the id of the device actually in use; do not use the same deviceId as AllObjectsStructuring.pipeline. Available device ids can be listed with:
+
+`npu-smi info`
+
+**Step 7:** Build the plugins in the mxSdkReferenceApps repository. In the current directory, run:
+
+`bash build.sh`
+
+**Step 8:** In the current directory, install the required python libraries:
+
+`pip3.7.5 install -r requirements.txt`
+
+**Step 9:** Download the feature retrieval source package from the Ascend community at https://ascend.huawei.com/#/software/mindx-sdk/applicationDetails/60172218 and build the feature retrieval library according to its readme. Skip this step if the feature retrieval function is not needed.
+
+Note: the current feature retrieval release lacks some operators required by face retrieval in this sample (Flat, IVFSQ8), which must be generated manually; see section 4.2.2 of the feature retrieval readme:
+
+First enter the feature retrieval src/ascendfaiss/tools directory,
+
+then run the operator generation commands for the current feature retrieval version:
+
+`python flat_min64_generate_model.py -d 256`
+
+`python ivfsq8_generate_model.py -d 256 -c 8192`
+
+After the operators are generated, move the operator model files to the modelpath directory one level up:
+
+`mv op_models/* ../modelpath`
+
+Re-run the environment deployment:
+
+`bash install.sh <driver_src_path>`
+
+`<driver_src_path>` is the directory where the `Ascend310-driver-{software version}-minios.aarch64-src.tar.gz` package was extracted; for example, if the package was extracted under `/usr/local/software/`, then `<driver_src_path>` is `/usr/local/software/`. This command distributes the device-side retrieval daemon files to the devices; it modifies the device file systems in the Ascend Driver, so run **`reboot`** for the change to take effect.
+
+The exact operator generation procedure is subject to section 4.2.2 of the feature retrieval readme.
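Steps 5 and 6 above both amount to rewriting `rtspUrl` and `deviceId` values inside the pipeline files, so a small script can automate them. The following is a hypothetical sketch, not part of the sample code: it assumes the `.pipeline` file is plain JSON with the structure of AllObjectsStructuring.pipeline (a top-level stream dict containing `stream_config` plus elements with `props`), and `patch_pipeline` and the example URL are illustrative only.

```python
# Hypothetical helper: rewrite every rtspUrl/deviceId value in a .pipeline file.
import json

def patch_pipeline(path, rtsp_urls, device_id):
    with open(path) as f:
        cfg = json.load(f)
    for stream in cfg.values():                  # e.g. the "detection" stream
        stream["stream_config"]["deviceId"] = str(device_id)
        for name, element in stream.items():
            if name == "stream_config":
                continue
            props = element.get("props", {})
            if "rtspUrl" in props:
                # Assign each rtsp source element a URL by its channelId.
                channel = int(props.get("channelId", "0"))
                props["rtspUrl"] = rtsp_urls[channel % len(rtsp_urls)]
    with open(path, "w") as f:
        json.dump(cfg, f, indent=4, ensure_ascii=False)

# Illustrative call: all twelve channels reuse one test stream on device 1.
patch_pipeline("pipeline/AllObjectsStructuring.pipeline",
               ["rtsp://192.168.0.100:554/test.264"], device_id=1)
```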
+
+
+## 5 Running
+
+### Without retrieval
+
+Run
+`bash run.sh`
+
+After a normal start, the console prints information for every class of detected target.
+
+
+### With retrieval
+Configure the chip ids on which the small and large retrieval indexes run in AllObjectsStructuring/util/arguments.py under the project root.
+
+Set the chip id for the small index; adjust the `default` value as needed, and do not use the same chip as the face registration or all-objects-structuring pipelines:
+```python
+    parser.add_argument('-index-little-device-ids',
+                        type=int,
+                        nargs="+",
+                        default=[2],
+                        help='specify the device assignment for little index.',
+                        dest='index_little_device_ids')
+```
+Set the chip id for the large index; adjust the `default` value as needed, and do not use the same chip as the face registration or all-objects-structuring pipelines:
+```python
+    parser.add_argument('-index-large-device-ids',
+                        type=int,
+                        nargs="+",
+                        default=[3],
+                        help='specify the device assignment for large index.',
+                        dest='index_large_device_ids')
+```
+
+Run
+`bash run.sh index`
+
+After a normal start, the console prints the index information of detected face targets.
+
+
+
+## 6 Reference Links
+
+MindX SDK community link: https://www.huaweicloud.com/ascend/mindx-sdk
+
+
+
+## 7 FAQ
+
+### 7.1 Importing cv2 fails at runtime because libGL.so.1 is missing
+
+**Symptom:**
+Running the program reports: "ImportError: libGL.so.1: cannot open shared object file: No such file or directory"
+
+**Solution:**
+
+If the server runs a Debian-family system such as Ubuntu, execute:
+```bash
+sudo apt update
+sudo apt install libgl1-mesa-glx
+```
+
+If the server runs a RedHat-family system such as CentOS, execute:
+```bash
+yum install mesa-libGL
+```
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/build.sh b/mxVision/AllObjectsStructuring/build.sh
new file mode 100644
index 000000000..db6a1923b
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/build.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+path_cur=$(cd $(dirname $0); pwd)
+build_type="Release"
+
+function prepare_path() {
+    if [ -d "$1" ]; then
+        rm -rf $1
+    else
+        echo "file $1 does not exist."
+    fi
+    mkdir -p $1
+    cd $1
+}
+
+function build() {
+    echo ${path_cur}
+    path_build=$path_cur/build
+    prepare_path $path_build
+    cmake -DCMAKE_BUILD_TYPE=$build_type ..
+    if [ $? -ne 0 ]; then
+        echo "cmake failed"
+        exit -1
+    fi
+    make -j8
+    if [ $? -ne 0 ]; then
+        echo "make failed"
+        exit -1
+    fi
+    make install
+    cd ..
+}
+
+build
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/main.py b/mxVision/AllObjectsStructuring/main.py
new file mode 100644
index 000000000..0ae73f0c9
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/main.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+""" + +from util.display import Display +from util.main_entry import AllObjectStructurization + +if __name__ == '__main__': + AllObjectStructurization(Display) diff --git a/mxVision/AllObjectsStructuring/main_pipeline/__init__.py b/mxVision/AllObjectsStructuring/main_pipeline/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/mxVision/AllObjectsStructuring/main_pipeline/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/mxVision/AllObjectsStructuring/main_pipeline/main_pipeline.py b/mxVision/AllObjectsStructuring/main_pipeline/main_pipeline.py new file mode 100644 index 000000000..72fc0aa9e --- /dev/null +++ b/mxVision/AllObjectsStructuring/main_pipeline/main_pipeline.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from multiprocessing.context import Process +from util.pipeline import Pipeline +from threading import Thread + + +class MainPipeline(Process): + def __init__(self, args, queue_obj, stream_bbox_queue_by_channel): + super().__init__() + self.args = args + self.queue_obj = queue_obj + self.stream_bbox_queue_by_channel = stream_bbox_queue_by_channel + + def run(self): + out_plugin_id = 0 + out_stream_bbox_plugin_id = 1 + pipeline = Pipeline(pipeline_cfg_file=self.args.main_pipeline_path, + stream_name=self.args.main_pipeline_name, + out_plugin_id=out_plugin_id, + out_stream_plugin_id=out_stream_bbox_plugin_id, + keys=self.args.main_keys2fetch, + stream_bbox_key=self.args.main_stream_bbox_keys2fetch + ) + + get_stream_frame_thread = GetStreamAndBBoxData(self.args, pipeline, self.stream_bbox_queue_by_channel) + get_stream_frame_thread.start() + + while True: + buffer = pipeline.get_single_stream_ret( + save_fig=self.args.main_save_fig, + base64_enc=self.args.main_base64_encode) + self.queue_obj.put(buffer) + + +class GetStreamAndBBoxData(Thread): + def __init__(self, args, pipeline, stream_bbox_queue_by_channel): + super().__init__() + self.args = args + self.pipeline = pipeline + self.stream_bbox_queue_by_channel = stream_bbox_queue_by_channel + + def run(self): + while True: + stream_bbox_data = self.pipeline.get_stream_bbox_data() + if self.args.display_stream_bbox_data: + channel_id = stream_bbox_data["channel_id"] + self.stream_bbox_queue_by_channel.put(stream_bbox_data, channel_id) diff --git a/mxVision/AllObjectsStructuring/pipeline/AllObjectsStructuring.pipeline b/mxVision/AllObjectsStructuring/pipeline/AllObjectsStructuring.pipeline new file mode 100644 index 000000000..b1f178576 --- /dev/null +++ b/mxVision/AllObjectsStructuring/pipeline/AllObjectsStructuring.pipeline @@ -0,0 +1,1956 @@ +{ + "detection":{ + "stream_config":{ + "deviceId":"1" + }, + "mxpi_rtspsrc0":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"0" + }, + "next":"mxpi_videodecoder0" + }, + "mxpi_rtspsrc1":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"1" + }, + "next":"mxpi_videodecoder1" + }, + "mxpi_rtspsrc2":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"2" + }, + "next":"mxpi_videodecoder2" + }, + "mxpi_rtspsrc3":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"3" + }, + "next":"mxpi_videodecoder3" + }, + "mxpi_rtspsrc4":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"4" + }, + "next":"mxpi_videodecoder4" + }, + "mxpi_rtspsrc5":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"5" + }, + "next":"mxpi_videodecoder5" + }, + "mxpi_rtspsrc6":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"6" + }, + "next":"mxpi_videodecoder6" + }, + "mxpi_rtspsrc7":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"7" + }, + "next":"mxpi_videodecoder7" + }, + "mxpi_rtspsrc8":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"8" + }, + "next":"mxpi_videodecoder8" + }, + "mxpi_rtspsrc9":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"9" + 
}, + "next":"mxpi_videodecoder9" + }, + "mxpi_rtspsrc10":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"10" + }, + "next":"mxpi_videodecoder10" + }, + "mxpi_rtspsrc11":{ + "factory":"mxpi_rtspsrc", + "props":{ + "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264", + "channelId":"11" + }, + "next":"mxpi_videodecoder11" + }, + "mxpi_videodecoder0":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"0" + }, + "next":"mxpi_skipframe0" + }, + "mxpi_skipframe0":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:0", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder1":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"1" + }, + "next":"mxpi_skipframe1" + }, + "mxpi_skipframe1":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:1", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder2":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"2" + }, + "next":"mxpi_skipframe2" + }, + "mxpi_skipframe2":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:2", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder3":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"3" + }, + "next":"mxpi_skipframe3" + }, + "mxpi_skipframe3":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:3", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder4":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"4" + }, + "next":"mxpi_skipframe4" + }, + "mxpi_skipframe4":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:4", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder5":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"5" + }, + "next":"mxpi_skipframe5" + }, + "mxpi_skipframe5":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:5", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder6":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"6" + }, + "next":"mxpi_skipframe6" + }, + "mxpi_skipframe6":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:6", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder7":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"7" + }, + "next":"mxpi_skipframe7" + }, + "mxpi_skipframe7":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:7", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder8":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"8" + }, + "next":"mxpi_skipframe8" + }, + "mxpi_skipframe8":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:8", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder9":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"9" + }, + "next":"mxpi_skipframe9" + }, + "mxpi_skipframe9":{ + "factory":"mxpi_skipframe", + 
"next":"mxpi_parallel2serial0:9", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder10":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"10" + }, + "next":"mxpi_skipframe10" + }, + "mxpi_skipframe10":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:10", + "props":{ + "frameNum":"2" + } + }, + "mxpi_videodecoder11":{ + "factory":"mxpi_videodecoder", + "props":{ + "inputVideoFormat":"H264", + "outputImageFormat":"YUV420SP_NV12", + "vdecChannelId":"11" + }, + "next":"mxpi_skipframe11" + }, + "mxpi_skipframe11":{ + "factory":"mxpi_skipframe", + "next":"mxpi_parallel2serial0:11", + "props":{ + "frameNum":"2" + } + }, + "mxpi_parallel2serial0":{ + "factory":"mxpi_parallel2serial", + "props":{ + "dataSource":"mxpi_videodecoder0,mxpi_videodecoder1,mxpi_videodecoder2,mxpi_videodecoder3,mxpi_videodecoder4,mxpi_videodecoder5,mxpi_videodecoder6,mxpi_videodecoder7,mxpi_videodecoder8,mxpi_videodecoder9,mxpi_videodecoder10,mxpi_videodecoder11" + }, + "next":"mxpi_imageresize0" + }, + "mxpi_imageresize0":{ + "props":{ + "dataSource":"mxpi_parallel2serial0", + "resizeHeight":"416", + "resizeWidth":"416" + }, + "factory":"mxpi_imageresize", + "next":"queue0" + }, + "queue0":{ + "props":{ + "max-size-buffers":"500" + }, + "factory":"queue", + "next":"mxpi_modelinfer0" + }, + "mxpi_modelinfer0":{ + "props":{ + "dataSource":"mxpi_imageresize0", + "modelPath":"./models/yolov4_improve/yolov4_detection.om", + "postProcessConfigPath":"./models/yolov4_improve/yolov4.cfg", + "labelPath":"./models/yolov4_improve/coco.names", + "postProcessLibPath":"libMpYOLOv3PostProcessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_objectdistributor0" + }, + "mxpi_objectdistributor0":{ + "props":{ + "classIds":"3,2|0", + "dataSource":"mxpi_modelinfer0" + }, + "factory":"mxpi_distributor", + "next":["queue1","queue33"] + }, + "queue1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop1" + }, + "mxpi_imagecrop1":{ + "props":{ + "dataSource":"mxpi_objectdistributor0_0", + "resizeHeight":"96", + "resizeWidth":"96" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_channeldistributor_pfm" + }, + "mxpi_channeldistributor_pfm":{ + "props":{ + "channelIds":"0|1|2|3,4|5|6|7,8|9|10|11" + }, + "factory":"mxpi_distributor", + "next":["queue_landmark0","queue_landmark1","queue_landmark2"] + }, + "queue_landmark0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_facelandmark0" + }, + "mxpi_facelandmark0":{ + "props":{ + "dataSource":"mxpi_imagecrop1", + "modelPath":"./models/facequality/face_quality_improve.om", + "postProcessLibPath":"libfacelandmarkpostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_facelandmark:0" + }, + "queue_landmark1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_facelandmark1" + }, + "mxpi_facelandmark1":{ + "props":{ + "dataSource":"mxpi_imagecrop1", + "modelPath":"./models/facequality/face_quality_improve.om", + "postProcessLibPath":"libfacelandmarkpostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_facelandmark:1" + }, + "queue_landmark2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_facelandmark2" + }, + "mxpi_facelandmark2":{ + "props":{ + "dataSource":"mxpi_imagecrop1", + "modelPath":"./models/facequality/face_quality_improve.om", + "postProcessLibPath":"libfacelandmarkpostprocessor.so" + }, + "factory":"mxpi_modelinfer", + 
"next":"mxpi_facelandmark:2" + }, + "mxpi_facelandmark":{ + "factory":"mxpi_parallel2serial", + "props":{ + "dataSource":"mxpi_facelandmark0,mxpi_facelandmark1,mxpi_facelandmark2" + }, + "next":"queue2" + }, + "queue2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop0" + }, + "mxpi_imagecrop0":{ + "props":{ + "dataSource":"mxpi_objectdistributor0_0", + "resizeHeight":"96", + "resizeWidth":"64" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_channeldistributor_pfm2" + }, + "mxpi_channeldistributor_pfm2":{ + "props":{ + "channelIds":"0|1|2|3|4|5,6|7|8|9|10|11" + }, + "factory":"mxpi_distributor", + "next":["queue_embedding0","queue_embedding1"] + }, + "queue_embedding0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_FaceFeatureModelInfer0" + }, + "mxpi_FaceFeatureModelInfer0":{ + "props":{ + "dataSource":"mxpi_imagecrop0", + "modelPath":"./models/faceembedding/face_embedding.om", + "postProcessConfigPath":"./models/facefeature/yolov3-tiny-addpad.cfg", + "postProcessLibPath":"libresnetfeaturepostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_FaceFeatureModelInfer:0" + }, + "queue_embedding1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_FaceFeatureModelInfer1" + }, + "mxpi_FaceFeatureModelInfer1":{ + "props":{ + "dataSource":"mxpi_imagecrop0", + "modelPath":"./models/faceembedding/face_embedding.om", + "postProcessConfigPath":"./models/facefeature/yolov3-tiny-addpad.cfg", + "postProcessLibPath":"libresnetfeaturepostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_FaceFeatureModelInfer:1" + }, + "mxpi_FaceFeatureModelInfer":{ + "factory":"mxpi_parallel2serial", + "props":{ + "dataSource":"mxpi_FaceFeatureModelInfer0,mxpi_FaceFeatureModelInfer1" + }, + "next":"queue_test" + }, + "queue_test":{ + "props":{ + "max-size-buffers":"100" + }, + "factory":"queue", + "next":"mxpi_channeldistributor0" + }, + "mxpi_channeldistributor0":{ + "props":{ + "channelIds":"0,1,2,3,4,5,6,7,8,9,10,11" + }, + "factory":"mxpi_distributor", + "next":["mxpi_motsimplesort_face0","mxpi_motsimplesort_face1","mxpi_motsimplesort_face2","mxpi_motsimplesort_face3","mxpi_motsimplesort_face4","mxpi_motsimplesort_face5","mxpi_motsimplesort_face6","mxpi_motsimplesort_face7","mxpi_motsimplesort_face8","mxpi_motsimplesort_face9","mxpi_motsimplesort_face10","mxpi_motsimplesort_face11"] + }, + "mxpi_motsimplesort_face0": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_0" + }, + "queue_sel_0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection0" + }, + "mxpi_faceselection0":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face0", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_0", + "queue_crop_0" + ] + }, + "queue_mux_0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:0" + }, + "queue_crop_0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop2" + }, + "mxpi_imagecrop2":{ + "props":{ + "dataSource":"mxpi_faceselection0_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + 
"next":"mxpi_parallel2serial2:0" + }, + "mxpi_motsimplesort_face1": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_1" + }, + "queue_sel_1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection1" + }, + "mxpi_faceselection1":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face1", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_1", + "queue_crop_1" + ] + }, + "queue_mux_1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:1" + }, + "queue_crop_1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop3" + }, + "mxpi_imagecrop3":{ + "props":{ + "dataSource":"mxpi_faceselection1_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:1" + }, + "mxpi_motsimplesort_face2": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_2" + }, + "queue_sel_2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection2" + }, + "mxpi_faceselection2":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face2", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_2", + "queue_crop_2" + ] + }, + "queue_mux_2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:2" + }, + "queue_crop_2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop4" + }, + "mxpi_imagecrop4":{ + "props":{ + "dataSource":"mxpi_faceselection2_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:2" + }, + "mxpi_motsimplesort_face3": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_3" + }, + "queue_sel_3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection3" + }, + "mxpi_faceselection3":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face3", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_3", + "queue_crop_3" + ] + }, + "queue_mux_3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:3" + }, + "queue_crop_3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop5" + }, + "mxpi_imagecrop5":{ + "props":{ + "dataSource":"mxpi_faceselection3_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:3" + }, + "mxpi_motsimplesort_face4": { + "props": { + "dataSourceDetection": 
"mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_4" + }, + "queue_sel_4":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection4" + }, + "mxpi_faceselection4":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face4", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_4", + "queue_crop_4" + ] + }, + "queue_mux_4":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:4" + }, + "queue_crop_4":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop6" + }, + "mxpi_imagecrop6":{ + "props":{ + "dataSource":"mxpi_faceselection4_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:4" + }, + "mxpi_motsimplesort_face5": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_5" + }, + "queue_sel_5":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection5" + }, + "mxpi_faceselection5":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face5", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_5", + "queue_crop_5" + ] + }, + "queue_mux_5":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:5" + }, + "queue_crop_5":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop7" + }, + "mxpi_imagecrop7":{ + "props":{ + "dataSource":"mxpi_faceselection5_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:5" + }, + "mxpi_motsimplesort_face6": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_6" + }, + "queue_sel_6":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection6" + }, + "mxpi_faceselection6":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face6", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_6", + "queue_crop_6" + ] + }, + "queue_mux_6":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:6" + }, + "queue_crop_6":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_6" + }, + "mxpi_imagecrop_6":{ + "props":{ + "dataSource":"mxpi_faceselection6_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:6" + }, + "mxpi_motsimplesort_face7": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": 
"mxpi_motsimplesort", + "next": "queue_sel_7" + }, + "queue_sel_7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection7" + }, + "mxpi_faceselection7":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face7", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_7", + "queue_crop_7" + ] + }, + "queue_mux_7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:7" + }, + "queue_crop_7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_7" + }, + "mxpi_imagecrop_7":{ + "props":{ + "dataSource":"mxpi_faceselection7_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:7" + }, + "mxpi_motsimplesort_face8": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_8" + }, + "queue_sel_8":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection8" + }, + "mxpi_faceselection8":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face8", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_8", + "queue_crop_8" + ] + }, + "queue_mux_8":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:8" + }, + "queue_crop_8":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_8" + }, + "mxpi_imagecrop_8":{ + "props":{ + "dataSource":"mxpi_faceselection8_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:8" + }, + "mxpi_motsimplesort_face9": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_9" + }, + "queue_sel_9":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection9" + }, + "mxpi_faceselection9":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face9", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_9", + "queue_crop_9" + ] + }, + "queue_mux_9":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:9" + }, + "queue_crop_9":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_9" + }, + "mxpi_imagecrop_9":{ + "props":{ + "dataSource":"mxpi_faceselection9_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:9" + }, + "mxpi_motsimplesort_face10": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_10" + }, + "queue_sel_10":{ + "props":{ + "max-size-buffers":"50" + 
}, + "factory":"queue", + "next":"mxpi_faceselection10" + }, + "mxpi_faceselection10":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face10", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_10", + "queue_crop_10" + ] + }, + "queue_mux_10":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:10" + }, + "queue_crop_10":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_10" + }, + "mxpi_imagecrop_10":{ + "props":{ + "dataSource":"mxpi_faceselection10_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:10" + }, + "mxpi_motsimplesort_face11": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_0", + "dataSourceFeature":"mxpi_FaceFeatureModelInfer" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_sel_11" + }, + "queue_sel_11":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_faceselection11" + }, + "mxpi_faceselection11":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_face11", + "dataSourceKeyPoint":"mxpi_facelandmark", + "minScoreThreshold":"0" + }, + "factory":"mxpi_faceselection", + "next":[ + "queue_mux_11", + "queue_crop_11" + ] + }, + "queue_mux_11":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_parallel2serial1:11" + }, + "queue_crop_11":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop_11" + }, + "mxpi_imagecrop_11":{ + "props":{ + "dataSource":"mxpi_faceselection11_0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_parallel2serial2:11" + }, + "mxpi_parallel2serial1":{ + "factory":"mxpi_parallel2serial", + "props":{ + "dataSource":"mxpi_faceselection0_1,mxpi_faceselection1_1,mxpi_faceselection2_1,mxpi_faceselection3_1,mxpi_faceselection4_1,mxpi_faceselection5_1,mxpi_faceselection6_1,mxpi_faceselection7_1,mxpi_faceselection8_1,mxpi_faceselection9_1,mxpi_faceselection10_1,mxpi_faceselection11_1" + }, + "next":"mxpi_facealignment0:1" + }, + "mxpi_parallel2serial2":{ + "factory":"mxpi_parallel2serial", + "props":{ + "dataSource":"mxpi_imagecrop2,mxpi_imagecrop3,mxpi_imagecrop4,mxpi_imagecrop5,mxpi_imagecrop6,mxpi_imagecrop7,mxpi_imagecrop_6,mxpi_imagecrop_7,mxpi_imagecrop_8,mxpi_imagecrop_9,mxpi_imagecrop_10,mxpi_imagecrop_11" + }, + "next":"mxpi_facealignment0:0" + }, + "mxpi_facealignment0":{ + "props":{ + "status":"1", + "dataSourceImage":"mxpi_parallel2serial2", + "dataSourceKeyPoint":"mxpi_parallel2serial1", + "afterFaceAlignmentHeight":"112", + "afterFaceAlignmentWidth":"112" + }, + "factory":"mxpi_facealignment", + "next":"tee3" + }, + "tee3":{ + "factory":"tee", + "next":[ + "queue7", + "queue19" + ] + }, + "queue7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"face_attribute" + }, + "face_attribute":{ + "props":{ + "dataSource":"mxpi_facealignment0", + "modelPath":"./models/faceattr/face_attribute_batch_4.om", + "postProcessConfigPath":"./models/faceattr/yolov3-tiny-addpad.cfg", + "labelPath":"./models/faceattr/coco.names", + "postProcessLibPath":"libresnetattrpostprocessor.so" + }, + 
"factory":"mxpi_modelinfer", + "next":"mxpi_synchronize0:0" + }, + "queue19":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"face_feature" + }, + "face_feature":{ + "props":{ + "dataSource":"mxpi_facealignment0", + "modelPath":"./models/facefeature/face_feature_batch_1.om", + "postProcessConfigPath":"./models/facefeature/yolov3-tiny-addpad.cfg", + "postProcessLibPath":"libresnetfeaturepostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_synchronize0:1" + }, + "mxpi_synchronize0":{ + "factory":"mxpi_synchronize", + "next":"mxpi_dataserialize0" + }, + "mxpi_dataserialize0":{ + "props":{ + "outputDataKeys":"ReservedFrameInfo,face_attribute,face_feature" + }, + "factory":"mxpi_dataserialize", + "next":"mxpi_parallel2serial3:0" + }, + "queue33":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imagecrop8" + }, + "mxpi_imagecrop8":{ + "props":{ + "dataSource":"mxpi_objectdistributor0_1", + "resizeHeight":"224", + "resizeWidth":"224", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1" + }, + "factory":"mxpi_imagecrop", + "next":"mxpi_channeldistributor1" + }, + "mxpi_channeldistributor1":{ + "props":{ + "channelIds":"0,1,2,3,4,5,6,7,8,9,10,11" + }, + "factory":"mxpi_distributor", + "next":["queue_mot_obj0","queue_mot_obj1","queue_mot_obj2","queue_mot_obj3","queue_mot_obj4","queue_mot_obj5","queue_mot_obj6","queue_mot_obj7","queue_mot_obj8","queue_mot_obj9","queue_mot_obj10","queue_mot_obj11"] + }, + "queue_mot_obj0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj0" + }, + "mxpi_motsimplesort_obj0": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_0" + }, + "queue_ob_0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection0:0" + }, + "mxpi_objectselection0":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj0", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:0" + }, + "queue_mot_obj1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj1" + }, + "mxpi_motsimplesort_obj1": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_1" + }, + "queue_ob_1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection1:0" + }, + "mxpi_objectselection1":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj1", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1 ,0.2, 0.3,0.4,0. 
5,0.6,0.7, 0.8,0.9,0.8,0 .7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:1" + }, + "queue_mot_obj2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj2" + }, + "mxpi_motsimplesort_obj2": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_2" + }, + "queue_ob_2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection2:0" + }, + "mxpi_objectselection2":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj2", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:2" + }, + "queue_mot_obj3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj3" + }, + "mxpi_motsimplesort_obj3": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_3" + }, + "queue_ob_3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection3:0" + }, + "mxpi_objectselection3":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj3", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:3" + }, + "queue_mot_obj4":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj4" + }, + "mxpi_motsimplesort_obj4": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_4" + }, + "queue_ob_4":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection4:0" + }, + "mxpi_objectselection4":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj4", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:4" + }, + "queue_mot_obj5":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj5" + }, + "mxpi_motsimplesort_obj5": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_5" + }, + "queue_ob_5":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection5:0" + }, + "mxpi_objectselection5":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj5", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + 
"normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:5" + }, + "queue_mot_obj6":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj6" + }, + "mxpi_motsimplesort_obj6": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_6" + }, + "queue_ob_6":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection6:0" + }, + "mxpi_objectselection6":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj6", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:6" + }, + "queue_mot_obj7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj7" + }, + "mxpi_motsimplesort_obj7": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_7" + }, + "queue_ob_7":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection7:0" + }, + "mxpi_objectselection7":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj7", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:7" + }, + "queue_mot_obj8":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj8" + }, + "mxpi_motsimplesort_obj8": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_8" + }, + "queue_ob_8":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection8:0" + }, + "mxpi_objectselection8":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj8", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:8" + }, + "queue_mot_obj9":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj9" + }, + "mxpi_motsimplesort_obj9": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_9" + }, + "queue_ob_9":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection9:0" + }, + "mxpi_objectselection9":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj9", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", 
+ "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:9" + }, + "queue_mot_obj10":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj10" + }, + "mxpi_motsimplesort_obj10": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_10" + }, + "queue_ob_10":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection10:0" + }, + "mxpi_objectselection10":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj10", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:10" + }, + "queue_mot_obj11":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_motsimplesort_obj11" + }, + "mxpi_motsimplesort_obj11": { + "props": { + "dataSourceDetection": "mxpi_objectdistributor0_1" + }, + "factory": "mxpi_motsimplesort", + "next": "queue_ob_11" + }, + "queue_ob_11":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_objectselection11:0" + }, + "mxpi_objectselection11":{ + "props":{ + "status":"1", + "dataSourceDetection":"mxpi_motsimplesort_obj11", + "dataSourceImage":"mxpi_imagecrop8", + "tmarginValue":"0.1", + "weightMargin":"0.2", + "weightOcclude":"0.11", + "weightSize":"0.21", + "weightConf":"0.31", + "trackTime":"0", + "normRadius":"0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1", + "outputKeys":"vision,object" + }, + "factory":"mxpi_objectselection", + "next":"mxpi_parallel2serial4:11" + }, + "mxpi_parallel2serial4":{ + "factory":"mxpi_parallel2serial", + "next":"mxpi_objectdistributor1" + }, + "mxpi_objectdistributor1":{ + "props":{ + "classIds":"2,0", + "dataSource":"object" + }, + "factory":"mxpi_distributor", + "next":[ + "queue12", + "queue13" + ] + }, + "queue12":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"tee4" + }, + "tee4":{ + "factory":"tee", + "next":[ + "queue20", + "queue21" + ] + }, + "queue20":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imageresize3" + }, + "mxpi_imageresize3":{ + "props":{ + "dataSource":"vision", + "resizeHeight":"224", + "resizeWidth":"224" + }, + "factory":"mxpi_imageresize", + "next":"queue14" + }, + "queue14":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"motor_attr" + }, + "motor_attr":{ + "props":{ + "dataSource":"mxpi_imageresize3", + "modelPath":"./models/motorattr/vehicle_attribute.om", + "postProcessConfigPath":"./models/motorattr/yolov3-tiny-addpad.cfg", + "labelPath":"./models/motorattr/coco.names", + "postProcessLibPath":"libresnetattrpostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_synchronize2:0" + }, + "queue21":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imageresize6" + }, + "mxpi_imageresize6":{ + "props":{ + "dataSource":"vision", + "resizeHeight": "640", + "resizeWidth": "480" + }, + "factory":"mxpi_imageresize", + "next":"queue22" + }, + "queue22":{ + "props":{ + "max-size-buffers":"50" 
+ }, + "factory":"queue", + "next":"mxpi_modelinfer4" + }, + "mxpi_modelinfer4": { + "props": { + "parentName": "mxpi_imageresize6", + "modelPath": "./models/car_plate_detection/car_plate_detection.om", + "postProcessConfigPath": "./models/car_plate_detection/ssd_vgg16_caffe_glue_hole.cfg", + "labelPath": "./models/car_plate_detection/ssd_vgg16_caffe_glue_hole.names", + "postProcessLibPath": "libssdvggpostprocessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_objectdistributor2" + }, + "mxpi_objectdistributor2":{ + "props":{ + "classIds":"0|1", + "dataSource":"mxpi_modelinfer4", + "distributeAll":"yes" + }, + "factory":"mxpi_distributor", + "next":"mxpi_imagecrop9" + }, + "mxpi_imagecrop9":{ + "props":{ + "dataSource":"mxpi_objectdistributor2_0", + "resizeHeight":"72", + "resizeWidth":"272" + }, + "factory":"mxpi_imagecrop", + "next":"queue23" + }, + "queue23":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"car_plate" + }, + "car_plate": { + "props": { + "parentName": "mxpi_imagecrop9", + "modelPath": "./models/car_plate_recognition/car_plate_recognition.om", + "postProcessLibPath": "libcarplaterecognitionpostprocessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_synchronize2:1" + }, + "mxpi_synchronize2":{ + "factory":"mxpi_synchronize", + "next":"mxpi_dataserialize1" + }, + "mxpi_dataserialize1":{ + "props":{ + "outputDataKeys":"ReservedFrameInfo,motor_attr,car_plate" + }, + "factory":"mxpi_dataserialize", + "next":"mxpi_parallel2serial3:1" + }, + "queue13":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"tee2" + }, + "tee2":{ + "factory":"tee", + "next":[ + "queue16", + "queue17" + ] + }, + "queue16":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imageresize4" + }, + "mxpi_imageresize4":{ + "props":{ + "dataSource":"vision", + "resizeHeight":"256", + "resizeWidth":"192" + }, + "factory":"mxpi_imageresize", + "next":"queue15" + }, + "queue15":{ + "props":{ + "max-size-buffers":"100" + }, + "factory":"queue", + "next":"pedestrian_attribute" + }, + "pedestrian_attribute":{ + "props":{ + "dataSource":"mxpi_imageresize4", + "modelPath":"./models/pedestrianattribute/pede_attr_4_batch.om", + "postProcessConfigPath":"./models/pedestrianattribute/pedes_attr.cfg", + "labelPath":"./models/pedestrianattribute/coco.names", + "postProcessLibPath":"libresnetattrpostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_synchronize1:0" + }, + "queue17":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imageresize5" + }, + "mxpi_imageresize5":{ + "props":{ + "dataSource":"vision", + "resizeHeight":"384", + "resizeWidth":"128" + }, + "factory":"mxpi_imageresize", + "next":"queue18" + }, + "queue18":{ + "props":{ + "max-size-buffers":"100" + }, + "factory":"queue", + "next":"pedestrian_reid" + }, + "pedestrian_reid":{ + "props":{ + "dataSource":"mxpi_imageresize5", + "modelPath":"./models/pedereid/pede_reid_4_batch.om", + "postProcessConfigPath":"./models/pedereid/pedereid.cfg", + "labelPath":"./models/pedereid/coco.names", + "postProcessLibPath":"libresnetfeaturepostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_synchronize1:1" + }, + "mxpi_synchronize1":{ + "factory":"mxpi_synchronize", + "next":"mxpi_dataserialize2" + }, + "mxpi_dataserialize2":{ + "props":{ + "outputDataKeys":"ReservedFrameInfo,pedestrian_attribute,pedestrian_reid" + }, + "factory":"mxpi_dataserialize", + "next":"mxpi_parallel2serial3:2" + }, + 
"mxpi_parallel2serial3":{ + "factory":"mxpi_parallel2serial", + "next":"appsink0" + }, + "appsink0":{ + "factory":"appsink" + } + } +} \ No newline at end of file diff --git a/mxVision/AllObjectsStructuring/pipeline/face_registry.pipeline b/mxVision/AllObjectsStructuring/pipeline/face_registry.pipeline new file mode 100644 index 000000000..d53bed401 --- /dev/null +++ b/mxVision/AllObjectsStructuring/pipeline/face_registry.pipeline @@ -0,0 +1,127 @@ +{ + "face-feature": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_imagedecoder0", + "resizeHeight": "416", + "resizeWidth": "416" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props":{ + "dataSource":"mxpi_imageresize0", + "modelPath":"./models/yolov4_improve/yolov4_detection.om", + "postProcessConfigPath":"./models/yolov4_improve/yolov4.cfg", + "labelPath":"./models/yolov4_improve/coco.names", + "postProcessLibPath":"libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_imagecrop0" + }, + "mxpi_imagecrop0": { + "props": { + "dataSource": "mxpi_modelinfer0", + "leftExpandRatio":"0.1", + "rightExpandRatio":"0.1", + "upExpandRatio":"0.1", + "downExpandRatio":"0.1", + "resizeHeight":"112", + "resizeWidth":"112" + }, + "factory": "mxpi_imagecrop", + "next": "tee0" + }, + "tee0":{ + "props":{ + + }, + "factory":"tee", + "next":[ + "queue0", + "queue1" + ] + }, + "queue0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_imageresize1" + }, + "queue1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next":"mxpi_facealignment0:0" + }, + "mxpi_imageresize1": { + "props": { + "resizeHeight":"96", + "resizeWidth":"96", + "dataSource": "mxpi_imagecrop0" + }, + "factory": "mxpi_imageresize", + "next": "face_landmark" + }, + "face_landmark": { + "props":{ + "dataSource":"mxpi_imageresize1", + "modelPath":"./models/facequality/face_quality_0605_b1.om", + "postProcessLibPath":"libfacelandmarkpostprocessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_facealignment0:1" + }, + "mxpi_facealignment0":{ + "props":{ + "status":"1", + "dataSourceImage":"mxpi_imagecrop0", + "dataSourceKeyPoint":"face_landmark", + "afterFaceAlignmentHeight":"112", + "afterFaceAlignmentWidth":"112" + }, + "factory":"mxpi_facealignment", + "next":"face_feature" + }, + "face_feature":{ + "props":{ + "dataSource":"mxpi_facealignment0", + "modelPath":"./models/facefeature/face_feature_batch_1.om", + "postProcessConfigPath":"./models/facefeature/yolov3-tiny-addpad.cfg", + "postProcessLibPath":"libresnetfeaturepostprocessor.so" + }, + "factory":"mxpi_modelinfer", + "next":"mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "face_feature" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } +} diff --git a/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/CMakeLists.txt b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/CMakeLists.txt new file mode 100644 index 000000000..20258a308 --- /dev/null +++ b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/CMakeLists.txt @@ -0,0 +1,14 @@ +set(PLUGIN_NAME "mxpi_objectselection") 
+set(TARGET_LIBRARY ${PLUGIN_NAME})
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+
+add_library(${TARGET_LIBRARY} SHARED MpObjectSelection.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxbase mxpidatatype)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_SOURCE_DIR}/dist/lib)
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.cpp b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.cpp
new file mode 100644
index 000000000..cd429bd57
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MpObjectSelection.h"
+
+#include <cmath>
+#include <cfloat>
+#include <boost/algorithm/string.hpp>
+#include "MxTools/Proto/MxpiDataTypeDeleter.h"
+#include "MxBase/CV/Core/DataType.h"
+
+using namespace MxBase;
+using namespace MxTools;
+
+namespace {
+    const int KEY_COUNT = 2;
+    const int FRAME_COUNT_FOR_SEC = 10;
+    const int MARGINRATE_COUNT = 3;
+}
+
+APP_ERROR MpObjectSelection::Init(std::map<std::string, std::shared_ptr<void>>& configParamMap)
+{
+    LogInfo << "Start to initialize MpObjectSelection(" << elementName_ << ").";
+    prePluginName_ = *std::static_pointer_cast<std::string>(configParamMap["dataSourceDetection"]);
+    cropPluginName_ = *std::static_pointer_cast<std::string>(configParamMap["dataSourceImage"]);
+    tmargin_ = *std::static_pointer_cast<float>(configParamMap["tmarginValue"]);
+    weightMargin_ = *std::static_pointer_cast<float>(configParamMap["weightMargin"]);
+    weightOcclude_ = *std::static_pointer_cast<float>(configParamMap["weightOcclude"]);
+    weightSize_ = *std::static_pointer_cast<float>(configParamMap["weightSize"]);
+    weightConf_ = *std::static_pointer_cast<float>(configParamMap["weightConf"]);
+    std::string normRadius = *std::static_pointer_cast<std::string>(configParamMap["normRadius"]);
+    trackTime_ = *std::static_pointer_cast<int>(configParamMap["trackTime"]);
+    std::string keys = *std::static_pointer_cast<std::string>(configParamMap["outputKeys"]);
+    if (keys != "") {
+        boost::split(keysVec_, keys, boost::is_any_of(","), boost::token_compress_on);
+    } else {
+        LogError << "Using property key";
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    if (normRadius.empty() || keysVec_.size() < KEY_COUNT) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get norm radius failed.";
+        LogError << errorInfo_.str();
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    GetNormRadius(normRadius);
+    LogInfo << "End to initialize MpObjectSelection(" << elementName_ << ").";
+    return APP_ERR_OK;
+}
+
+void MpObjectSelection::GetNormRadius(std::string& normRadius)
+{
+    size_t index = 0;
+    while ((index = normRadius.find(' ', index)) != std::string::npos) {
+        normRadius.erase(index, 1);
+    }
+    std::vector<std::string> normRadiusVec;
+    boost::split(normRadiusVec, normRadius, boost::is_any_of(","), boost::token_compress_on);
+    for (size_t i = 0; i < normRadiusVec.size(); i++) {
+        std::istringstream iss(normRadiusVec[i]);
+        float normRadiu;
+        iss >> normRadiu;
+        normRadius_.push_back(normRadiu);
+    }
+}
+
+APP_ERROR MpObjectSelection::DeInit()
+{
+    LogInfo << "Start to deinitialize MpObjectSelection(" << elementName_ << ").";
+    LogInfo << "End to deinitialize MpObjectSelection(" << elementName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::SetMxpiErrorInfo(const std::string pluginName, APP_ERROR errorCode,
+    const std::string& errorText)
+{
+    InputParam inputParam = {};
+    inputParam.mxpiMemoryType = MXPI_MEMORY_DVPP;
+    inputParam.deviceId = deviceId_;
+    inputParam.dataSize = 0;
+    MxpiBuffer* mxpiBuffer = MxpiBufferManager::CreateDeviceBufferWithMemory(inputParam);
+    if (mxpiBuffer == nullptr) {
+        return APP_ERR_OK;
+    }
+
+    if (errorText == "") {
+        return SendData(0, *mxpiBuffer);
+    }
+    return SendMxpiErrorInfo(*mxpiBuffer, pluginName, errorCode, errorText);
+}
+
+APP_ERROR MpObjectSelection::CheckInputBuffer(MxpiBuffer& motBuffer)
+{
+    errorInfo_.str("");
+
+    MxpiMetadataManager mxpiMetadataManager(motBuffer);
+    if (mxpiMetadataManager.GetErrorInfo() != nullptr) {
+        LogWarn << "Input data is invalid, element(" << elementName_
+                << ") plugin will not be executed rightly.";
+        SetMxpiErrorInfo(elementName_, APP_ERR_COMM_INVALID_PARAM, "");
+        MxpiBufferManager::DestroyBuffer(&motBuffer);
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    auto metadataPtr = mxpiMetadataManager.GetMetadata(prePluginName_);
+    if (metadataPtr == nullptr) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get previous plugin pointer failed.";
+        LogDebug << errorInfo_.str();
+        SetMxpiErrorInfo(elementName_, APP_ERR_COMM_INVALID_PARAM, "");
+        MxpiBufferManager::DestroyBuffer(&motBuffer);
+        return APP_ERR_COMM_INVALID_PARAM;
+    } else {
+        auto metadataType = mxpiMetadataManager.GetMetadataWithType(prePluginName_, "MxpiTrackLetList");
+        if (metadataType == nullptr) {
+            errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get the error type.";
+            LogError << errorInfo_.str();
+            SetMxpiErrorInfo(elementName_, APP_ERR_COMM_INVALID_PARAM, "Get the error type.");
+            MxpiBufferManager::DestroyBuffer(&motBuffer);
+            return APP_ERR_COMM_INVALID_PARAM;
+        }
+    }
+    std::shared_ptr<MxpiTrackLetList> mxpiTrackLetList = std::static_pointer_cast<MxpiTrackLetList>(
+        mxpiMetadataManager.GetMetadata(prePluginName_));
+    if (mxpiTrackLetList == nullptr) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get previous plugin pointer failed.";
+        LogError << errorInfo_.str();
+        SetMxpiErrorInfo(elementName_, APP_ERR_COMM_INVALID_PARAM, "Get previous plugin pointer failed.");
+        MxpiBufferManager::DestroyBuffer(&motBuffer);
+        return APP_ERR_COMM_INVALID_PARAM;
+    } else if (mxpiTrackLetList->trackletvec_size() == 0) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Track list is null.";
+        LogError << errorInfo_.str();
+        MxpiBufferManager::DestroyBuffer(&motBuffer);
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    return APP_ERR_OK;
+}
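+
+// Control-flow overview (inferred from the function bodies below): each frame's
+// tracklets are paired with their detected objects and pushed onto stackSet_ in
+// order of the bottom edge y1, so objects nearer the camera are scored first;
+// GetOccludeScore()/GetPositionScore() grade every candidate, RefleshData() keeps
+// only the best-scoring crop per track id in targetTrack_, and CreatNewBuffer()
+// emits the cached crop once the track is lost or, when trackTime is set,
+// periodically via CheckSendData().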
+
+APP_ERROR MpObjectSelection::Process(std::vector<MxpiBuffer*>& mxpiBuffer)
+{
+    LogDebug << "Begin to process MpObjectSelection(" << elementName_ << ").";
+    MxpiBuffer *inputMxpiBuffer = mxpiBuffer[0];
+    APP_ERROR ret = CheckInputBuffer(*inputMxpiBuffer);
+    if (ret != APP_ERR_OK) {
+        return ret;
+    }
+    MxpiMetadataManager mxpiMetadataManager(*inputMxpiBuffer);
+    std::shared_ptr<MxpiTrackLetList> mxpiTrackLetList = std::static_pointer_cast<MxpiTrackLetList>(
+        mxpiMetadataManager.GetMetadata(prePluginName_));
+    ret = TargetSelect(*inputMxpiBuffer, mxpiTrackLetList);
+    if (ret != APP_ERR_OK) {
+        SetMxpiErrorInfo(elementName_, APP_ERR_COMM_INVALID_PARAM, "Select target failed.");
+        MxpiBufferManager::DestroyBuffer(inputMxpiBuffer);
+        return ret;
+    }
+    MxpiBufferManager::DestroyBuffer(inputMxpiBuffer);
+    LogDebug << "End to process MpObjectSelection(" << elementName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::TargetSelect(MxpiBuffer& buffer, std::shared_ptr<MxpiTrackLetList>& datalist)
+{
+    MxpiMetadataManager mxpiMetadataManager(buffer);
+    MxpiFrame inputMxpiFrame = MxpiBufferManager::GetHostDataInfo(buffer);
+    MxpiVisionList visionList = inputMxpiFrame.visionlist();
+    channelId_ = inputMxpiFrame.frameinfo().channelid();
+    int imageWidth = visionList.visionvec(0).visioninfo().width();
+    int imageHeight = visionList.visionvec(0).visioninfo().height();
+    while (!stackSet_.empty()) {
+        stackSet_.pop();
+    }
+    APP_ERROR ret = PushDataToStack(buffer, *datalist, imageHeight);
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        return ret;
+    }
+
+    std::shared_ptr<MxpiVisionList> cropVisionList = std::static_pointer_cast<MxpiVisionList>(
+        mxpiMetadataManager.GetMetadata(cropPluginName_));
+    if (cropVisionList == nullptr) {
+        ret = APP_ERR_COMM_INVALID_PARAM;
+        errorInfo_ << GetError(ret, elementName_) << "Get previous plugin pointer failed.";
+        LogError << errorInfo_.str();
+        return ret;
+    }
+    ret = StartSelect(*cropVisionList, imageHeight, imageWidth);
+    if (ret != APP_ERR_OK) {
+        errorInfo_ << GetError(ret, elementName_) << "Select target failed.";
+        LogError << errorInfo_.str();
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::PushDataToStack(MxpiBuffer& buffer, MxpiTrackLetList& trackLetList, float yMax)
+{
+    LogDebug << "Start to push data to stack(" << elementName_ << ").";
+    std::string cropParentName = "";
+    for (int i = 0; i < trackLetList.trackletvec_size(); i++) {
+        if (trackLetList.trackletvec(i).headervec_size() > 0) {
+            cropParentName = trackLetList.trackletvec(i).headervec(0).datasource();
+            break;
+        }
+    }
+    MxpiMetadataManager mxpiMetadataManager(buffer);
+    std::shared_ptr<MxpiObjectList> mxpiObjectList = std::static_pointer_cast<MxpiObjectList>(
+        mxpiMetadataManager.GetMetadata(cropParentName));
+    if (mxpiObjectList == nullptr) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get object list failed.";
+        LogError << errorInfo_.str();
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    APP_ERROR ret = PushObject(yMax, trackLetList, mxpiObjectList);
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        return ret;
+    }
+    LogDebug << "End to push data to stack(" << elementName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::PushObject(float yMax, MxTools::MxpiTrackLetList& trackLetList,
+    std::shared_ptr<MxpiObjectList>& mxpiObjectList)
+{
+    APP_ERROR ret = APP_ERR_OK;
+    std::vector<TargetTrack> targetTrack = {};
+    TargetTrack tmpTargetTrack = {};
+    for (int i = 0; i < trackLetList.trackletvec_size(); i++) {
+        if (trackLetList.trackletvec(i).trackflag() == LOST_OBJECT) {
+            ret = CreatNewBuffer(trackLetList.trackletvec(i).trackid());
+            if (ret != APP_ERR_OK) {
+                LogError << errorInfo_.str();
+                return ret;
+            }
+            continue;
+        }
+        int objectIndex = trackLetList.trackletvec(i).headervec(0).memberid();
+        if (objectIndex >= mxpiObjectList->objectvec_size()) {
+            errorInfo_ << GetError(APP_ERR_COMM_INVALID_POINTER, elementName_) << "Get object failed.";
+            LogError << errorInfo_.str();
+            return APP_ERR_COMM_INVALID_PARAM;
+        }
+        tmpTargetTrack.mxpiObject.CopyFrom(mxpiObjectList->objectvec(objectIndex));
+        tmpTargetTrack.mxpiTrackLet.CopyFrom(trackLetList.trackletvec(i));
+        tmpTargetTrack.channelId = channelId_;
+        targetTrack.push_back(tmpTargetTrack);
+    }
+    std::map<int, int> tmpObject = {};
+    for (size_t i = 0; i < targetTrack.size(); i++) {
+        int tmpIndex = 0;
+        float tmpYmax = yMax;
+        for (size_t j = 0; j < targetTrack.size(); j++) {
+            if (tmpObject.find(j) == tmpObject.end() && tmpYmax >= targetTrack[j].mxpiObject.y1()) {
+                tmpYmax = targetTrack[j].mxpiObject.y1();
+                tmpIndex = j;
+            }
+        }
+        stackSet_.push(targetTrack[tmpIndex]);
+        tmpObject[tmpIndex] = 1;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::CreatNewBuffer(const int trackId, bool refresh)
+{
+    APP_ERROR ret;
+    auto iter = targetTrack_.find(trackId);
+    if (iter == targetTrack_.end()) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Get track id(" << trackId << ") failed.";
+        LogError << errorInfo_.str();
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    InputParam inputParam = {keysVec_[0], deviceId_, (int)iter->second.data.size, iter->second.data.ptrData};
+    inputParam.mxpiVisionInfo.CopyFrom(iter->second.mxpiVision.visioninfo());
+    inputParam.mxpiFrameInfo.set_frameid(frameId_);
+    inputParam.mxpiFrameInfo.set_channelid(iter->second.channelId);
+    inputParam.mxpiMemoryType = MXPI_MEMORY_DVPP;
+    MxpiBuffer* mxpiBuffer;
+    if (!refresh) {
+        mxpiBuffer = MxpiBufferManager::CreateDeviceBufferAndCopyData(inputParam);
+    } else {
+        mxpiBuffer = MxpiBufferManager::CreateDeviceBufferWithMemory(inputParam);
+    }
+
+    if (mxpiBuffer == nullptr) {
+        LogError << "New buffer is null, trackId(" << trackId << "), dataptr("
+                 << iter->second.data.ptrData << ").";
+        ret = MxBase::MemoryHelper::MxbsFree(iter->second.data);
+        targetTrack_.erase(iter);
+        if (ret != APP_ERR_OK) {
+            errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Failed to free data.";
+            LogError << errorInfo_.str();
+            return ret;
+        }
+        return APP_ERR_OK;
+    }
+
+    ret = AddObjectList(*mxpiBuffer, iter);
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        return ret;
+    }
+    if (!refresh) {
+        return APP_ERR_OK;
+    }
+    targetTrack_.erase(iter);
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::AddObjectList(MxTools::MxpiBuffer& buffer, std::map<int, TargetTrack>::iterator& iter)
+{
+    std::shared_ptr<MxpiObjectList> objectList = std::make_shared<MxpiObjectList>();
+    MxTools::MxpiObject* objectData = objectList->add_objectvec();
+    objectData->set_x0(iter->second.mxpiObject.x0());
+    objectData->set_y0(iter->second.mxpiObject.y0());
+    objectData->set_x1(iter->second.mxpiObject.x1());
+    objectData->set_y1(iter->second.mxpiObject.y1());
+    MxTools::MxpiClass* classInfo = objectData->add_classvec();
+    classInfo->set_classid(iter->second.mxpiObject.classvec(0).classid());
+    classInfo->set_classname(iter->second.mxpiObject.classvec(0).classname());
+    classInfo->set_confidence(iter->second.mxpiObject.classvec(0).confidence());
+    MxTools::MxpiMetaHeader* header = objectData->add_headervec();
+    header->set_datasource(pluginName_);
+    header->set_memberid(0);
+    MxTools::MxpiMetadataManager mxpiMetadataManager(buffer);
+    APP_ERROR ret = mxpiMetadataManager.AddProtoMetadata(keysVec_[1], objectList);
+    if (ret != APP_ERR_OK) {
+        errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_)
+                   << "Add proto metadata(" << keysVec_[1] << ") failed.";
+        LogError << errorInfo_.str();
+        return ret;
+    }
+    SendData(0, buffer); // Send the data to downstream plugin
+    frameId_++;
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::StartSelect(MxpiVisionList& cropVisionList, int imageHeight, int imageWidth)
+{
+    LogDebug << "Start to select(" << elementName_ << ").";
+    APP_ERROR ret = APP_ERR_OK;
+    frontSet_.clear();
+    while (stackSet_.size() > 0) {
+        TargetTrack targetTrack = stackSet_.top();
+        targetTrack.imageHeight = imageHeight;
+        targetTrack.imageWidth = imageWidth;
+        ret = GetOccludeScore(targetTrack);
+        if (ret != APP_ERR_OK) {
+            LogError << errorInfo_.str();
+            return ret;
+        }
+        for (int i = 0; i < cropVisionList.visionvec_size(); i++) {
+            if (cropVisionList.visionvec(i).headervec(0).memberid() ==
+                targetTrack.mxpiTrackLet.headervec(0).memberid()) {
+                targetTrack.mxpiVision.CopyFrom(cropVisionList.visionvec(i));
+                break;
+            }
+        }
+        targetTrack.data.type = MxBase::MemoryData::MEMORY_DVPP;
+        targetTrack.data.size = targetTrack.mxpiVision.visiondata().datasize();
+        targetTrack.data.deviceId = deviceId_;
+        MemoryData src;
+        src.type = MxBase::MemoryData::MEMORY_DVPP;
+        src.size = targetTrack.mxpiVision.visiondata().datasize();
+        src.ptrData = (void*)targetTrack.mxpiVision.visiondata().dataptr();
+        src.deviceId = deviceId_;
+        ret = MxBase::MemoryHelper::MxbsMallocAndCopy(targetTrack.data, src);
+        if (ret != APP_ERR_OK) {
+            errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Failed to malloc and copy data.";
+            LogError << errorInfo_.str();
+            return ret;
+        }
+        targetTrack.age = 0;
+        ret = RefleshData(targetTrack);
+        if (ret != APP_ERR_OK) {
+            LogError << errorInfo_.str();
+            return ret;
+        }
+        stackSet_.pop();
+    }
+    LogDebug << "End to select(" << elementName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MpObjectSelection::RefleshData(TargetTrack& targetTrack)
+{
+    APP_ERROR ret;
+    auto iter = targetTrack_.find(targetTrack.mxpiTrackLet.trackid());
+    if (iter != targetTrack_.end()) {
+        if (iter->second.score < targetTrack.score) {
+            iter->second.score = targetTrack.score;
+            iter->second.marginScore = targetTrack.marginScore;
+            iter->second.occludeScore = targetTrack.occludeScore;
+            iter->second.imageHeight = targetTrack.imageHeight;
+            iter->second.imageWidth = targetTrack.imageWidth;
+            iter->second.channelId = targetTrack.channelId;
+            iter->second.mxpiVision.CopyFrom(targetTrack.mxpiVision);
+            ret = MxBase::MemoryHelper::MxbsFree(iter->second.data);
+            if (ret != APP_ERR_OK) {
+                errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Failed to free data.";
+                LogError << errorInfo_.str();
+                return ret;
+            }
+            iter->second.data = targetTrack.data;
+            iter->second.mxpiObject.CopyFrom(targetTrack.mxpiObject);
+        } else {
+            ret = MxBase::MemoryHelper::MxbsFree(targetTrack.data);
+            if (ret != APP_ERR_OK) {
+                errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << "Failed to free data.";
+                LogError << errorInfo_.str();
+                return ret;
+            }
+        }
+        iter->second.mxpiTrackLet.CopyFrom(targetTrack.mxpiTrackLet);
+
+        if (CheckSendData(targetTrack.mxpiTrackLet.trackid())) {
+            ret = CreatNewBuffer(targetTrack.mxpiTrackLet.trackid(), false);
+            if (ret != APP_ERR_OK) {
+                LogError << errorInfo_.str();
+                return ret;
+            }
+        }
+    } else {
+        targetTrack.age = targetTrack.mxpiTrackLet.hits();
+        targetTrack_[targetTrack.mxpiTrackLet.trackid()] = targetTrack;
+    }
+    return APP_ERR_OK;
+}
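+
+// Worked example for the margin score below (using tmarginValue = 0.1 from the
+// sample pipeline): on a 1920x1080 frame, a box with y1 = 972 gives
+// marginRateY = (1080 - 972) / 1080 = 0.1, which is clipped to tmargin_ and then
+// divided by tmargin_, i.e. 1.0; boxes at least 10% of the frame away from every
+// border therefore receive the maximum margin score, and the three rates are
+// averaged over MARGINRATE_COUNT.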
elementName_) << "Input image width is (" + << targetTrack.imageWidth << "), height is (" << targetTrack.imageHeight << "). Please check it."; + LogError << errorInfo_.str(); + return APP_ERR_COMM_INVALID_PARAM; + } + int marginRateY = (targetTrack.imageHeight - mxpiObject.y1()) / targetTrack.imageHeight; + marginRateY = (marginRateY > tmargin_) ? tmargin_ : marginRateY; + int marginRateXRight = (targetTrack.imageWidth - mxpiObject.x1()) / targetTrack.imageWidth; + int marginRateXLeft = mxpiObject.x0() / targetTrack.imageWidth; + marginRateXRight = (marginRateXRight > tmargin_) ? tmargin_ : marginRateXRight; + marginRateXLeft = (marginRateXLeft > tmargin_) ? tmargin_ : marginRateXLeft; + marginRateY = marginRateY / tmargin_; + marginRateXRight = marginRateXRight / tmargin_; + marginRateXLeft = marginRateXLeft / tmargin_; + targetTrack.marginScore = (marginRateY + marginRateXRight + marginRateXLeft) / MARGINRATE_COUNT; + return APP_ERR_OK; +} + +APP_ERROR MpObjectSelection::GetOccludeScore(TargetTrack& targetTrack) +{ + APP_ERROR ret; + float occludeScore = 0.0; + float targetArea = (targetTrack.mxpiObject.x1() - targetTrack.mxpiObject.x0()) * + (targetTrack.mxpiObject.y1() - targetTrack.mxpiObject.y0()); + if (fabs(targetArea) < FLT_EPSILON) { + errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) << ", Input image width is (" + << targetTrack.imageWidth << "), height is (" << targetTrack.imageHeight << "). Please check it."; + LogError << errorInfo_.str(); + return APP_ERR_COMM_INVALID_PARAM; + } + for (size_t i = 0; i < frontSet_.size(); i++) { + if (targetTrack.mxpiObject.x1() <= frontSet_[i].mxpiObject.x0() || + targetTrack.mxpiObject.x0() >= frontSet_[i].mxpiObject.x1() || + targetTrack.mxpiObject.y1() >= frontSet_[i].mxpiObject.y0()) { + continue; + } else { + float tmpXmin = (targetTrack.mxpiObject.x0() > frontSet_[i].mxpiObject.x0()) ? + targetTrack.mxpiObject.x0() : frontSet_[i].mxpiObject.x0(); + float tmpXmax = (targetTrack.mxpiObject.x1() < frontSet_[i].mxpiObject.x1()) ? + targetTrack.mxpiObject.x0() : frontSet_[i].mxpiObject.x0(); + float intersection = (tmpXmax - tmpXmin) * + (targetTrack.mxpiObject.y1() - frontSet_[i].mxpiObject.y0()); + occludeScore = (occludeScore > (intersection / targetArea)) ? 
occludeScore : (intersection / targetArea); + } + } + targetTrack.occludeScore = occludeScore; + frontSet_.push_back(targetTrack); + ret = GetPositionScore(targetTrack.mxpiObject, targetTrack); + if (ret != APP_ERR_OK) { + LogError << errorInfo_.str(); + return ret; + } + float imageWidth = targetTrack.mxpiObject.x1() - targetTrack.mxpiObject.x0(); + float imageHeight = targetTrack.mxpiObject.y1() - targetTrack.mxpiObject.y0(); + float imageArea = imageWidth * imageHeight; + if ((size_t)targetTrack.mxpiObject.classvec(0).classid() < normRadius_.size() && + fabs(normRadius_[targetTrack.mxpiObject.classvec(0).classid()]) > FLT_EPSILON) { + targetTrack.sizeScore = sqrt(imageArea) / normRadius_[targetTrack.mxpiObject.classvec(0).classid()]; + } + + targetTrack.score = weightMargin_ * targetTrack.marginScore + weightOcclude_ * targetTrack.occludeScore + + weightSize_ * targetTrack.sizeScore + weightConf_ * targetTrack.confScore; + return APP_ERR_OK; +} + +bool MpObjectSelection::CheckSendData(const int trackId) +{ + auto iter = targetTrack_.find(trackId); + if (iter == targetTrack_.end()) { + errorInfo_ << GetError(APP_ERR_COMM_INVALID_PARAM, elementName_) + << " Get track id (" << trackId << ") failed."; + LogError << errorInfo_.str(); + return false; + } + if (trackTime_ > 0 && iter->second.age++ > trackTime_ * FRAME_COUNT_FOR_SEC) { + iter->second.age = 0; + return true; + } + return false; +} + +MxpiPortInfo MpObjectSelection::DefineInputPorts() +{ + MxpiPortInfo inputPortInfo; + std::vector> value = {{"ANY"}}; + GenerateStaticInputPortsInfo(value, inputPortInfo); + + return inputPortInfo; +} + +MxpiPortInfo MpObjectSelection::DefineOutputPorts() +{ + MxpiPortInfo outputPortInfo; + std::vector> value = {{"ANY"}}; + GenerateStaticOutputPortsInfo(value, outputPortInfo); + + return outputPortInfo; +} + +std::vector> MpObjectSelection::DefineProperties() +{ + std::vector> properties; + + auto dataSourceDetection = std::make_shared>(ElementProperty { + STRING, "dataSourceDetection", "name", "The name of detection data source", "", "", "" + }); + auto dataSourceImage = std::make_shared>(ElementProperty { + STRING, "dataSourceImage", "name", "The name of image data source", "", "", "" + }); + auto tmarginValue = std::make_shared>(ElementProperty { + FLOAT, "tmarginValue", "value", "the value of tmargin", 0.1, 0.0, 0.2 + }); + auto weightMargin = std::make_shared>(ElementProperty { + FLOAT, "weightMargin", "value", "the value of weight margin", 0.1, 0.0, 0.5 + }); + auto weightOcclude = std::make_shared>(ElementProperty { + FLOAT, "weightOcclude", "value", "the value of weight occlude", 0.1, 0.0, 0.5 + }); + auto weightSize = std::make_shared>(ElementProperty { + FLOAT, "weightSize", "value", "the value of weight size", 0.1, 0.0, 0.5 + }); + auto weightConf = std::make_shared>(ElementProperty { + FLOAT, "weightConf", "value", "the value of weight conf", 0.1, 0.0, 0.5 + }); + auto trackTime = std::make_shared>(ElementProperty { + INT, "trackTime", "value", "the value of track time", 5, 0, 10000 + }); + auto normRadius = std::make_shared>(ElementProperty { + STRING, "normRadius", "norm radius", "the value of norm radius", "", "", "" + }); + auto keys = std::make_shared>(ElementProperty { + STRING, "outputKeys", "outputKeys", "keys for add meta data", "", "", "" + }); + properties.push_back(dataSourceDetection); + properties.push_back(dataSourceImage); + properties.push_back(tmarginValue); + properties.push_back(weightMargin); + properties.push_back(weightOcclude); + properties.push_back(weightSize); 
+ properties.push_back(weightConf); + properties.push_back(normRadius); + properties.push_back(trackTime); + properties.push_back(keys); + return properties; +} + +namespace { +MX_PLUGIN_GENERATE(MpObjectSelection) +} diff --git a/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.h b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.h new file mode 100644 index 000000000..bf56afac0 --- /dev/null +++ b/mxVision/AllObjectsStructuring/plugins/MpObjectSelection/MpObjectSelection.h @@ -0,0 +1,119 @@ +/* + * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MXPLUGINS_MPOBJECTSELECTION_H +#define MXPLUGINS_MPOBJECTSELECTION_H + +#include +#include "MxBase/ErrorCode/ErrorCode.h" +#include "MxTools/PluginToolkit/base/MxPluginGenerator.h" +#include "MxTools/PluginToolkit/buffer/MxpiBufferManager.h" +#include "MxTools/PluginToolkit/metadata/MxpiMetadataManager.h" +#include "MxTools/Proto/MxpiDataType.pb.h" + +struct TargetTrack { + MxTools::MxpiTrackLet mxpiTrackLet; + MxTools::MxpiVision mxpiVision; + MxTools::MxpiObject mxpiObject; + MxBase::MemoryData data; + uint32_t channelId; + float score; + float marginScore; + float occludeScore; + float sizeScore; + float confScore; + int imageHeight; + int imageWidth; + int age; +}; + +class MpObjectSelection : public MxTools::MxPluginBase { +public: + /** + * @api + * @param configParamMap. + * @return Error code. + */ + APP_ERROR Init(std::map>& configParamMap) override; + + /** + * @api + * @return Error code. + */ + APP_ERROR DeInit() override; + + /** + * @api + * @brief Definition the parameter of configure properties. + * @return Error code. + */ + APP_ERROR Process(std::vector& mxpiBuffer) override; + + /** + * @api + * @brief Definition the parameter of configure properties. + * @return std::vector> + */ + static std::vector> DefineProperties(); + + /** + * @api + * @brief Define the number and data type of input ports. + * @return MxTools::MxpiPortInfo. + */ + static MxTools::MxpiPortInfo DefineInputPorts(); + + /** + * @api + * @brief Define the number and data type of output ports. + * @return MxTools::MxpiPortInfo. 
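+
+// Annotation (inferred from usage in MpObjectSelection.cpp): TargetTrack caches the
+// best crop seen so far for one track id; `score` is the weighted sum of the margin,
+// occlusion, size and confidence terms, `data` owns a DVPP copy of the crop, and
+// `age` counts frames since the last emission (checked against
+// trackTime * FRAME_COUNT_FOR_SEC in CheckSendData()).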
+
+class MpObjectSelection : public MxTools::MxPluginBase {
+public:
+    /**
+     * @api
+     * @param configParamMap.
+     * @return Error code.
+     */
+    APP_ERROR Init(std::map<std::string, std::shared_ptr<void>>& configParamMap) override;
+
+    /**
+     * @api
+     * @return Error code.
+     */
+    APP_ERROR DeInit() override;
+
+    /**
+     * @api
+     * @brief Definition the parameter of configure properties.
+     * @return Error code.
+     */
+    APP_ERROR Process(std::vector<MxTools::MxpiBuffer*>& mxpiBuffer) override;
+
+    /**
+     * @api
+     * @brief Definition the parameter of configure properties.
+     * @return std::vector<std::shared_ptr<void>>
+     */
+    static std::vector<std::shared_ptr<void>> DefineProperties();
+
+    /**
+     * @api
+     * @brief Define the number and data type of input ports.
+     * @return MxTools::MxpiPortInfo.
+     */
+    static MxTools::MxpiPortInfo DefineInputPorts();
+
+    /**
+     * @api
+     * @brief Define the number and data type of output ports.
+     * @return MxTools::MxpiPortInfo.
+     */
+    static MxTools::MxpiPortInfo DefineOutputPorts();
+
+private:
+    void GetNormRadius(std::string& normRadius);
+    APP_ERROR SetMxpiErrorInfo(const std::string pluginName, APP_ERROR errorCode, const std::string& errorText);
+    APP_ERROR CheckInputBuffer(MxTools::MxpiBuffer& motBuffer);
+    APP_ERROR TargetSelect(MxTools::MxpiBuffer& buffer, std::shared_ptr<MxTools::MxpiTrackLetList>& datalist);
+    APP_ERROR GetPositionScore(const MxTools::MxpiObject& mxpiObject, TargetTrack& targetTrack);
+    APP_ERROR GetOccludeScore(TargetTrack& targetTrack);
+    APP_ERROR PushDataToStack(MxTools::MxpiBuffer& buffer, MxTools::MxpiTrackLetList& mxpiTrackLetList, float yMax);
+    APP_ERROR PushObject(float yMax, MxTools::MxpiTrackLetList& trackLetList,
+        std::shared_ptr<MxTools::MxpiObjectList>& mxpiObjectList);
+    APP_ERROR StartSelect(MxTools::MxpiVisionList& cropVisionList, int imageHeight, int imageWidth);
+    APP_ERROR RefleshData(TargetTrack& targetTrack);
+    APP_ERROR CreatNewBuffer(const int trackId, bool refresh = true);
+    APP_ERROR AddObjectList(MxTools::MxpiBuffer& buffer, std::map<int, TargetTrack>::iterator& iter);
+    bool CheckSendData(const int trackId);
+private:
+    std::string prePluginName_ = "";
+    std::string cropPluginName_ = "";
+    std::ostringstream errorInfo_ {};
+    std::map<int, TargetTrack> targetTrack_ = {};
+    std::stack<TargetTrack> stackSet_ = {};
+    std::vector<TargetTrack> frontSet_ = {};
+    std::vector<float> normRadius_ = {};
+    std::vector<std::string> keysVec_ = {};
+    uint32_t frameId_ = 0;
+    uint32_t channelId_ = 0;
+    int trackTime_ = 0;
+    float tmargin_ = 0.f;
+    float weightMargin_ = 0.f;
+    float weightOcclude_ = 0.f;
+    float weightSize_ = 0.f;
+    float weightConf_ = 0.f;
+};
+#endif
diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/CMakeLists.txt b/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/CMakeLists.txt
new file mode 100644
index 000000000..7b83d0928
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/CMakeLists.txt
@@ -0,0 +1,14 @@
+set(PLUGIN_NAME "mxpi_faceselection")
+set(TARGET_LIBRARY ${PLUGIN_NAME})
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+
+add_library(${TARGET_LIBRARY} SHARED MxpiFaceSelection.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxbase mxpidatatype)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack)
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_SOURCE_DIR}/dist/lib)
\ No newline at end of file
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MxpiFaceSelection.h"
+
+#include <algorithm>
+#include "MxBase/Log/Log.h"
+
+using namespace MxBase;
+using namespace MxTools;
+
+namespace {
+const int KEY_POINTS_VEC_SIZE = 15;
+const int FACE_KEY_POINT = 5;
+}
+
+APP_ERROR MxpiFaceSelection::Init(std::map<std::string, std::shared_ptr<void>> &configParamMap)
+{
+    LogInfo << "Begin to initialize MxpiFaceSelection(" << pluginName_ << ").";
+    // Set previous MOT plugin name
+    std::shared_ptr<std::string> trackedParentName = std::static_pointer_cast<std::string>(
+        configParamMap["dataSourceDetection"]);
+    trackedParentName_ = *trackedParentName;
+    // Set previous face key point plugin name
+    std::shared_ptr<std::string> keyPointParentName = std::static_pointer_cast<std::string>(
+        configParamMap["dataSourceKeyPoint"]);
+    keyPointParentName_ = *keyPointParentName;
+    // Set weight of face key point score
+    std::shared_ptr<float> keyPointWeight = std::static_pointer_cast<float>(configParamMap["keyPointWeight"]);
+    keyPointWeight_ = *keyPointWeight;
+    // Set weight of face pose score
+    std::shared_ptr<float> eulerWeight = std::static_pointer_cast<float>(configParamMap["eulerWeight"]);
+    eulerWeight_ = *eulerWeight;
+    // Set weight of face size score
+    std::shared_ptr<float> faceSizeWeight = std::static_pointer_cast<float>(configParamMap["faceSizeWeight"]);
+    faceSizeWeight_ = *faceSizeWeight;
+    // Set min face score threshold
+    std::shared_ptr<float> minScoreThreshold = std::static_pointer_cast<float>(configParamMap["minScoreThreshold"]);
+    minScoreThreshold_ = *minScoreThreshold;
+    // Set max send age of face selection
+    std::shared_ptr<uint32_t> maxAge = std::static_pointer_cast<uint32_t>(configParamMap["maxAge"]);
+    maxAge_ = *maxAge;
+    LogInfo << "End to initialize MxpiFaceSelection(" << pluginName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiFaceSelection::DeInit()
+{
+    LogInfo << "Begin to deinitialize MxpiFaceSelection.";
+    LogInfo << "End to deinitialize MxpiFaceSelection.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiFaceSelection::Process(std::vector<MxpiBuffer*> &mxpiBuffer)
+{
+    LogDebug << "Begin to process MxpiFaceSelection(" << pluginName_ << ").";
+    // Get mxpiBuffer from first import port
+    MxpiBuffer* inputBuffer = mxpiBuffer[0];
+    MxpiMetadataManager mxpiMetadataManagerFirst(*inputBuffer);
+    errorInfo_.str("");
+    MxpiErrorInfo mxpiErrorInfo;
+    auto errorInfoPtr = mxpiMetadataManagerFirst.GetErrorInfo();
+    if (errorInfoPtr != nullptr) {
+        LogDebug << "WARNING. Input data is invalid, element(" << pluginName_
+                 << ") plugin will not be executed correctly.";
+        SendData(0, *inputBuffer);
+        return APP_ERR_OK;
+    }
+    APP_ERROR ret = ErrorProcess(*inputBuffer);
+    if (ret != APP_ERR_OK) {
+        return ret;
+    }
+    ret = CheckMetadataType(*inputBuffer);
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        SendMxpiErrorInfo(*inputBuffer, pluginName_, ret, errorInfo_.str());
+        return ret;
+    }
+    // Get tracking and face key point results from the first and second import ports
+    std::vector<FaceObject> faceObjectQueue;
+    ret = GetPrePluginsResult(*inputBuffer, faceObjectQueue);
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        SendMxpiErrorInfo(*inputBuffer, pluginName_, ret, errorInfo_.str());
+        return ret;
+    }
+    isUsedBuffer_ = false;
+    FaceQualityEvaluation(faceObjectQueue, *inputBuffer);
+    ret = GetFaceSelectionResult();
+    if (ret != APP_ERR_OK) {
+        LogError << errorInfo_.str();
+        SendMxpiErrorInfo(*inputBuffer, pluginName_, ret, errorInfo_.str());
+        return APP_ERR_OK;
+    }
+    if (!isUsedBuffer_) {
+        MxpiBufferManager::DestroyBuffer(inputBuffer);
+    }
+    // Erase released entries; map::erase returns the next valid iterator,
+    // so only advance the iterator when nothing was erased.
+    for (auto iter = bufferMap_.begin(); iter != bufferMap_.end();) {
+        if (iter->second.ref == 0) {
+            iter->second.mxpiBuffer = nullptr;
+            iter = bufferMap_.erase(iter);
+        } else {
+            ++iter;
+        }
+    }
+    LogDebug << "End to process MxpiFaceSelection(" << pluginName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiFaceSelection::CheckMetadataType(MxTools::MxpiBuffer &inputBuffer)
+{
+    MxpiMetadataManager mxpiMetadataManager(inputBuffer);
+    auto trackedRes = mxpiMetadataManager.GetMetadataWithType(trackedParentName_, "MxpiTrackLetList");
+    if (trackedRes == nullptr) {
+        errorInfo_ << GetError(APP_ERR_PROTOBUF_NAME_MISMATCH, elementName_)
+                   << "Not a MxpiTrackLetList object.";
+        return APP_ERR_PROTOBUF_NAME_MISMATCH;
+    }
+
+    auto keyPointRes = mxpiMetadataManager.GetMetadataWithType(keyPointParentName_, "MxpiKeyPointAndAngleList");
+    if (keyPointRes == nullptr) {
+        errorInfo_ << GetError(APP_ERR_PROTOBUF_NAME_MISMATCH, elementName_)
+                   << "Not a MxpiKeyPointAndAngleList object.";
+        return APP_ERR_PROTOBUF_NAME_MISMATCH;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiFaceSelection::ErrorProcess(MxTools::MxpiBuffer &inputBuffer)
+{
+    MxpiMetadataManager mxpiMetadataManager(inputBuffer);
+    if (mxpiMetadataManager.GetMetadata(trackedParentName_) == nullptr) {
+        LogDebug << "Failed to get object tracked result.";
+        gst_buffer_ref((GstBuffer*) inputBuffer.buffer);
+        auto* tmpBuffer = new(std::nothrow) MxpiBuffer {inputBuffer.buffer};
+        if (tmpBuffer == nullptr) {
+            return APP_ERR_COMM_ALLOC_MEM;
+        }
+        SendData(1, *tmpBuffer);
+        SendData(0, inputBuffer);
+        return APP_ERR_COMM_INVALID_POINTER;
+    }
+
+    if (mxpiMetadataManager.GetMetadata(keyPointParentName_) == nullptr) {
+        LogDebug << "Failed to get face key points result.";
+        gst_buffer_ref((GstBuffer*) inputBuffer.buffer);
+        auto* tmpBuffer = new(std::nothrow) MxpiBuffer {inputBuffer.buffer};
+        if (tmpBuffer == nullptr) {
+            return APP_ERR_COMM_ALLOC_MEM;
+        }
+        SendData(1, *tmpBuffer);
+        SendData(0, inputBuffer);
+        return APP_ERR_COMM_INVALID_POINTER;
+    }
+
+    return APP_ERR_OK;
+}
+
+std::vector<std::shared_ptr<void>> MxpiFaceSelection::DefineProperties()
+{
+    std::vector<std::shared_ptr<void>> properties;
+    auto keyPointWeight = std::make_shared<ElementProperty<float>>(ElementProperty<float> {
+        FLOAT, "keyPointWeight", "keyPointWeight", "weight of key point score", 1.f, 0.f, 1.f
+    });
+    auto eulerWeight = std::make_shared<ElementProperty<float>>(ElementProperty<float> {
+        FLOAT, "eulerWeight", "eulerWeight", "weight of face euler angles score", 1.f, 0.f, 1.f
+    });
+    auto faceSizeWeight = std::make_shared<ElementProperty<float>>(ElementProperty<float> {
+        FLOAT, "faceSizeWeight", "faceSizeWeight", "weight of big face score", 1.f, 0.f, 1.f
+    });
+    auto minScoreThreshold = std::make_shared<ElementProperty<float>>(ElementProperty<float> {
+        FLOAT, "minScoreThreshold", "minScoreThreshold", "min face total score threshold", 4.f, 0.f, 10.f
+    });
+    auto trackedParentName = std::make_shared<ElementProperty<std::string>>(ElementProperty<std::string> {
+        STRING, "dataSourceDetection", "dataSourceDetection", "the key of detection data source", ""
+    });
+    auto keyPointParentName = std::make_shared<ElementProperty<std::string>>(ElementProperty<std::string> {
+        STRING, "dataSourceKeyPoint", "dataSourceKeyPoint", "the key of face key point data source", ""
+    });
+    auto maxAge = std::make_shared<ElementProperty<uint32_t>>(ElementProperty<uint32_t> {
+        UINT, "maxAge", "maxAge", "Max age for stopping face selection", 500, 1, 1000
+    });
+    properties.push_back(keyPointWeight);
+    properties.push_back(eulerWeight);
+    properties.push_back(faceSizeWeight);
+    properties.push_back(minScoreThreshold);
+    properties.push_back(trackedParentName);
+    properties.push_back(keyPointParentName);
+    properties.push_back(maxAge);
+    return properties;
+}
+
+MxpiPortInfo MxpiFaceSelection::DefineInputPorts()
+{
+    MxpiPortInfo inputPortInfo;
+    std::vector<std::vector<std::string>> value = {{"ANY"}};
+    GenerateStaticInputPortsInfo(value, inputPortInfo);
+
+    return inputPortInfo;
+}
+
+MxpiPortInfo MxpiFaceSelection::DefineOutputPorts()
+{
+    MxpiPortInfo outputPortInfo;
+    std::vector<std::vector<std::string>> value = {{"ANY"}, {"ANY"}};
+    GenerateStaticOutputPortsInfo(value, outputPortInfo);
+
+    return outputPortInfo;
+}
+
+APP_ERROR MxpiFaceSelection::GetPrePluginsResult(MxpiBuffer &inputBuffer, std::vector<FaceObject> &faceObjectQueue)
+{
+    APP_ERROR ret = APP_ERR_COMM_INVALID_POINTER;
+    MxpiMetadataManager mxpiMetadataManager(inputBuffer);
+    MxpiFrame frameData = MxTools::MxpiBufferManager::GetDeviceDataInfo(inputBuffer);
+    uint32_t frameId = frameData.frameinfo().frameid();
+    uint32_t channelId = frameData.frameinfo().channelid();
+    // Get the object tracking result from the buffer
+    std::shared_ptr<MxpiTrackLetList> trackLetList = std::static_pointer_cast<MxpiTrackLetList>(
+        mxpiMetadataManager.GetMetadata(trackedParentName_));
+    if (trackLetList->trackletvec_size() == 0) {
+        errorInfo_ << GetError(ret, pluginName_) << "Failed to get object tracked result.";
+        return ret;
+    }
+    // Get the object detection result list by trackLet parent name from the buffer
+    std::string parentName = "";
+    for (uint32_t j = 0; j < trackLetList->trackletvec_size(); ++j) {
+        if (trackLetList->trackletvec(j).headervec_size() > 0) {
+            parentName = trackLetList->trackletvec(j).headervec(0).datasource();
+            break;
+        }
+    }
+    std::shared_ptr<MxpiObjectList> objectList = std::static_pointer_cast<MxpiObjectList>(
+        mxpiMetadataManager.GetMetadata(parentName));
+    if (objectList == nullptr || objectList->objectvec_size() == 0) {
+        errorInfo_ << GetError(ret, pluginName_) << "Failed to get detection result.";
+        return ret;
+    }
+    // Get the face key point result list from the buffer
+    std::shared_ptr<MxpiKeyPointAndAngleList> keyPointAndAngleList = std::static_pointer_cast<MxpiKeyPointAndAngleList>(
+        mxpiMetadataManager.GetMetadata(keyPointParentName_));
+    if (keyPointAndAngleList->keypointandanglevec_size() == 0) {
+        errorInfo_ << GetError(ret, pluginName_) << "Failed to get face key points result.";
+        return ret;
+    }
+    for (uint32_t i = 0; i < trackLetList->trackletvec_size(); ++i) {
+        FaceObject faceObject {trackedParentName_, i, frameId, channelId, 0, trackLetList->trackletvec(i)};
+        if (trackLetList->trackletvec(i).headervec_size() == 0) {
+            faceObjectQueue.push_back(faceObject);
+            continue;
+        }
+        int32_t memberId = trackLetList->trackletvec(i).headervec(0).memberid();
+        if (memberId >= objectList->objectvec_size() || memberId >= keyPointAndAngleList->keypointandanglevec_size()) {
+            faceObjectQueue.push_back(faceObject);
+            continue;
+        }
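+        // (Editorial note) headervec(0).memberid() is the index this trackLet
+        // shares with the upstream detection list and key point list, which is
+        // what lets the three per-object results be joined below. Objects that
+        // fail the bounds check above are still queued so that lost tracks can
+        // be flushed by the quality evaluation step.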
// Get object detection and face key point result by memberId + faceObject.detectInfo = objectList->objectvec(memberId); + faceObject.keyPointAndAngle = keyPointAndAngleList->keypointandanglevec(memberId); + faceObjectQueue.push_back(faceObject); + } + return APP_ERR_OK; +} + +float MxpiFaceSelection::CalKeyPointScore(const FaceObject &faceObject) +{ + float score = 0.f; + if (faceObject.keyPointAndAngle.keypointsvec_size() == 0) { + LogWarn << "WARNING. Key points vector size is zero, trackId is: " << faceObject.trackLet.trackid() << "."; + return score; + } + if (faceObject.keyPointAndAngle.keypointsvec_size() != KEY_POINTS_VEC_SIZE) { + LogWarn << "WARNING. Key points vector size is invalid, trackId is: " << faceObject.trackLet.trackid() << "."; + return score; + } + float elementScoreLimit = 0.2; + int IndexOffset = 10; + for (int i = 0; i < FACE_KEY_POINT; ++i) { + float tmpScore = faceObject.keyPointAndAngle.keypointsvec(i + IndexOffset); + score += ((tmpScore > elementScoreLimit) ? elementScoreLimit : tmpScore); + } + return score; +} + +float MxpiFaceSelection::CalEulerScore(const FaceObject &faceObject) +{ + if (faceObject.keyPointAndAngle.keypointsvec_size() == 0) { + LogWarn << "WARNING. Key points vector size is zero, trackId is: " << faceObject.trackLet.trackid() << "."; + return 0.f; + } + uint32_t degree90 = 90; + uint32_t pitchConstant = 6; + float yaw = faceObject.keyPointAndAngle.angleyaw(); + float pitch = faceObject.keyPointAndAngle.anglepitch(); + float roll = faceObject.keyPointAndAngle.angleroll(); + pitch = (pitch > pitchConstant) ? pitch - pitchConstant : 0; + return (degree90 - yaw) / degree90 + (degree90 - pitch) / degree90 + (degree90 - roll) / degree90; +} + +float MxpiFaceSelection::CalFaceSizeScore(const FaceObject &faceObject) +{ + float width = faceObject.detectInfo.x1() - faceObject.detectInfo.x0(); + float height = faceObject.detectInfo.y1() - faceObject.detectInfo.y0(); + uint32_t maxFaceHW = 60; + uint32_t normFaceHW = 50; + float faceStretchRatio = 1.2; + float faceScoreConstant = 3600.0; + width = (width > normFaceHW) ? maxFaceHW : (width * faceStretchRatio); + height = (height > normFaceHW) ? 
maxFaceHW : (height * faceStretchRatio); + return 1 - std::fabs(maxFaceHW - width) * std::fabs(maxFaceHW - height) / faceScoreConstant; +} + +float MxpiFaceSelection::CalTotalScore(const FaceObject &faceObject) +{ + float keyPointScore = CalKeyPointScore(faceObject); + float eulerScore = CalEulerScore(faceObject); + float faceSizeScore = CalFaceSizeScore(faceObject); + float score = keyPointWeight_ * keyPointScore + eulerWeight_ * eulerScore + faceSizeWeight_ * faceSizeScore; + return score; +} + +void MxpiFaceSelection::FaceQualityEvaluation(std::vector &faceObjectQueue, MxpiBuffer &buffer) +{ + uint32_t frameId = faceObjectQueue[0].frameId; + BufferManager bufferManager {0, nullptr}; + std::vector trackIdVec; + for (size_t i = 0; i < faceObjectQueue.size(); ++i) { + uint32_t trackId = faceObjectQueue[i].trackLet.trackid(); + if (faceObjectQueue[i].trackLet.trackflag() == LOST_OBJECT && + qualityAssessmentMap_.find(trackId) != qualityAssessmentMap_.end()) { + qualityAssessmentMap_[trackId].trackLet.set_trackflag(LOST_OBJECT); + continue; + } + if (bufferMap_.find(frameId) != bufferMap_.end()) { + LogDebug << "FrameId is existed."; + continue; + } + float score = CalTotalScore(faceObjectQueue[i]); + if (score <= minScoreThreshold_) { + continue; + } + faceObjectQueue[i].score = score; + if (qualityAssessmentMap_.find(trackId) == qualityAssessmentMap_.end()) { + qualityAssessmentMap_[trackId] = faceObjectQueue[i]; + bufferManager.ref++; + trackIdVec.push_back(trackId); + } else { + if (qualityAssessmentMap_[trackId].score < score) { + uint32_t oldFrameId = qualityAssessmentMap_[trackId].frameId; + auto iter = std::find(bufferMap_[oldFrameId].trackIdVec.begin(), + bufferMap_[oldFrameId].trackIdVec.end(), trackId); + if (iter != bufferMap_[oldFrameId].trackIdVec.end()) { + bufferMap_[oldFrameId].trackIdVec.erase(iter); + bufferMap_[oldFrameId].ref--; + } + if (bufferMap_[oldFrameId].ref == 0) { + MxpiBufferManager::DestroyBuffer(bufferMap_[oldFrameId].mxpiBuffer); + } + qualityAssessmentMap_[trackId] = faceObjectQueue[i]; + bufferManager.ref++; + trackIdVec.push_back(trackId); + } + } + } + if (bufferManager.ref > 0) { + isUsedBuffer_ = true; + bufferManager.mxpiBuffer = &buffer; + bufferManager.trackIdVec = trackIdVec; + bufferMap_[frameId] = bufferManager; + } +} + +APP_ERROR MxpiFaceSelection::GetFaceSelectionResult() +{ + auto iter = qualityAssessmentMap_.begin(); + while (iter != qualityAssessmentMap_.end()) { + if (iter->second.trackLet.trackflag() == LOST_OBJECT) { + // Get MxpiObjectList result + auto objectList = std::make_shared(); + GetObjectListResult(iter, objectList); + // Get MxpiKeyPointAndAngleList result + auto keyPointAndAngleList = std::make_shared(); + GetKeyPointResult(iter, keyPointAndAngleList); + APP_ERROR ret = SendSelectionDate(iter, objectList, keyPointAndAngleList); + if (ret != APP_ERR_OK) { + return ret; + } + iter = qualityAssessmentMap_.erase(iter); + } else { + ++iter; + } + } + return APP_ERR_OK; +} + +APP_ERROR MxpiFaceSelection::SendSelectionDate(std::map::iterator &iter, + std::shared_ptr &objectList, + std::shared_ptr &keyPointAndAngleList) +{ + uint32_t frameId = iter->second.frameId; + uint32_t trackId = iter->second.trackLet.trackid(); + uint32_t channelId = iter->second.channelId; + if (bufferMap_[frameId].ref > 1) { + MxpiFrame frameData = MxTools::MxpiBufferManager::GetDeviceDataInfo(*bufferMap_[frameId].mxpiBuffer); + InputParam inputParam = { + pluginName_, deviceId_, frameData.visionlist().visionvec(0).visiondata().datasize(), + (void*) 
frameData.visionlist().visionvec(0).visiondata().dataptr() + }; + inputParam.mxpiVisionInfo = frameData.visionlist().visionvec(0).visioninfo(); + inputParam.mxpiFrameInfo.set_frameid(frameId); + inputParam.mxpiFrameInfo.set_channelid(channelId); + inputParam.mxpiMemoryType = MXPI_MEMORY_DVPP; + MxpiBuffer* mxpiBuffer = MxpiBufferManager::CreateDeviceBufferAndCopyData(inputParam); + APP_ERROR ret = AddMetaData(*mxpiBuffer, objectList, keyPointAndAngleList); + if (ret != APP_ERR_OK) { + return ret; + } + gst_buffer_ref((GstBuffer*) mxpiBuffer->buffer); + auto* tmpBuffer = new(std::nothrow) MxpiBuffer {mxpiBuffer->buffer}; + if (tmpBuffer == nullptr) { + return APP_ERR_COMM_ALLOC_MEM; + } + SendData(0, *tmpBuffer); + SendData(1, *mxpiBuffer); + } else if (bufferMap_[frameId].ref == 1) { + APP_ERROR ret = AddMetaData(*bufferMap_[frameId].mxpiBuffer, objectList, keyPointAndAngleList); + if (ret != APP_ERR_OK) { + return ret; + } + gst_buffer_ref((GstBuffer*) bufferMap_[frameId].mxpiBuffer->buffer); + auto* tmpBuffer = new(std::nothrow) MxpiBuffer {bufferMap_[frameId].mxpiBuffer->buffer}; + if (tmpBuffer == nullptr) { + return APP_ERR_COMM_ALLOC_MEM; + } + SendData(0, *tmpBuffer); + SendData(1, *bufferMap_[frameId].mxpiBuffer); + } + auto bufferMapIter = std::find(bufferMap_[frameId].trackIdVec.begin(), + bufferMap_[frameId].trackIdVec.end(), trackId); + if (bufferMapIter != bufferMap_[frameId].trackIdVec.end()) { + bufferMap_[frameId].trackIdVec.erase(bufferMapIter); + bufferMap_[frameId].ref--; + } + return APP_ERR_OK; +} + +APP_ERROR MxpiFaceSelection::AddMetaData(MxTools::MxpiBuffer &buffer, std::shared_ptr &objectList, + std::shared_ptr &keyPointAndAngleList) +{ + MxpiMetadataManager metadataManager(buffer); + std::string objectListKey = pluginName_ + "_0"; + APP_ERROR ret = metadataManager.AddProtoMetadata(objectListKey, + std::static_pointer_cast(objectList)); + if (ret != APP_ERR_OK) { + errorInfo_ << GetError(ret, pluginName_) << "Fail to add metadata."; + return ret; + } + std::string keyPointAndAngleListKey = pluginName_ + "_1"; + ret = metadataManager.AddProtoMetadata(keyPointAndAngleListKey, + std::static_pointer_cast(keyPointAndAngleList)); + if (ret != APP_ERR_OK) { + errorInfo_ << GetError(ret, pluginName_) << "Fail to add metadata."; + return ret; + } + return APP_ERR_OK; +} + +void MxpiFaceSelection::GetObjectListResult(std::map::iterator &iter, + std::shared_ptr &objectList) +{ + MxpiObject* object = objectList->add_objectvec(); + MxpiClass* mxpiClass = object->add_classvec(); + MxpiMetaHeader* objectMetaHeader = object->add_headervec(); + object->set_x0(iter->second.detectInfo.x0()); + object->set_x1(iter->second.detectInfo.x1()); + object->set_y0(iter->second.detectInfo.y0()); + object->set_y1(iter->second.detectInfo.y1()); + mxpiClass->set_classid(iter->second.detectInfo.classvec(0).classid()); + mxpiClass->set_classname(iter->second.detectInfo.classvec(0).classname()); + mxpiClass->set_confidence(iter->second.detectInfo.classvec(0).confidence()); + objectMetaHeader->set_datasource(iter->second.parentName); + objectMetaHeader->set_memberid(iter->second.memberId); +} + +void MxpiFaceSelection::GetKeyPointResult(std::map::iterator &iter, + std::shared_ptr &keyPointAndAngleList) +{ + MxpiKeyPointAndAngle* keyPointAndAngle = keyPointAndAngleList->add_keypointandanglevec(); + MxpiMetaHeader* keyPointHeader = keyPointAndAngle->add_headervec(); + for (int i = 0; i < iter->second.keyPointAndAngle.keypointsvec_size(); ++i) { + 
keyPointAndAngle->add_keypointsvec(iter->second.keyPointAndAngle.keypointsvec(i)); + } + keyPointAndAngle->set_anglepitch(iter->second.keyPointAndAngle.anglepitch()); + keyPointAndAngle->set_angleroll(iter->second.keyPointAndAngle.angleroll()); + keyPointAndAngle->set_angleyaw(iter->second.keyPointAndAngle.angleyaw()); + keyPointHeader->set_datasource(iter->second.parentName); + keyPointHeader->set_memberid(iter->second.memberId); +} + +namespace { +MX_PLUGIN_GENERATE(MxpiFaceSelection) +} \ No newline at end of file diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/MxpiFaceSelection.h b/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/MxpiFaceSelection.h new file mode 100644 index 000000000..324344f85 --- /dev/null +++ b/mxVision/AllObjectsStructuring/plugins/MxpiFaceSelection/MxpiFaceSelection.h @@ -0,0 +1,129 @@ +/* + * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MXPLUGINS__FACESELECTION_H +#define MXPLUGINS__FACESELECTION_H + +#include +#include "MxTools/PluginToolkit/base/MxPluginGenerator.h" +#include "MxTools/PluginToolkit/metadata/MxpiMetadataManager.h" +#include "MxTools/Proto/MxpiDataType.pb.h" +#include "MxBase/ErrorCode/ErrorCode.h" +#include "MxBase/CV/Core/DataType.h" + +struct FaceObject { + std::string parentName; + uint32_t memberId; + uint32_t frameId; + uint32_t channelId; + float score; + MxTools::MxpiTrackLet trackLet; + MxTools::MxpiObject detectInfo; + MxTools::MxpiKeyPointAndAngle keyPointAndAngle; +}; + +struct BufferManager { + int ref; + MxTools::MxpiBuffer* mxpiBuffer; + std::vector trackIdVec; +}; + +class MxpiFaceSelection : public MxTools::MxPluginBase { +public: + /** + * @api + * @param configParamMap + * @return + */ + APP_ERROR Init(std::map> &configParamMap) override; + + /** + * @api + * @return + */ + APP_ERROR DeInit() override; + + /** + * @api + * @param mxpiBuffer + * @return + */ + APP_ERROR Process(std::vector &mxpiBuffer) override; + + /** + * @api + * @brief Definition the parameter of configure properties. + * @return std::vector> + */ + static std::vector> DefineProperties(); + + /** + * Optional, defines input ports of the plugin. + */ + static MxTools::MxpiPortInfo DefineInputPorts(); + + /** + * Optional, defines output ports of the plugin. 
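+     *
+     * (Editorial note) The plugin has two output ports: when a track is lost,
+     * the buffer holding its best-scoring frame is sent on both ports, with
+     * the selected MxpiObjectList stored under "<pluginName>_0" and the
+     * matching MxpiKeyPointAndAngleList under "<pluginName>_1" (see
+     * AddMetaData in MxpiFaceSelection.cpp). A minimal sketch of how the
+     * properties defined by DefineProperties might be set in a pipeline
+     * file — the upstream element names here are hypothetical:
+     *
+     *   "mxpi_faceselection0": {
+     *       "props": {
+     *           "dataSourceDetection": "mxpi_motsimplesort0",
+     *           "dataSourceKeyPoint": "mxpi_facelandmark0",
+     *           "keyPointWeight": "0.3",
+     *           "eulerWeight": "0.4",
+     *           "faceSizeWeight": "0.3",
+     *           "minScoreThreshold": "4"
+     *       },
+     *       "factory": "mxpi_faceselection",
+     *       "next": "..."
+     *   }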
+ */ + static MxTools::MxpiPortInfo DefineOutputPorts(); + +private: + APP_ERROR GetPrePluginsResult(MxTools::MxpiBuffer &inputBuffer, std::vector &faceObjectQueue); + + float CalKeyPointScore(const FaceObject &faceObject); + + float CalEulerScore(const FaceObject &faceObject); + + float CalFaceSizeScore(const FaceObject &faceObject); + + float CalTotalScore(const FaceObject &faceObject); + + void FaceQualityEvaluation(std::vector &faceObjectQueue, MxTools::MxpiBuffer &buffer); + + APP_ERROR GetFaceSelectionResult(); + + APP_ERROR SendSelectionDate(std::map::iterator &iter, + std::shared_ptr &objectList, + std::shared_ptr &keyPointAndAngleList); + + APP_ERROR AddMetaData(MxTools::MxpiBuffer &buffer, std::shared_ptr &objectList, + std::shared_ptr &keyPointAndAngleList); + + void GetObjectListResult(std::map::iterator &iter, + std::shared_ptr &objectList); + + void GetKeyPointResult(std::map::iterator &iter, + std::shared_ptr &keyPointAndAngleList); + + APP_ERROR ErrorProcess(MxTools::MxpiBuffer &inputBuffer); + + APP_ERROR CheckMetadataType(MxTools::MxpiBuffer &inputBuffer); + +private: + float keyPointWeight_ = 0.f; // weight of key point score + float eulerWeight_ = 0.f; // weight of face euler angles score + float faceSizeWeight_ = 0.f; // weight of face score + float minScoreThreshold_ = 0.f; // min face total score threshold + uint32_t maxAge_ = 0; // max age for stopping face selection + std::string trackedParentName_ = ""; // the key of trackLet input data + std::string keyPointParentName_ = ""; // the key of face key point input data + std::map qualityAssessmentMap_ = {}; + std::map bufferMap_ = {}; + bool isUsedBuffer_ = false; // if is used current input buffer + std::ostringstream errorInfo_ {}; // the error information +}; + +#endif diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/BlockingMap.h b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/BlockingMap.h new file mode 100644 index 000000000..78752b8b1 --- /dev/null +++ b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/BlockingMap.h @@ -0,0 +1,82 @@ +/* + * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
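+ *
+ * (Editorial note, not part of the license) BlockingMap below is a small
+ * mutex-guarded map shared between MxpiFrameAlign's Process thread and its
+ * send thread. A minimal usage sketch, mirroring how the plugin uses it:
+ *
+ *   BlockingMap<uint32_t, StreamData> streamDataMap;
+ *   StreamData data;
+ *   streamDataMap.Insert(frameId, data);        // Process() thread
+ *   if (streamDataMap.Find(frameId)) {          // SendThread()
+ *       StreamData ready = streamDataMap.Pop(frameId);
+ *   }
+ *
+ * Pop() returns a value-initialized TValue when the key is absent, so callers
+ * should check Find() first. RBegin()/Begin()/End() hand out raw iterators
+ * without holding the lock, so they are only safe from the single thread
+ * that inserts, as in MxpiFrameAlign::AlignFrameObjectInfo.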
+ */
+
+#ifndef BLOCKING_MAP_H
+#define BLOCKING_MAP_H
+
+#include <cstddef>
+#include <map>
+#include <mutex>
+
+template <typename TKey, typename TValue> class BlockingMap {
+public:
+    BlockingMap() = default;
+
+    ~BlockingMap() = default;
+
+    void Insert(const TKey &id, TValue &streamData)
+    {
+        std::lock_guard<std::mutex> lock(mtx_);
+        blockingMap_[id] = streamData;
+    }
+
+    TValue Pop(const TKey &id)
+    {
+        std::lock_guard<std::mutex> lock(mtx_);
+        if (blockingMap_.find(id) != blockingMap_.end()) {
+            auto streamData = blockingMap_[id];
+            blockingMap_.erase(id);
+            return streamData;
+        } else {
+            TValue value {};
+            return value;
+        }
+    }
+
+    size_t Size()
+    {
+        // Lock here as well so that Size() is consistent with Insert()/Pop()
+        std::lock_guard<std::mutex> lock(mtx_);
+        return blockingMap_.size();
+    }
+
+    bool Find(const TKey &id)
+    {
+        std::lock_guard<std::mutex> lock(mtx_);
+        return blockingMap_.find(id) != blockingMap_.end();
+    }
+
+    typename std::map<TKey, TValue>::iterator RBegin()
+    {
+        auto iter = blockingMap_.end();
+        iter--;
+        return iter;
+    }
+
+    typename std::map<TKey, TValue>::iterator Begin()
+    {
+        return blockingMap_.begin();
+    }
+
+    typename std::map<TKey, TValue>::iterator End()
+    {
+        return blockingMap_.end();
+    }
+
+private:
+    std::mutex mtx_ = {};
+    std::map<TKey, TValue> blockingMap_ = {};
+};
+
+#endif
diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/CMakeLists.txt b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/CMakeLists.txt
new file mode 100644
index 000000000..86540f6ab
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/CMakeLists.txt
@@ -0,0 +1,17 @@
+set(PLUGIN_NAME "mxpi_framealign")
+set(TARGET_LIBRARY ${PLUGIN_NAME})
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+include_directories(${PROJECT_DIR}/Proto)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+
+add_library(${TARGET_LIBRARY} SHARED MxpiFrameAlign.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxbase mxpidatatype mxpiallobjectsstructuringdatatype mindxsdk_protobuf)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack)
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_SOURCE_DIR}/dist/lib)
diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.cpp b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.cpp
new file mode 100644
index 000000000..5ad3d59b6
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.cpp
@@ -0,0 +1,385 @@
+/*
+ * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "MxpiFrameAlign.h" +#include "MxBase/Log/Log.h" + +using namespace MxBase; +using namespace MxTools; + +namespace { +const int STREAM_DATA_INPUT_PORT_ID = 0; +const char SPLIT_RULE = ','; +const int ALIGN_NUM = 2; +} + +APP_ERROR MxpiFrameAlign::Init(std::map> &configParamMap) +{ + LogInfo << "Begin to initialize MxpiFrameAlign(" << elementName_ << ")."; + if (status_ != ASYNC) { + LogDebug << "element(" << elementName_ + << ") status must be async(0), you set status sync(1), so force status to async(0)."; + status_ = ASYNC; + } + dataSource_ = *std::static_pointer_cast(configParamMap["dataSource"]); + dataKeyVec_ = SplitWithRemoveBlank(dataSource_, SPLIT_RULE); + if (dataKeyVec_.empty()) { + LogError << GetError(APP_ERR_COMM_INIT_FAIL, elementName_) + << "the data source can not be null"; + return APP_ERR_COMM_INIT_FAIL; + } + intervalTime_ = *std::static_pointer_cast(configParamMap["intervalTime"]); + sendThread_ = std::thread(&MxpiFrameAlign::SendThread, this); + + LogInfo << "End to initialize MxpiFrameAlign(" << elementName_ << ")."; + return APP_ERR_OK; +} + +APP_ERROR MxpiFrameAlign::DeInit() +{ + LogInfo << "Begin to deinitialize MxpiFrameAlign(" << elementName_ << ")."; + sendStop_ = true; + if (sendThread_.joinable()) { + sendThread_.join(); + } + LogInfo << "End to deinitialize MxpiFrameAlign(" << elementName_ << ")."; + return APP_ERR_OK; +} + +APP_ERROR MxpiFrameAlign::Process(std::vector &mxpiBuffer) +{ + LogDebug << "Begin to process MxpiFrameAlign(" << pluginName_ << ")."; + errorInfo_.str(""); + int inputPortId = -1; + for (size_t i = 0; i < mxpiBuffer.size(); i++) { + if (mxpiBuffer[i] != nullptr) { + inputPortId = i; + break; + } + } + MxpiBuffer* inputBuffer = mxpiBuffer[inputPortId]; + MxTools::MxpiMetadataManager mxpiMetadataManager(*inputBuffer); + if (mxpiMetadataManager.GetErrorInfo() != nullptr) { + LogError << "Input data is invalid, element(" << elementName_ << ") plugin will not be executed rightly."; + SendData(0, *inputBuffer); + return APP_ERR_OK; + } + if (inputPortId == STREAM_DATA_INPUT_PORT_ID) { + GetStreamData(*inputBuffer); + MxpiBufferManager::DestroyBuffer(inputBuffer); + } else { + APP_ERROR ret = GetObjectList(*inputBuffer); + if (ret != APP_ERR_OK) { + LogError << errorInfo_.str(); + MxpiBufferManager::DestroyBuffer(inputBuffer); + return ret; + } + AlignFrameObjectInfo(); + MxpiBufferManager::DestroyBuffer(inputBuffer); + } + + LogDebug << "End to process MxpiFrameAlign(" << elementName_ << ")."; + return APP_ERR_OK; +} + +std::vector> MxpiFrameAlign::DefineProperties() +{ + std::vector> properties; + auto dataSource = std::make_shared>(ElementProperty { + STRING, "dataSource", "dataSource", "the name of data source", "", "", "" + }); + auto intervalTime = std::make_shared>(ElementProperty { + INT, "intervalTime", "intervalTime", "the interval time of send data (ms)", 20, 0, 100 + }); + properties.push_back(dataSource); + properties.push_back(intervalTime); + return properties; +} + +MxpiPortInfo MxpiFrameAlign::DefineInputPorts() +{ + MxpiPortInfo inputPortInfo; + std::vector> value = {{"ANY"}}; + GenerateStaticInputPortsInfo(value, inputPortInfo); + std::vector> featureCaps = {{"ANY"}}; + GenerateStaticInputPortsInfo(featureCaps, inputPortInfo); + return inputPortInfo; +} + +MxpiPortInfo MxpiFrameAlign::DefineOutputPorts() +{ + MxpiPortInfo outputPortInfo; + std::vector> value = {{"ANY"}}; + GenerateStaticOutputPortsInfo(value, outputPortInfo); + + return outputPortInfo; +} + +void 
MxpiFrameAlign::GetStreamData(MxTools::MxpiBuffer &inputBuffer) +{ + MxpiFrame mxpiFrame = MxpiBufferManager::GetHostDataInfo(inputBuffer); + MxpiWebDisplayData webDisplayData {}; + uint32_t frameId = mxpiFrame.frameinfo().frameid(); + webDisplayData.set_channel_id(std::to_string(mxpiFrame.frameinfo().channelid())); + webDisplayData.set_frame_index(frameId); + webDisplayData.set_h264_size(mxpiFrame.visionlist().visionvec(0).visiondata().datasize()); + webDisplayData.set_h264_data((void*) mxpiFrame.visionlist().visionvec(0).visiondata().dataptr(), + mxpiFrame.visionlist().visionvec(0).visiondata().datasize()); + StreamData streamData {}; + streamData.webDisplayData = webDisplayData; + streamData.sendFlag = false; + streamDataMap_.Insert(frameId, streamData); +} + +APP_ERROR MxpiFrameAlign::GetObjectList(MxpiBuffer &inputBuffer) +{ + MxpiFrame frameData = MxTools::MxpiBufferManager::GetDeviceDataInfo(inputBuffer); + uint32_t frameId = frameData.frameinfo().frameid(); + std::string channelId = std::to_string(frameData.frameinfo().channelid()); + MxpiMetadataManager mxpiMetadataManager(inputBuffer); + if (!streamDataMap_.Find(frameId)) { + return APP_ERR_OK; + } + std::vector objectInfoList; + for (size_t i = 0; i < dataKeyVec_.size(); ++i) { + std::shared_ptr trackLetList = std::static_pointer_cast( + mxpiMetadataManager.GetMetadata(dataKeyVec_[i])); + if (trackLetList == nullptr || trackLetList->trackletvec_size() == 0) { + continue; + } + // Get object detection result list by trackLet parent name from the buffer + std::string parentName = ""; + for (uint32_t j = 0; j < trackLetList->trackletvec_size(); ++j) { + if (trackLetList->trackletvec(j).headervec_size() > 0) { + parentName = trackLetList->trackletvec(j).headervec(0).datasource(); + break; + } + } + std::shared_ptr objectList = std::static_pointer_cast( + mxpiMetadataManager.GetMetadata(parentName)); + if (objectList == nullptr || objectList->objectvec_size() == 0) { + errorInfo_ << GetError(APP_ERR_COMM_INVALID_POINTER, pluginName_) << "Failed to get detection result."; + continue; + } + for (int k = 0; k < trackLetList->trackletvec_size(); ++k) { + if (trackLetList->trackletvec(k).headervec_size() == 0) { + continue; + } + int32_t memberId = trackLetList->trackletvec(k).headervec(0).memberid(); + if (memberId >= objectList->objectvec_size()) { + continue; + } + ObjectInfo objectInfo {}; + objectInfo.trackId = std::to_string(trackLetList->trackletvec(k).trackid()) + "_" + + std::to_string(objectList->objectvec(memberId).classvec(0).classid()); + objectInfo.x0 = objectList->objectvec(memberId).x0(); + objectInfo.y0 = objectList->objectvec(memberId).y0(); + objectInfo.x1 = objectList->objectvec(memberId).x1(); + objectInfo.y1 = objectList->objectvec(memberId).y1(); + objectInfoList.push_back(objectInfo); + } + } + objectListMap_.Insert(frameId, objectInfoList); + return APP_ERR_OK; +} + +bool MxpiFrameAlign::HadTrackId(std::vector &objectInfoList, std::string &trackId) +{ + for (size_t i = 0; i < objectInfoList.size(); ++i) { + if (trackId == objectInfoList[i].trackId) { + return true; + } + } + return false; +} + +void MxpiFrameAlign::ObjectInfoInterpolated(std::vector &interpolatedObjectInfoList, + std::vector &previousObjectInfoList, + std::vector &latterObjectInfoList, + float &offset) +{ + for (size_t i = 0; i < latterObjectInfoList.size(); ++i) { + std::string trackId = latterObjectInfoList[i].trackId; + ObjectInfo latterObject = latterObjectInfoList[i]; + ObjectInfo previousObject {}; + for (size_t j = 0; j < 
previousObjectInfoList.size(); ++j) {
+            if (trackId == previousObjectInfoList[j].trackId) {
+                previousObject = previousObjectInfoList[j];
+            }
+        }
+        ObjectInfo interpolatedObject {};
+        interpolatedObject.trackId = trackId;
+        // Linear interpolation per coordinate: box_t = box_prev + (box_next - box_prev) * offset
+        interpolatedObject.x0 = previousObject.x0 + (latterObject.x0 - previousObject.x0) * offset;
+        interpolatedObject.y0 = previousObject.y0 + (latterObject.y0 - previousObject.y0) * offset;
+        interpolatedObject.x1 = previousObject.x1 + (latterObject.x1 - previousObject.x1) * offset;
+        interpolatedObject.y1 = previousObject.y1 + (latterObject.y1 - previousObject.y1) * offset;
+        interpolatedObjectInfoList.push_back(interpolatedObject);
+    }
+}
+
+void MxpiFrameAlign::AlignFrameObjectInfo()
+{
+    if (objectListMap_.Size() >= ALIGN_NUM) {
+        auto previousIter = objectListMap_.RBegin();
+        previousIter--;
+        auto lastIter = objectListMap_.RBegin();
+        if (previousFrameId_ == 0) {
+            previousFrameId_ = previousIter->first;
+        }
+        std::vector<ObjectInfo> previousObjectInfoList = previousIter->second;
+        std::vector<ObjectInfo> objectInfoAllVector = lastIter->second;
+        std::vector<ObjectInfo> objectInfoMatchVector;
+        for (size_t i = 0; i < objectInfoAllVector.size(); ++i) {
+            std::string trackId = objectInfoAllVector[i].trackId;
+            if (HadTrackId(previousObjectInfoList, trackId)) {
+                objectInfoMatchVector.push_back(objectInfoAllVector[i]);
+            }
+        }
+        uint32_t frameId = lastIter->first;
+        uint32_t previousFrameId = previousFrameId_;
+        uint64_t frameStep = frameId - previousFrameId_;
+        int stepNo = 0;
+        while (previousFrameId_ < frameId) {
+            if (previousFrameId_ == previousFrameId) {
+                if (streamDataMap_.Find(previousFrameId_)) {
+                    StreamData streamData = streamDataMap_.Pop(previousFrameId_);
+                    streamData.sendFlag = true;
+                    streamDataMap_.Insert(previousFrameId_, streamData);
+                }
+                previousFrameId_++;
+                continue;
+            }
+            stepNo = previousFrameId_ - previousFrameId;
+            float offset = float(stepNo) / float(frameStep);
+            std::vector<ObjectInfo> interpolatedObjectInfoList;
+            ObjectInfoInterpolated(interpolatedObjectInfoList, previousObjectInfoList, objectInfoMatchVector, offset);
+            objectListMap_.Insert(previousFrameId_, interpolatedObjectInfoList);
+            if (streamDataMap_.Find(previousFrameId_)) {
+                StreamData streamData = streamDataMap_.Pop(previousFrameId_);
+                streamData.sendFlag = true;
+                streamDataMap_.Insert(previousFrameId_, streamData);
+            }
+            previousFrameId_++;
+        }
+    }
+}
+
+APP_ERROR MxpiFrameAlign::SendAlignFrame()
+{
+    for (auto iterFrame = streamDataMap_.Begin(); iterFrame != streamDataMap_.End();) {
+        if (iterFrame->second.sendFlag) {
+            uint32_t frameId = iterFrame->first;
+            InputParam inputParam = {};
+            inputParam.dataSize = 0;
+            auto* mxpiBuffer = MxpiBufferManager::CreateHostBufferAndCopyData(inputParam);
+            std::shared_ptr<MxpiWebDisplayDataList> webDisplayDataList = std::make_shared<MxpiWebDisplayDataList>();
+            MxpiMetadataManager mxpiMetadataManager(*mxpiBuffer);
+            APP_ERROR ret = GetWebDisplayData(webDisplayDataList, frameId);
+            if (ret != APP_ERR_OK) {
+                errorInfo_ << GetError(ret, elementName_) << "Failed to get web display data.";
+                return ret;
+            }
+            ret = mxpiMetadataManager.AddProtoMetadata(elementName_,
+                std::static_pointer_cast<void>(webDisplayDataList));
+            if (ret != APP_ERR_OK) {
+                errorInfo_ << GetError(ret, elementName_) << "Failed to add metadata.";
+                return ret;
+            }
+            SendData(0, *mxpiBuffer);
+            break;
+        } else {
+            ++iterFrame;
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiFrameAlign::GetWebDisplayData(std::shared_ptr<MxpiWebDisplayDataList> &webDisplayDataList,
+    uint32_t &frameId)
+{
+    if (!objectListMap_.Find(frameId)) {
+        LogError << "FrameId is invalid. FrameId = " << frameId << ".";
+        return APP_ERR_COMM_INNER;
+    }
+    auto objectList = objectListMap_.Pop(frameId);
+    auto streamData = streamDataMap_.Pop(frameId);
+    MxpiWebDisplayData* webDisplayData = webDisplayDataList->add_webdisplaydatavec();
+    webDisplayData->CopyFrom(streamData.webDisplayData);
+    for (size_t i = 0; i < objectList.size(); ++i) {
+        MxpiBBox* boundingBox = webDisplayData->add_bbox_vec();
+        boundingBox->set_x0(objectList[i].x0);
+        boundingBox->set_y0(objectList[i].y0);
+        boundingBox->set_x1(objectList[i].x1);
+        boundingBox->set_y1(objectList[i].y1);
+    }
+    return APP_ERR_OK;
+}
+
+std::vector<std::string> MxpiFrameAlign::Split(const std::string &inString, char delimiter)
+{
+    std::vector<std::string> result;
+    if (inString.empty()) {
+        return result;
+    }
+
+    std::string::size_type fast = 0;
+    std::string::size_type slow = 0;
+    while ((fast = inString.find_first_of(delimiter, slow)) != std::string::npos) {
+        result.push_back(inString.substr(slow, fast - slow));
+        slow = inString.find_first_not_of(delimiter, fast);
+    }
+
+    if (slow != std::string::npos) {
+        result.push_back(inString.substr(slow, fast - slow));
+    }
+
+    return result;
+}
+
+std::string &MxpiFrameAlign::Trim(std::string &str)
+{
+    str.erase(0, str.find_first_not_of(' '));
+    str.erase(str.find_last_not_of(' ') + 1);
+
+    return str;
+}
+
+std::vector<std::string> MxpiFrameAlign::SplitWithRemoveBlank(std::string &str, char rule)
+{
+    Trim(str);
+    std::vector<std::string> strVec = Split(str, rule);
+    for (size_t i = 0; i < strVec.size(); i++) {
+        strVec[i] = Trim(strVec[i]);
+    }
+    return strVec;
+}
+
+void MxpiFrameAlign::SendThread()
+{
+    while (!sendStop_) {
+        // Periodically flush any frames whose sendFlag was set by AlignFrameObjectInfo
+        APP_ERROR ret = SendAlignFrame();
+        if (ret != APP_ERR_OK) {
+            LogError << errorInfo_.str();
+            continue;
+        }
+        std::this_thread::sleep_for(std::chrono::milliseconds(intervalTime_));
+    }
+}
+
+namespace {
+MX_PLUGIN_GENERATE(MxpiFrameAlign)
+}
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.h b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.h
new file mode 100644
index 000000000..fcb913304
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiFrameAlign/MxpiFrameAlign.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include "MxTools/PluginToolkit/base/MxPluginGenerator.h" +#include "MxTools/PluginToolkit/metadata/MxpiMetadataManager.h" +#include "MxTools/Proto/MxpiDataType.pb.h" +#include "MxBase/ErrorCode/ErrorCode.h" +#include "MxBase/CV/Core/DataType.h" +#include "MxpiAllObjectsStructuringDataType.pb.h" +#include "BlockingMap.h" + +#ifndef MXPLUGINS_MXPIFRAMEALIGN_H +#define MXPLUGINS_MXPIFRAMEALIGN_H + +struct StreamData { + MxpiWebDisplayData webDisplayData = {}; + bool sendFlag = false; +}; + +struct ObjectInfo { + std::string trackId; + float x0; + float y0; + float x1; + float y1; +}; + +class MxpiFrameAlign : public MxTools::MxPluginBase { +public: + /** + * @api + * @param configParamMap + * @return + */ + APP_ERROR Init(std::map> &configParamMap) override; + + /** + * @api + * @return + */ + APP_ERROR DeInit() override; + + /** + * @api + * @param mxpiBuffer + * @return + */ + APP_ERROR Process(std::vector &mxpiBuffer) override; + + /** + * @api + * @brief Definition the parameter of configure properties. + * @return std::vector> + */ + static std::vector> DefineProperties(); + + /** + * Optional, defines input ports of the plugin. + */ + static MxTools::MxpiPortInfo DefineInputPorts(); + + /** + * Optional, defines output ports of the plugin. + */ + static MxTools::MxpiPortInfo DefineOutputPorts(); + +private: + void GetStreamData(MxTools::MxpiBuffer &inputBuffer); + + APP_ERROR GetObjectList(MxTools::MxpiBuffer &inputBuffer); + + void AlignFrameObjectInfo(); + + bool HadTrackId(std::vector &objectInfoList, std::string &trackId); + + void ObjectInfoInterpolated(std::vector &interpolatedObjectInfoList, + std::vector &previousObjectInfoList, + std::vector &latterObjectInfoList, + float &offset); + + APP_ERROR SendAlignFrame(); + + APP_ERROR GetWebDisplayData(std::shared_ptr &webDisplayDataList, uint32_t &frameId); + + std::vector Split(const std::string &inString, char delimiter = ' '); + + std::string &Trim(std::string &str); + + std::vector SplitWithRemoveBlank(std::string &str, char rule); + + void SendThread(); + +private: + std::ostringstream errorInfo_ {}; + std::vector dataKeyVec_ = {}; + std::string dataSource_ = ""; + uint32_t previousFrameId_ = 0; + bool sendStop_ = false; + std::thread sendThread_ = {}; + int intervalTime_ = 0; + BlockingMap streamDataMap_ = {}; + BlockingMap> objectListMap_ = {}; +}; + +#endif diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/CMakeLists.txt b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/CMakeLists.txt new file mode 100644 index 000000000..afee91b5a --- /dev/null +++ b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/CMakeLists.txt @@ -0,0 +1,14 @@ +set(PLUGIN_NAME "mxpi_skipframe") +set(TARGET_LIBRARY ${PLUGIN_NAME}) + +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations) +add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}") + +add_library(${TARGET_LIBRARY} SHARED MxpiSkipFrame.cpp) + +target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0) +target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxbase mxpidatatype) +target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s) + +install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION ${PROJECT_SOURCE_DIR}/dist/lib) \ No newline at end of file diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.cpp b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.cpp new file 
mode 100644
index 000000000..7f20f036c
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MxpiSkipFrame.h"
+#include "MxBase/Log/Log.h"
+using namespace MxBase;
+using namespace MxTools;
+using namespace MxPlugins;
+
+APP_ERROR MxpiSkipFrame::Init(std::map<std::string, std::shared_ptr<void>> &configParamMap)
+{
+    LogInfo << "Begin to initialize MxpiSkipFrame(" << pluginName_ << ").";
+    // get parameters from website.
+    skipFrameNum_ = *std::static_pointer_cast<uint32_t>(configParamMap["frameNum"]);
+    LogInfo << "skip frame number(" << skipFrameNum_ << ").";
+    LogInfo << "End to initialize MxpiSkipFrame(" << pluginName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiSkipFrame::DeInit()
+{
+    LogInfo << "Begin to deinitialize MxpiSkipFrame(" << pluginName_ << ").";
+    LogInfo << "End to deinitialize MxpiSkipFrame(" << pluginName_ << ").";
+    return APP_ERR_OK;
+}
+
+APP_ERROR MxpiSkipFrame::Process(std::vector<MxpiBuffer*> &mxpiBuffer)
+{
+    MxpiBuffer *inputMxpiBuffer = mxpiBuffer[0];
+    MxpiMetadataManager mxpiMetadataManager(*inputMxpiBuffer);
+    auto errorInfoPtr = mxpiMetadataManager.GetErrorInfo();
+    if (errorInfoPtr != nullptr) {
+        LogWarn << "Input data is invalid, element(" << pluginName_ << ") plugin will not be executed correctly.";
+        SendData(0, *inputMxpiBuffer);
+        return APP_ERR_COMM_FAILURE;
+    }
+
+    if (skipFrameNum_ == 0) {
+        SendData(0, *inputMxpiBuffer);
+    } else {
+        // Forward one buffer out of every (skipFrameNum_ + 1); destroy the rest.
+        count++;
+        if ((count % (skipFrameNum_ + 1)) == 0) {
+            count = 0;
+            SendData(0, *inputMxpiBuffer);
+        } else {
+            MxpiBufferManager::DestroyBuffer(inputMxpiBuffer);
+        }
+    }
+    return APP_ERR_OK;
+}
+
+std::vector<std::shared_ptr<void>> MxpiSkipFrame::DefineProperties()
+{
+    std::vector<std::shared_ptr<void>> properties;
+    auto prop1 = std::make_shared<ElementProperty<uint32_t>>(ElementProperty<uint32_t> {
+        UINT,
+        "frameNum",
+        "frameNum",
+        "the number of frames to skip",
+        0, 0, 100
+    });
+    properties.push_back(prop1);
+    return properties;
+}
+
+MxpiPortInfo MxpiSkipFrame::DefineInputPorts()
+{
+    MxpiPortInfo inputPortInfo;
+    std::vector<std::vector<std::string>> value = {{"ANY"}};
+    GenerateStaticInputPortsInfo(value, inputPortInfo);
+    return inputPortInfo;
+}
+
+MxpiPortInfo MxpiSkipFrame::DefineOutputPorts()
+{
+    MxpiPortInfo outputPortInfo;
+    std::vector<std::vector<std::string>> value = {{"ANY"}};
+    GenerateStaticOutputPortsInfo(value, outputPortInfo);
+    return outputPortInfo;
+}
+namespace {
+    MX_PLUGIN_GENERATE(MxpiSkipFrame)
+}
\ No newline at end of file
diff --git a/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.h b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.h
new file mode 100644
index 000000000..0c22ede11
--- /dev/null
+++ b/mxVision/AllObjectsStructuring/plugins/MxpiSkipFrame/MxpiSkipFrame.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MXPLUGINS_MXPISKIPFRAME_H +#define MXPLUGINS_MXPISKIPFRAME_H + +#include "MxBase/ErrorCode/ErrorCode.h" +#include "MxTools/PluginToolkit/base/MxPluginGenerator.h" +#include "MxTools/PluginToolkit/buffer/MxpiBufferManager.h" +#include "MxTools/PluginToolkit/metadata/MxpiMetadataManager.h" +#include "MxTools/Proto/MxpiDataType.pb.h" + +/** + * This plugin is used for skip frame. + */ +namespace MxPlugins { + class MxpiSkipFrame : public MxTools::MxPluginBase { + public: + /** + * @description: Init configs. + * @param configParamMap: config. + * @return: Error code. + */ + APP_ERROR Init(std::map> &configParamMap) override; + + /** + * @description: DeInit device. + * @return: Error code. + */ + APP_ERROR DeInit() override; + + /** + * @description: MxpiSkipFrame plugin process. + * @param mxpiBuffer: data receive from the previous. + * @return: Error code. + */ + APP_ERROR Process(std::vector &mxpiBuffer) override; + + /** + * @description: MxpiSkipFrame plugin define properties. + * @return: properties. + */ + static std::vector> DefineProperties(); + + /** + * @api + * @brief Define the number and data type of input ports. + * @return MxTools::MxpiPortInfo. + */ + static MxTools::MxpiPortInfo DefineInputPorts(); + + /** + * @api + * @brief Define the number and data type of output ports. + * @return MxTools::MxpiPortInfo. + */ + static MxTools::MxpiPortInfo DefineOutputPorts(); + + private: + + uint32_t skipFrameNum_ = 0; + + uint32_t count = 0; + }; +} + +#endif \ No newline at end of file diff --git a/mxVision/AllObjectsStructuring/requirements.txt b/mxVision/AllObjectsStructuring/requirements.txt new file mode 100644 index 000000000..e213445df --- /dev/null +++ b/mxVision/AllObjectsStructuring/requirements.txt @@ -0,0 +1,5 @@ +numpy +opencv-python +Pillow==8.0.1 +protobuf==3.9.0 +websocket-server==0.4 \ No newline at end of file diff --git a/mxVision/AllObjectsStructuring/retrieval/__init__.py b/mxVision/AllObjectsStructuring/retrieval/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/mxVision/AllObjectsStructuring/retrieval/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" diff --git a/mxVision/AllObjectsStructuring/retrieval/feature_retrieval.py b/mxVision/AllObjectsStructuring/retrieval/feature_retrieval.py new file mode 100644 index 000000000..acccf3b6b --- /dev/null +++ b/mxVision/AllObjectsStructuring/retrieval/feature_retrieval.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import time +import faiss +import ascendfaiss +import numpy as np +from multiprocessing import Lock + +from util.checker import check_saving_path, check_loading_path + + +class Searcher: + _bucket_mode_list = ["ivf", "normal"] + _compression_mode_list = ["flat", "pq", "sq"] + _op_name_map = { + "normal": "", + "ivf": "IVF", + "flat": "Flat", + "pq": "PQ", + "sq": "SQ" + } + + def __init__(self, + mode="ivf_sq", + device_name="ascend", + device_id=(0, ), + d=128, + metrics=faiss.METRIC_L2, + nlist=16384, + m=64, + nprob=1, + quantizer=None, + sub_vec_enc_len=8, + sq_bits=faiss.ScalarQuantizer.QT_8bit, + base_mtx=None, + multi_flag=False): + self.mode = mode + self.device_name = device_name + self.device_ids = device_id + self.devices = None + self.d = d + self.metrics = metrics + self.nlist = nlist + self.m = m + self.nprob = nprob + self.sq_bits = sq_bits + self.sub_vec_enc_len = sub_vec_enc_len + self.quantizer = quantizer + self.base_mtx = base_mtx + self.config = None + self.bucket_mode = None + self.compression_mode = None + self.index = None + self.multi_flag = multi_flag + self.lock = Lock() if multi_flag else None + + self.parse_mode() + self.make_config() + self.build_indexer() + + def make_config(self): + if self.device_name == "ascend": + self.devices = ascendfaiss.IntVector() + self.device_ids = self.device_ids if isinstance( + self.device_ids, (list, tuple)) else [self.device_ids] + for device_id in self.device_ids: + if not isinstance(device_id, int): + raise TypeError("Wrong device id type.") + self.devices.push_back(device_id) + self.config = self.get_method("config")(self.devices) + + def parse_mode(self): + if not isinstance(self.mode, str): + raise TypeError("Mode should be a string.") + + parsed_mode = self.mode.split("_") + if len(parsed_mode) != 2: + raise ValueError("mode format should like \"_" + "\".") + + self.bucket_mode, self.compression_mode = parsed_mode + if self.bucket_mode not in self._bucket_mode_list or \ + self.compression_mode not in self._compression_mode_list: + raise ValueError(f"Note that bucket_mode should be included in" + f" {self._bucket_mode_list} and " + f"compression_mode should be included in" + f" {self._compression_mode_list}") + + def get_method(self, category): + method_name = "Index" + self._op_name_map[self.bucket_mode] + \ + self._op_name_map[self.compression_mode] + + method_name = "Ascend" + method_name if self.device_name == "ascend" \ + else method_name + pkg = ascendfaiss if self.device_name == "ascend" else faiss + + if category == "builder": + return getattr(pkg, method_name) + elif category == "config": + return 
getattr(pkg, method_name + "Config") + else: + raise ValueError("Invalid method type.") + + def get_parameters(self): + param_head = [self.d] + param_tail = [self.config] if self.config else [] + + if self.compression_mode == "sq": + param_tail = [self.metrics, True] + param_tail + elif self.mode == "normal_flat": + param_tail = [self.metrics] + param_tail + + if self.bucket_mode == "ivf": + param_head.append(self.nlist) + if self.device_name != "ascend": + param_head = [self.quantizer] + param_head + if self.quantizer is None: + raise ValueError("Please assign a quantizer for the " + "index.") + + if self.compression_mode == "sq": + param_head.append(self.sq_bits) + elif self.compression_mode == "pq": + param_head.extend([self.m, self.sub_vec_enc_len]) + + return param_head + param_tail + + def build_indexer(self): + building_method = self.get_method("builder") + parameters = self.get_parameters() + self.index = building_method(*parameters) + + if self.base_mtx is not None and not self.index.is_trained: + self.train(self.base_mtx) + + if self.base_mtx is not None: + self.add(self.base_mtx) + + def train(self, base_mtx): + if self.multi_flag: + self.lock.acquire() + if self.index.is_trained: + raise AssertionError("Index has been trained once.") + + self.index.train(base_mtx) + if self.multi_flag: + self.lock.release() + + def add(self, new_data, idx=None): + if idx: + if isinstance(idx, int): + idx = [idx] + + if isinstance(idx, (list, tuple)): + idx = np.array(idx, np.int) + + if not isinstance(idx, np.ndarray) or idx.dtype != np.int: + raise TypeError("idx type is invalid.") + + if idx.shape != (new_data.shape[0], ): + raise ValueError("idx rank mismatch.") + + if self.multi_flag: + self.lock.acquire() + self.index.add_with_ids(new_data, idx) + if self.multi_flag: + self.lock.release() + + else: + if self.multi_flag: + self.lock.acquire() + idx = self.index.ntotal + self.index.add(new_data) + if self.multi_flag: + self.lock.release() + + return idx + + def search(self, query, k=10): + if self.multi_flag: + self.lock.acquire() + ret = self.index.search(query, k) + if self.multi_flag: + self.lock.release() + + return ret + + def count(self): + return self.index.ntotal + + def set_multi_flag(self): + self.multi_flag = True + self.lock = Lock() + + def unset_multi_flag(self): + self.multi_flag = False + self.lock = None + + def save(self, saving_path): + check_saving_path(saving_path, "saving path", suffix_set=".index") + + if self.device_name == "ascend": + self.index = ascendfaiss.index_ascend_to_cpu(self.index) + + faiss.write_index(self.index, saving_path) + print(f"Your index has been saved at {saving_path}") + + def load(self, loading_path): + check_loading_path(loading_path, "loading path", suffix_set=".index") + + self.index = faiss.read_index(loading_path) + if self.device_name == "ascend": + self.index = ascendfaiss.index_cpu_to_ascend( + self.devices, self.index) + + print(f"Your index has restored from {loading_path}") + + +def main(): + d = 128 # vector dims + nb = 100000 # databse size + nq = 10 # query size + np.random.seed(1234) + xb = np.random.random((nb, d)).astype('float32') + n_new_count = 50 + x_new = np.random.random((n_new_count, d)).astype('float32') + xq = xb[:nq, :] + + nlist = 16384 + k = 5 + + quantizer = faiss.IndexFlatL2(d) + + timer = time.time() + index = Searcher( + mode="normal_flat", + device_id=[10], + quantizer=quantizer, + d=d, + nlist=nlist, + base_mtx=xb) + span = time.time() - timer + print(f"Building a new index costs {span} s") + + timer = 
time.time() + distance, indexes = index.search(xq, k) + span = time.time() - timer + print(f"Searching xq costs {span} s") + print(distance) + print("---------------------------------------------------------------") + print(indexes) + + timer = time.time() + index.add(x_new) + span = time.time() - timer + print(f"Adding x_new costs {span} s") + + print("Done!") + + index.save("test.index") + + index_new = Searcher(device_name="ascend") + index_new.load("test.index") + + +if __name__ == "__main__": + main() diff --git a/mxVision/AllObjectsStructuring/retrieval/register.py b/mxVision/AllObjectsStructuring/retrieval/register.py new file mode 100644 index 000000000..1fa97eef1 --- /dev/null +++ b/mxVision/AllObjectsStructuring/retrieval/register.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import json +import os +import cv2 +import numpy as np +from PIL import Image + +from multiprocessing import Lock + +from util.checker import check_loading_path, check_saving_path + + +class Canvas: + def __init__(self, desired_height, desired_width, resized_height=200): + self.canvas_height = desired_height + self.canvas_width = desired_width + self.resized_height = resized_height + + @staticmethod + def read(path): + if not isinstance(path, str): + raise ValueError("Please assign one string to indicates the " + "image path.") + + if not os.path.exists(path): + raise ValueError("Image directory does not exist.") + + return cv2.imread(path) + + def deploy(self, image): + canvas = np.zeros([self.canvas_height, self.canvas_width, 3], + dtype=np.uint8) + h, w, _ = image.shape + canvas[0:h, 0:w, :] = image + return canvas + + @staticmethod + def encode(image): + _, buf = cv2.imencode(".jpg", image) + return Image.fromarray(np.uint8(buf)).tobytes() + + def resize(self, image): + h, w, _ = image.shape + resized_width = round(self.resized_height / h * w) + return cv2.resize(image, (resized_width, self.resized_height)) + + def __call__(self, path, binary=True): + image = self.read(path) + image = self.resize(image) + enlarged_img = self.deploy(image) + + return self.encode(enlarged_img) if binary else enlarged_img + + +class Collector: + def __init__(self, root_dir, object_name, suffix=(".jpg",)): + if not isinstance(root_dir, str): + raise ValueError("Please assign one string to indicates the " + "root path which containing images.") + + if not os.path.exists(root_dir): + raise ValueError("Root directory does not exist.") + + self.root_dir = root_dir + self.object = object_name + self.suffix = suffix + self.collection = {} + self.collect(self.root_dir, self.object) + + def collect(self, current_path, current_name): + collection = os.listdir(current_path) + for child in collection: + child_path = os.path.join(current_path, child) + name, tail = os.path.splitext(child) + child_name = current_name + "_" + name + if os.path.isdir(child_path): + self.collect(child_path, child_name) + + elif tail in self.suffix: + 
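+                # image file: record its path under the hierarchical key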
self.collection[child_name] = child_path + + def __getitem__(self, item): + return self.collection.get(item) + + def __iter__(self): + for key, item in self.collection.items(): + yield key, item + + +class Map: + def __init__(self, map_path=None, multi_flag=False): + self.map = {} + self.multi_flag = multi_flag + self.lock = Lock() + if map_path: + check_loading_path(map_path, + "loading map path", + suffix_set=".json") + with open(map_path, 'r') as f: + self.map = json.load(f) + + print(f"Your map has restored from {map_path}") + + def __getitem__(self, item): + if item not in self.map: + raise ValueError("Given idx not found.") + + return self.map.get(item) + + def __setitem__(self, key, value): + if self.multi_flag: + self.lock.acquire() + self.map[key] = value + if self.multi_flag: + self.lock.release() + + def save(self, saving_path): + check_saving_path(saving_path, "saving map path", suffix_set=".json") + with open(saving_path, 'w') as f: + json.dump(self.map, f) + + print(f"Your map has been saved at {saving_path}") + + +def parse_feature(string): + dictionary = json.loads(string) + if dictionary.get("MxpiFeatureVector") is not None: + return dictionary.get("MxpiFeatureVector")[0].get("featureValues") + + +if __name__ == "__main__": + path_collection = Collector("../faces_to_register", "face") + idx2face = Map() + my_canvas = Canvas(720, 1280) + document = "face" + if not os.path.exists(document): + os.makedirs(document) + for image_key, image_path in path_collection: + print(f"{image_key}: {image_path}") + idx2face[image_key] = image_path + enlarged_image = my_canvas(image_path, binary=False) + save_dir = os.path.join(document, f"{image_key}.jpg") + cv2.imwrite(save_dir, enlarged_image) + + print("Done!") diff --git a/mxVision/AllObjectsStructuring/run.sh b/mxVision/AllObjectsStructuring/run.sh new file mode 100644 index 000000000..240fabc2e --- /dev/null +++ b/mxVision/AllObjectsStructuring/run.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
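+
+# Usage:
+#   bash run.sh          # run the main pipeline only
+#   bash run.sh index    # also build and query the face retrieval index
+# MX_SDK_HOME must point to the mxVision installation directory.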
+ +set -e + +function check_index() { + run_with_index=false + if [ ${param_num} -gt 0 ]; then + for param in ${params}; + do + if [ "${param}" == "index" ]; then + run_with_index=true + break + fi + done + fi +} + +# Simple log helper functions +info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } +warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } + +if [ -z "${MX_SDK_HOME}" ]; then + echo "MX_SDK_HOME not found" + exit -1 +fi + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:/opt/OpenBLAS/lib:/usr/local/lib:/usr/local/protobuf/lib:$PWD/dist/lib:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins:$PWD/dist/lib +export PYTHONPATH=${MX_SDK_HOME}/python:$PWD/dist/python:$PYTHONPATH + +param_num=$# +params=$@ +check_index +if [ "${run_with_index}" == true ]; then + echo "run with index" + python3.7.5 main.py +else + echo "run main pipeline" + python3.7.5 main.py -main-pipeline-only=True +fi diff --git a/mxVision/AllObjectsStructuring/util/__init__.py b/mxVision/AllObjectsStructuring/util/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/mxVision/AllObjectsStructuring/util/arguments.py b/mxVision/AllObjectsStructuring/util/arguments.py new file mode 100644 index 000000000..bdcb86a8d --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/arguments.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
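+
+Defines get_args_for_all_object_structurization(), which returns the
+argparse.Namespace consumed by the sample. Options follow the single-dash,
+multi-word style used throughout this sample (e.g. -main-pipeline-only=True),
+and each option declares an explicit dest attribute.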
+""" + +import argparse + + +def get_args_for_all_object_structurization(): + """ + module for argument configuration + """ + parser = argparse.ArgumentParser('Parameters for all object ' + 'structurization.') + parser.add_argument('-face-feature-pipeline-name', + type=str, + default='face-feature', + help='indicate the name of face feature pipeline path', + dest='face_feature_pipeline_name') + parser.add_argument('-face-feature-pipeline-path', + type=str, + default='./pipeline/face_registry.pipeline', + help='indicate the path of face feature pipeline path', + dest='face_feature_pipeline_path') + parser.add_argument('-face-root-path', + type=str, + default="./faces_to_register", + help='indicate the root path of face images.', + dest='face_root_path') + parser.add_argument('-canvas-size', + type=int, + nargs=2, + default=[720, 1280], + help='indicate the width and height of canvas which ' + 'to place the cropped faces.', + dest='canvas_size') + parser.add_argument('-index-loading-path_large', + type=str, + default="./database/large_base.index", + help='indicate the loading path of the large index.', + dest='index_loading_path_large') + parser.add_argument('-index-loading-path-little', + type=str, + default="./database/little_base.index", + help='indicate the root path of the little index.', + dest='index_loading_path_little') + parser.add_argument('-index-vector-dimension', + type=int, + default=256, + help='specify the dimension of face feature vector', + dest='index_vector_dimension') + parser.add_argument('-index-base-size', + type=int, + default=400000, + help='specify the dimension of initialized training ' + 'matrix for the large base.', + dest='index_base_size') + parser.add_argument('-index-cluster-count', + type=int, + default=8192, + help='specify the cluster number for ivf.', + dest='index_cluster_count') + parser.add_argument('-index-topk', + type=int, + default=1, + help='specify the number of nearest points by sort.', + dest='index_topk') + parser.add_argument('-index-little-device-ids', + type=int, + nargs="+", + default=[2], + help='specify the device assignment for little index.', + dest='index_little_device_ids') + parser.add_argument('-index-large-device-ids', + type=int, + nargs="+", + default=[3], + help='specify the device assignment for large index.', + dest='index_large_device_ids') + parser.add_argument('-idx2face-name-map-path', + type=str, + default='./database/idx2face.json', + help='indicate the path of idx to face name mapping ' + 'file.', + dest='idx2face_name_map_path') + parser.add_argument('-main-pipeline-only', + type=bool, + default=False, + help='whether only run main pipeline.', + dest='main_pipeline_only') + parser.add_argument('-main-pipeline-name', + type=str, + default='detection', + help='name of all object structurization pipeline.', + dest='main_pipeline_name') + parser.add_argument('-main-pipeline-path', + type=str, + default='./pipeline/AllObjectsStructuring.pipeline', + help='path of all object structurization pipeline.', + dest='main_pipeline_path') + parser.add_argument('-main-pipeline-channel-count', + type=int, + default=12, + help='channle count for given pipeline.', + dest='main_pipeline_channel_count') + parser.add_argument('-main-keys2fetch', + type=str, + nargs="+", + default=[ + 'face_attribute', 'face_feature', + 'mxpi_parallel2serial2', 'motor_attr', + 'car_plate', 'ReservedFrameInfo', + 'pedestrian_attribute', 'pedestrian_reid', + 'vision', 'object' + ], + help='specify the keys to fetch their corresponding ' + 'proto buf.', + 
dest='main_keys2fetch') + parser.add_argument('-main-stream-bbox-keys2fetch', + type=str, + nargs="+", + default=['mxpi_framealign0'], + help='specify the keys to fetch stream frame and mot data ', + dest='main_stream_bbox_keys2fetch' + ) + parser.add_argument('-main-save-fig', + type=bool, + default=False, + help='specify whether to save detected object as ' + 'image.', + dest='main_save_fig') + parser.add_argument('-main-base64-encode', + type=bool, + default=True, + help='specify whether to encode detected object into ' + 'base64.', + dest='main_base64_encode') + parser.add_argument('-display-stream-bbox-data', + type=bool, + default=False, + help='specify whether to display stream and bbox data ', + dest='display_stream_bbox_data') + parser.add_argument('-web-display-ip', + type=str, + default='0.0.0.0', + help='the ip of WebSocket server', + dest='web_display_ip') + parser.add_argument('-web-display-port', + type=int, + default=30020, + help='the port of WebSocket server', + dest='web_display_port') + + args = parser.parse_args() + + return args diff --git a/mxVision/AllObjectsStructuring/util/channel_status.py b/mxVision/AllObjectsStructuring/util/channel_status.py new file mode 100644 index 000000000..802e76d8f --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/channel_status.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +class ChannelStatus: + """ + management for channel requiring state + """ + def __init__(self): + self.status_dict = {} + + def update(self, channel_id, status): + self.status_dict[channel_id] = status + + def isAlive(self, channel_id): + if channel_id not in self.status_dict: + return False + else: + return self.status_dict[channel_id] diff --git a/mxVision/AllObjectsStructuring/util/checker.py b/mxVision/AllObjectsStructuring/util/checker.py new file mode 100644 index 000000000..feb739f32 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/checker.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
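+
+Path-validation helpers shared by the retrieval and registry modules:
+check_loading_path() verifies that a file exists and carries an allowed
+suffix, while check_saving_path() additionally creates the target
+directory. A missing loading path raises NotADirectoryError, which
+callers such as main_entry catch to trigger index creation.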
+""" + +import os + + +def check_loading_path(loading_path, path_name, suffix_set=()): + if not isinstance(suffix_set, (list, tuple)): + suffix_set = [suffix_set] + + for item in suffix_set: + if not isinstance(item, str): + raise TypeError("Wrong suffix type.") + + if not isinstance(loading_path, str): + raise ValueError(f"please specify a string as the {path_name}") + + _, suffix = os.path.splitext(loading_path) + if suffix not in suffix_set: + raise ValueError(f"Please specify a {path_name} with suffix '.index'.") + + if not os.path.exists(loading_path): + raise NotADirectoryError(f"Given {path_name} does not exist.") + + +def check_saving_path(saving_path, path_name, suffix_set=()): + if not isinstance(suffix_set, (list, tuple)): + suffix_set = [suffix_set] + + for item in suffix_set: + if not isinstance(item, str): + raise TypeError("Wrong suffix type.") + + if not isinstance(saving_path, str): + raise ValueError(f"please assign a string as the {path_name}") + + root_dir, file_name = os.path.split(saving_path) + root_dir = "./" if root_dir == "" else root_dir + + os.makedirs(root_dir, exist_ok=True) + + _, suffix = os.path.splitext(file_name) + + if suffix not in suffix_set: + raise ValueError(f"Please assign suffix '.index' to {path_name}.") diff --git a/mxVision/AllObjectsStructuring/util/display.py b/mxVision/AllObjectsStructuring/util/display.py new file mode 100644 index 000000000..eeec2b9c1 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/display.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from multiprocessing.context import Process +from threading import Thread +from multiprocessing import Value + + +class Display(Process): + """ + module for displaying + """ + + def __init__(self, args, infer_result_queue_by_channel, stream_bbox_queue_by_channel): + super().__init__() + self.args = args + self.infer_result_queue_by_channel = infer_result_queue_by_channel + self.stream_bbox_queue_by_channel = stream_bbox_queue_by_channel + self.channel_switch = [Value('I', 0) for _ in infer_result_queue_by_channel] + + def run(self): + if self.args.display_stream_bbox_data: + stream_bbox_processor_list = [ + WebDisplayProcessor(stream_bbox_display_queue, channel_id) + for channel_id, stream_bbox_display_queue in self.stream_bbox_queue_by_channel + ] + for processor in stream_bbox_processor_list: + processor.start() + + for switch in self.channel_switch: + switch.value = 1 + + infer_result_processor_list = [ + InferResultProcessor(infer_result_display_queue, channel_id, switch) + for (channel_id, infer_result_display_queue + ), switch in + zip(self.infer_result_queue_by_channel, self.channel_switch) + ] + for processor in infer_result_processor_list: + processor.start() + + for processor in infer_result_processor_list: + processor.join() + + +class InferResultProcessor(Thread): + """ + infer result displaying thread for each channel + """ + + def __init__(self, infer_result_display_queue, channel_id, switch): + super().__init__() + self.channel_id = channel_id + self.infer_result_display_queue = infer_result_display_queue + self.switch = switch + + def run(self): + while True: + ret = self.infer_result_display_queue.get() + del ret["attribute"] + del ret["feature_vector"] + del ret["image_encoded"] + if ret.get("object"): + del ret["object"] + if not self.switch.value: + continue + + print("output result:") + print(ret) + + +class WebDisplayProcessor(Thread): + """ + stream and bounding box displaying thread for each channel + """ + + def __init__(self, stream_bbox_display_queue, channel_id): + super().__init__() + self.channel_id = channel_id + self.stream_bbox_display_queue = stream_bbox_display_queue + + def run(self): + while True: + ret = self.stream_bbox_display_queue.get() + del ret["web_display_data_serialized"] + del ret["web_display_data_dict"] + print("stream data and bounding box result:") + print(ret) diff --git a/mxVision/AllObjectsStructuring/util/main_entry.py b/mxVision/AllObjectsStructuring/util/main_entry.py new file mode 100644 index 000000000..bfeba40e6 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/main_entry.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import numpy as np + +from retrieval.register import Map, Collector, Canvas, parse_feature +from util.multi_process import MultiprocessingQueue, MultiprocessingDictQueue +from util.arguments import get_args_for_all_object_structurization +from util.pipeline import Pipeline +from main_pipeline.main_pipeline import MainPipeline + + +class AllObjectStructurization: + def __init__(self, display): + self.display = display + self.index_little = None + self.index_large = None + self.idx2face_little = None + self.face_register_pipeline = None + self.idx2face_large = Map(multi_flag=True) + self.queue_obj = MultiprocessingQueue() + self.args = get_args_for_all_object_structurization() + self.infer_result_queue_by_channel = MultiprocessingDictQueue( + self.args.main_pipeline_channel_count) + self.stream_bbox_queue_by_channel = MultiprocessingDictQueue( + self.args.main_pipeline_channel_count) + self.main() + + def get_index(self): + import faiss + from retrieval.feature_retrieval import Searcher + + d = self.args.index_vector_dimension # vector dims + quantizer = faiss.IndexFlatL2(d) + + self.index_little = Searcher( + mode="normal_flat", + device_id=self.args.index_little_device_ids, + quantizer=quantizer, + d=d, + nlist=self.args.index_cluster_count) + self.index_large = Searcher(mode="ivf_sq", + device_id=self.args.index_large_device_ids, + quantizer=quantizer, + d=d, + nlist=self.args.index_cluster_count) + + try: + self.index_little.load(self.args.index_loading_path_little) + self.idx2face_little = Map(self.args.idx2face_name_map_path) + except NotADirectoryError: + self.create_little_index() + + try: + self.index_large.load(self.args.index_loading_path_large) + except NotADirectoryError: + self.create_large_index() + + def create_little_index(self): + self.idx2face_little = Map() + self.register_faces() + self.index_little.save(self.args.index_loading_path_little) + self.idx2face_little.save(self.args.idx2face_name_map_path) + + def create_large_index(self): + np.random.seed(1234) + xb = np.random.random( + (self.args.index_base_size, + self.args.index_vector_dimension)).astype('float32') + self.index_large.train(xb) + self.index_large.save(self.args.index_loading_path_large) + + def register_faces(self): + self.face_register_pipeline = Pipeline( + pipeline_cfg_file=self.args.face_feature_pipeline_path, + stream_name=self.args.face_feature_pipeline_name, + in_plugin_id=0) + canvas_width, canvas_height = self.args.canvas_size + collection = Collector(self.args.face_root_path, "face") + canvas = Canvas(canvas_height, canvas_width) + idx = 0 + for key, path in collection: + print(f"{key}: {path}") + enlarged_img = canvas(path, binary=True) + ret = self.face_register_pipeline.infer(image_bin=enlarged_img) + if ret: + vector = parse_feature(ret) + if vector: + mtx = np.asarray(vector, dtype=np.float32)[np.newaxis, :] + self.index_little.add(mtx, idx) + self.idx2face_little[str(idx)] = key + idx += 1 + + def main(self): + if not self.args.main_pipeline_only: + self.get_index() + + main_pipeline = MainPipeline(self.args, self.queue_obj, self.stream_bbox_queue_by_channel) + feature_retrieval = RegisterAndRetrive(self.args, self.index_little, + self.index_large, + self.idx2face_little, + self.idx2face_large, + self.queue_obj, + self.infer_result_queue_by_channel) + display = self.display(self.args, self.infer_result_queue_by_channel, self.stream_bbox_queue_by_channel) + + try: + main_pipeline.start() + display.start() + feature_retrieval.run() + except KeyboardInterrupt: + if 
main_pipeline.is_alive(): + main_pipeline.kill() + if display.is_alive(): + display.kill() + print("Stop AllObjectsStructuring successfully.") + + +class RegisterAndRetrive: + def __init__(self, args, index_little, index_large, idx2face_little, + idx2face_large, queue_obj, queue_display): + self.args = args + self.index_little = index_little + self.index_large = index_large + self.idx2face_little = idx2face_little + self.idx2face_large = idx2face_large + self.queue_obj = queue_obj + self.queue_display = queue_display + + def run(self): + while True: + obj_dict = self.queue_obj.get() + channel_id = obj_dict.get("channel_id") + if not isinstance(channel_id, int): + raise IOError("Channel Id not found.") + + if self.args.main_pipeline_only or \ + obj_dict.get("object_name") != "face": + self.queue_display.put(obj_dict, channel_id) + continue + + feat_vec = obj_dict.get("feature_vector") + feat_vec = np.asarray(feat_vec, dtype=np.float32)[np.newaxis, :] + obj_idx = obj_dict.get("object_index") + + # Todo 序号需要转换成字符吗 + idx_large = self.index_large.add(feat_vec) + self.idx2face_large[idx_large] = obj_idx + + distance, indexes = self.index_little.search( + feat_vec, self.args.index_topk) + idx_little = str(indexes[0][0]) + retrieved_key = self.idx2face_little[idx_little] + obj_dict["retrieved_key"] = retrieved_key + self.queue_display.put(obj_dict, channel_id) diff --git a/mxVision/AllObjectsStructuring/util/multi_process.py b/mxVision/AllObjectsStructuring/util/multi_process.py new file mode 100644 index 000000000..825607a07 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/multi_process.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from multiprocessing import Lock +from multiprocessing import Queue + + +class MultiprocessingQueue: + def __init__(self): + self.queue = Queue() + self.lock = Lock() + + def put(self, element): + self.lock.acquire() + self.queue.put(element) + self.lock.release() + + def get(self): + return self.queue.get(True) + + +class MultiprocessingDictQueue: + def __init__(self, channel_count): + self.chl_ret = dict() + for i in range(channel_count): + self.chl_ret[i] = MultiprocessingQueue() + + def put(self, element, channel_id): + self.chl_ret[int(channel_id)].put(element) + + def get(self, channel_id): + return self.chl_ret[int(channel_id)].get() + + def __iter__(self): + for key, item in self.chl_ret.items(): + yield key, item diff --git a/mxVision/AllObjectsStructuring/util/pipeline.py b/mxVision/AllObjectsStructuring/util/pipeline.py new file mode 100644 index 000000000..c54767984 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/pipeline.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os +import datetime +from time import sleep +from google.protobuf import json_format + +from util.yuv import yuv2bgr +from StreamManagerApi import MxDataInput, StreamManagerApi, StringVector +import MxpiDataType_pb2 as MxpiDataType +import MxpiAllObjectsStructuringDataType_pb2 as WebDisplayDataType + + +def binary2string(string): + if isinstance(string, str): + return string + + return string.decode() + + +def string2binary(string): + if isinstance(string, str): + return string.encode() + + return string + + +class Pipeline: + key_name2data_struc = { + 'face_attribute': 'MxpiAttributeList', + 'face_feature': 'MxpiFeatureVectorList', + 'mxpi_facealignment0': 'MxpiVisionList', # 获取人脸图片 + 'mxpi_parallel2serial2': 'MxpiVisionList', # 获取行人车辆图片 + 'ReservedFrameInfo': 'MxpiFrameInfo', + 'motor_attr': 'MxpiAttributeList', + 'car_plate': 'MxpiAttributeList', + 'pedestrian_attribute': 'MxpiAttributeList', + 'pedestrian_reid': 'MxpiFeatureVectorList', + 'vision': 'MxpiVisionList', + 'object': 'MxpiObjectList', + 'mxpi_framealign0': 'MxpiWebDisplayDataList' + } + clr_cvt_map = { + "face": "COLOR_YUV2BGR_NV12", # "COLOR_YUV2BGR_I420", + "motor-vehicle": "COLOR_YUV2BGR_NV12", + "person": "COLOR_YUV2BGR_NV12" + } + + def __init__(self, + pipeline_cfg_file=None, + stream_name=None, + in_plugin_id=None, + out_plugin_id=None, + out_stream_plugin_id=None, + keys=None, + stream_bbox_key=None): + self.stream_name = string2binary(stream_name) + self.in_plugin_id = in_plugin_id + self.out_plugin_id = out_plugin_id + self.out_stream_plugin_id = out_stream_plugin_id + self.key_vec = None + self.stream_key_vec = None + self.bbox_key_vec = None + self.dataInput = MxDataInput() + self.set_fetch_keys(keys) + self.set_stream_bbox_keys(stream_bbox_key) + self.infer_result_has_errorCode = False + self.stream_bbox_data_has_errorCode = False + + if not os.path.exists(pipeline_cfg_file): + raise IsADirectoryError("Given pipeline config path is invalid.") + + with open(pipeline_cfg_file, 'rb') as f: + pipeline_str = f.read() + + self.streams = StreamManagerApi() + ret = self.streams.InitManager() + if ret != 0: + raise SystemError("Failed to init Stream manager, ret=%s" % + str(ret)) + + ret = self.streams.CreateMultipleStreams(pipeline_str) + if ret != 0: + raise IOError("Failed to create Stream, ret=%s" % str(ret)) + + def set_fetch_keys(self, keys): + if not keys: + return + if isinstance(keys, (list, tuple)): + self.key_vec = StringVector() + for key in keys: + self.key_vec.push_back(string2binary(key)) + + def set_stream_bbox_keys(self, stream_bbox_key): + if not stream_bbox_key: + return + if isinstance(stream_bbox_key, (list, tuple)): + self.stream_key_vec = StringVector() + for key in stream_bbox_key: + self.stream_key_vec.push_back(string2binary(key)) + + def get_single_stream_ret(self, save_fig=False, base64_enc=False): + ret_dict = { + "stream_name": binary2string(self.stream_name), + "out_plugin_id": self.out_plugin_id, + "channel_id": None, + "frame_id": None, + "object_name": None, + "image": None, + "attribute": None, + "feature_vector": None + } + while True: + infer_result = 
self.streams.GetProtobuf(self.stream_name, + self.out_plugin_id, + self.key_vec) + if infer_result.size() == 0: + sleep(0.1) + continue + + for item in infer_result: + if item.errorCode != 0: + self.infer_result_has_errorCode = True + continue + + self.parse_item(item, ret_dict) + + if self.infer_result_has_errorCode: + self.infer_result_has_errorCode = False + sleep(0.1) + continue + + self.generate_fig(ret_dict, + save_fig=save_fig, + base64_enc=base64_enc) + + if ret_dict["object_name"]: + ret_dict["object_index"] = "_".join([ + ret_dict.get("stream_name"), + str(ret_dict.get("channel_id")), + str(ret_dict.get("frame_id")), + ret_dict.get("object_name") + ]) + else: + continue + return ret_dict + + def get_stream_bbox_data(self): + ret_dict = { + "stream_name": binary2string(self.stream_name), + "out_plugin_id": self.out_stream_plugin_id, + "channel_id": None, + "frame_id": None, + "web_display_data_serialized": None, + "web_display_data_dict": None, + } + while True: + sleep(0.01) + infer_result = self.streams.GetProtobuf(self.stream_name, + self.out_stream_plugin_id, + self.stream_key_vec) + + if infer_result.size() == 0: + sleep(0.1) + continue + + item = infer_result[0] + if item.errorCode != 0: + continue + self.parse_item(item, ret_dict) + + return ret_dict + + def generate_fig(self, ret_dict, save_fig=False, base64_enc=False): + image = ret_dict.get("image") if ret_dict.get("image") is not None \ + else {} + image_b = image.get("image_b") + height = image.get("height") + width = image.get("width") + if not image_b: + return + + ret_dict["image"] = None + save_dir = None + if save_fig: + object_name = ret_dict.get("object_name") + channel_id = ret_dict.get("channel_id") + dir_name = object_name + "/" + str(channel_id) + if not os.path.exists(dir_name): + os.makedirs(dir_name, exist_ok=True) + file_name = f"{datetime.datetime.now().timestamp()}.jpg" + save_dir = os.path.join(dir_name, file_name) + + object_name = ret_dict.get("object_name") + clr_cvt_mtd = self.clr_cvt_map.get(object_name) + if not clr_cvt_mtd: + raise ValueError(f"Cannot find the color convert method with " + f"respect to object {object_name}") + + cvt_ret = yuv2bgr(image_b, + height, + width, + clr_cvt_mtd=clr_cvt_mtd, + output_path=save_dir, + base64_enc=base64_enc) + if cvt_ret: + ret_dict['image_encoded'] = cvt_ret + + def parse_item(self, item, ret_dict: dict): + item_key = binary2string(item.messageName) + item_value = item.messageBuf + data_struc = self.key_name2data_struc[item_key] + if data_struc == 'MxpiWebDisplayDataList': + data_parser = getattr(WebDisplayDataType, data_struc)() + else: + data_parser = getattr(MxpiDataType, data_struc)() + data_parser.ParseFromString(item_value) + + if data_struc == 'MxpiVisionList': + # Todo 目标类别识别方法需要优化 + if item_key == "mxpi_parallel2serial2": + ret_dict["object_name"] = "face" + + ret_dict["image"] = \ + {"image_b": data_parser.visionVec[0].visionData.dataStr, + "height": data_parser.visionVec[0].visionInfo.heightAligned, + "width": data_parser.visionVec[0].visionInfo.widthAligned} + + elif data_struc == 'MxpiFeatureVectorList': + result = json_format.MessageToDict(data_parser) + ret_dict["feature_vector"] = result['featureVec'][0][ + 'featureValues'] + + elif data_struc == 'MxpiAttributeList': + result = json_format.MessageToDict(data_parser) + if ret_dict["attribute"] is None: + ret_dict["attribute"] = result['attributeVec'] + else: + for item in result['attributeVec']: + ret_dict["attribute"].append(item) + + elif data_struc == 'MxpiFrameInfo': + 
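+            # frame info supplies the channel id and frame id that key the
+            # per-channel result queues downstream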
ret_dict["channel_id"] = data_parser.channelId + ret_dict["frame_id"] = data_parser.frameId + + elif data_struc == 'MxpiObjectList': + result = json_format.MessageToDict(data_parser) + ret_dict["object"] = result + ret_dict["object_name"] = result['objectVec'][0]['classVec'][0][ + 'className'] + + elif data_struc == 'MxpiWebDisplayDataList': + result = data_parser.webDisplayDataVec[0].SerializeToString() + result_dict = json_format.MessageToDict(data_parser.webDisplayDataVec[0]) + ret_dict["channel_id"] = data_parser.webDisplayDataVec[0].channel_id + ret_dict["frame_id"] = data_parser.webDisplayDataVec[0].frame_index + ret_dict["web_display_data_serialized"] = result + ret_dict["web_display_data_dict"] = result_dict + + def put(self, image_b): + """ + send data only + """ + self.dataInput.data = image_b + self.out_plugin_id = self.streams.SendDataWithUniqueId( + self.stream_name, self.in_plugin_id, self.dataInput) + + def get(self): + inferResult = self.streams.GetResultWithUniqueId( + self.stream_name, self.out_plugin_id, 3000) + if inferResult.errorCode != 0: + print("GetResultWithUniqueId error. errorCode=%d, " + "errorMsg=%s" % + (inferResult.errorCode, inferResult.data.decode())) + + else: + return inferResult.data.decode() + + def infer(self, image_bin): + self.put(image_bin) + return self.get() + + def destory_stream(self): + self.streams.DestroyAllStreams() + + +if __name__ == '__main__': + pipeline_cfg_file_path = "../pipeline/AllObjectsStructuring" \ + ".pipeline" + stream_name_str = "detection" + plugin_id = 0 + desired_keys = [ + b'face_attribute', b'face_feature', b'mxpi_parallel2serial2', + b'motor_attr', b'car_plate', b'ReservedFrameInfo', + b'pedestrian_attribute', b'pedestrian_reid', b'vision', b'object' + ] + pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path, + stream_name=stream_name_str, + out_plugin_id=plugin_id, + keys=desired_keys) + while True: + buffer = pipeline.get_single_stream_ret(save_fig=True, base64_enc=True) + print(buffer) diff --git a/mxVision/AllObjectsStructuring/util/yuv.py b/mxVision/AllObjectsStructuring/util/yuv.py new file mode 100644 index 000000000..e291152d9 --- /dev/null +++ b/mxVision/AllObjectsStructuring/util/yuv.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import cv2 +import base64 +import numpy as np + +from PIL import Image + + +def yuv2bgr(image_b, + height, + width, + clr_cvt_mtd="COLOR_YUV2BGR_NV12", + output_path=None, + base64_enc=False): + yuv_vector = np.frombuffer(image_b, np.uint8) + + yuv_mtx = yuv_vector.reshape(height * 3 // 2, width) + bgr_mtx = cv2.cvtColor(yuv_mtx, getattr(cv2, clr_cvt_mtd)) + + if output_path: + cv2.imwrite(output_path, bgr_mtx) + + if base64_enc: + ret, buf = cv2.imencode(".jpg", bgr_mtx) + img_bin = Image.fromarray(np.uint8(buf)).tobytes() + encoded_ret = base64.b64encode(img_bin).decode() + return encoded_ret + + if not (output_path or base64_enc): + raise NotImplementedError("Please specify at least one method among " + "saving and encoding image.") + + +if __name__ == '__main__': + pass diff --git a/mxVision/InferOfflineVideo/regular/README.zh.md b/mxVision/InferOfflineVideo/regular/README.zh.md new file mode 100644 index 000000000..6c9a40db7 --- /dev/null +++ b/mxVision/InferOfflineVideo/regular/README.zh.md @@ -0,0 +1,52 @@ +# InferOfflineVideo + +## 1 简介 + +InferOfflineVideo基于mxVision SDK开发的参考用例,以昇腾Atlas300卡为主要的硬件平台,用于在视频流中检测出目标。 + +## 2 环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| ----------------------------------- | -------------- | +| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 | +| x86_64+Atlas 300I 推理卡(型号3010) | CentOS 7.6 | +| ARM+Atlas 300I 推理卡 (型号3000) | Ubuntu 18.04.1 | +| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 | + +- 软件依赖 + +| 软件名称 | 版本 | +| -------- | ------ | +| cmake | 3.5.1+ | +| mxVision | 0.2 | + +## 3 准备 + +**步骤1:** 参考安装教程《mxVision 用户指南》安装 mxVision SDK。 + +**步骤2:** 配置 mxVision SDK 环境变量。 + +`export MX_SDK_HOME=${安装路径}/mxVision ` + +注:本例中mxVision SDK安装路径为 /root/MindX_SDK。 + +**步骤3:** 在regular目录下创建目录models `mkdir models`, 根据《mxVision 用户指南》中“模型支持列表”章节获取Yolov3种类模型,并放到该目录下。 + +**步骤4:** 修改regular/pipeline/regular.pipeline文件: + +①:将所有“rtspUrl”字段值替换为可用的 rtsp 流源地址(目前只支持264格式的rtsp流,例:"rtsp://xxx.xxx.xxx.xxx:xxx/input.264", 其中xxx.xxx.xxx.xxx:xxx为ip和端口号); + +②:将所有“deviceId”字段值替换为实际使用的device的id值,可用的 device id 值可以使用如下命令查看:`npu-smi info` + +③:如需配置多路输入视频流,需要配置多个拉流、解码、缩放、推理、序列化插件,然后将多个序列化插件的结果输出发送到串流插件mxpi_parallel2serial(有关串流插件使用请参考《mxVision 用户指南》中“串流插件”章节),最后连接到appsink0插件。 + +## 4 运行 + +运行 +`bash run.sh` + +正常启动后,控制台会输出检测到各类目标的对应信息,结果日志将保存到`${安装路径}/mxVision/logs`中 + +手动执行ctrl + C结束程序 diff --git a/mxVision/InferOfflineVideo/regular/main.cpp b/mxVision/InferOfflineVideo/regular/main.cpp new file mode 100644 index 000000000..b22f526e9 --- /dev/null +++ b/mxVision/InferOfflineVideo/regular/main.cpp @@ -0,0 +1,71 @@ +/* +* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ +#include +#include +#include "MxBase/Log/Log.h" +#include "MxStream/StreamManager/MxStreamManager.h" + +namespace { +bool signalRecieved = false; +} + +static void SigHandler(int signal) +{ + if (signal == SIGINT) { + signalRecieved = true; + } +} + +APP_ERROR TestInferOfflineVideo() +{ + // init stream manager + MxStream::MxStreamManager mxStreamManager; + APP_ERROR ret = mxStreamManager.InitManager(); + if (ret != APP_ERR_OK) { + LogInfo << "Failed to init Stream manager, ret = " << ret << "."; + return ret; + } + + // create stream by pipeline config file + std::string pipelineConfigPath = "./pipeline/regular.pipeline"; + ret = mxStreamManager.CreateMultipleStreamsFromFile(pipelineConfigPath); + if (ret != APP_ERR_OK) { + LogInfo << "Failed to create Stream, ret = " << ret << "."; + return ret; + } + signal(SIGINT, SigHandler); + + while (!signalRecieved) { + MxStream::MxstDataOutput* output = mxStreamManager.GetResult("inferofflinevideo", 0); + if (output->errorCode != APP_ERR_OK) { + LogInfo << "Failed to get pipeline output, ret = " << output->errorCode; + mxStreamManager.DestroyAllStreams(); + return output->errorCode; + } + std::string result = std::string((char *)output->dataPtr, output->dataSize); + LogInfo << "Results:" << result << "\n\n"; + } + + // destroy streams + mxStreamManager.DestroyAllStreams(); + return APP_ERR_OK; +} + +int main(int argc, char* argv[]) +{ + TestInferOfflineVideo(); + return 0; +} diff --git a/mxVision/InferOfflineVideo/regular/pipeline/regular.pipeline b/mxVision/InferOfflineVideo/regular/pipeline/regular.pipeline new file mode 100644 index 000000000..5c50515f0 --- /dev/null +++ b/mxVision/InferOfflineVideo/regular/pipeline/regular.pipeline @@ -0,0 +1,60 @@ +{ + "inferofflinevideo": { + "stream_config": { + "deviceId": "0" + }, + "mxpi_rtspsrc0": { + "props": { + "rtspUrl": "rtsp://xxx.xxx.xxx.xxx:xxx/input.264", + "channelId": "0" + }, + "factory": "mxpi_rtspsrc", + "next": "mxpi_videodecoder0" + }, + "mxpi_videodecoder0": { + "props": { + "inputVideoFormat": "H264", + "outputImageFormat": "YUV420SP_NV12", + "deviceId": "0", + "vdecChannelId": "0" + }, + "factory": "mxpi_videodecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_videodecoder0", + "resizeHeight": "416", + "resizeWidth": "416", + "deviceId": "0" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "./models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "./models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "./models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so", + "deviceId": "0" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "409600000" + }, + "factory": "appsink" + } + } +} diff --git a/mxVision/InferOfflineVideo/regular/run.sh b/mxVision/InferOfflineVideo/regular/run.sh new file mode 100644 index 000000000..f63eeffbb --- /dev/null +++ b/mxVision/InferOfflineVideo/regular/run.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib":"${MX_SDK_HOME}/opensource/lib":"${MX_SDK_HOME}/opensource/lib64":"/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64":${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER="${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner" +export GST_PLUGIN_PATH="${MX_SDK_HOME}/opensource/lib/gstreamer-1.0":"${MX_SDK_HOME}/lib/plugins" + +# complie +g++ main.cpp -I "${MX_SDK_HOME}/include/" -I "${MX_SDK_HOME}/opensource/include/" -L "${MX_SDK_HOME}/lib/" \ +-L "${MX_SDK_HOME}/opensource/lib/" -std=c++11 -pthread -D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private -fPIC -fstack-protector-all \ +-g -Wl,-z,relro,-z,now,-z,noexecstack -pie -Wall -lglog -lmxbase -lstreammanager -lcpprest -lmindxsdk_protobuf -o main + +# run +./main +exit 0 diff --git a/mxVision/MediaCodec/CMakeLists.txt b/mxVision/MediaCodec/CMakeLists.txt new file mode 100644 index 000000000..c6ee71bf0 --- /dev/null +++ b/mxVision/MediaCodec/CMakeLists.txt @@ -0,0 +1,29 @@ +cmake_minimum_required(VERSION 3.5.1) +project(Test) + +set(CMAKE_CXX_STANDARD 11) + +if(NOT DEFINED ENV{MX_SDK_HOME}) + message(FATAL_ERROR "MX_SDK_HOME is not defined, please set it first.") +else() + set(MX_SDK_HOME $ENV{MX_SDK_HOME}) + message("MX_SDK_HOME=$ENV{MX_SDK_HOME}") +endif() + +add_definitions(-D _GLIBCXX_USE_CXX11_ABI=0) +set(TARGET_NAME mxVisionMediaCodec) + +include_directories(${MX_SDK_HOME}/include) +include_directories(${MX_SDK_HOME}/opensource/include) +include_directories(${MX_SDK_HOME}/opensource/include/opencv4) +include_directories(${MX_SDK_HOME}/opensource/include/gstreamer-1.0) +include_directories(${MX_SDK_HOME}/opensource/include/glib-2.0) +include_directories(${MX_SDK_HOME}/opensource/lib/glib-2.0/include) + +link_directories(${MX_SDK_HOME}/lib) +link_directories(${MX_SDK_HOME}/opensource/lib) + +link_directories(/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64) + +add_executable(${TARGET_NAME} main.cpp) +target_link_libraries(${TARGET_NAME} streammanager cpprest) diff --git a/mxVision/MediaCodec/README.zh.md b/mxVision/MediaCodec/README.zh.md new file mode 100644 index 000000000..bd9d3656a --- /dev/null +++ b/mxVision/MediaCodec/README.zh.md @@ -0,0 +1,155 @@ +# Media Codec + +## 1.介绍 + +视频转码样例是基于`mxVision`提供的插件库实现将视频解码、缩放、编码的流程。目前能满足如下的性能: + +| 格式 | 路数 | +| - | - | +| **D1**(height: 480 width: 720) | 10 | +| **CIF**(height: 288 width: 352) | 16 | + +## 2.环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| ------------------------------------ | -------------- | +| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 | +| x86_64+Atlas 300I 推理卡(型号3010) | CentOS 7.6 | +| ARM+Atlas 300I 推理卡 (型号3000) | Ubuntu 18.04.1 | +| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 | + +- 软件依赖 + +| 软件名称 | 版本 | +| -------- | ------ | +| cmake | 3.5.1+ | +| mxVision | 0.2 | + +## 3.预准备 + +脚本转换为unix格式以及添加脚本执行权限 + +```bash +sed -i 's/\r$//' ./script/*.sh +chmod +x ./script/*.sh +``` + +## 4.编译 + +- 配置环境变量 + +```bash +export MX_SDK_HOME=${安装路径}/mxVision +``` + +- 执行`./script/build.sh`,在dist文件夹中会生成`mxVisonCodec`。 + +```bash +./script/build.sh +``` + +## 5.运行 + +### 5.1 运行前配置 + +- 构建rtsp视频流服务 + 
+构建rtsp视频流服务生成rtsp流地址。 + +- 修改pipeline + +转码过程主要是:`视频拉流`--》`视频解码`--》`图像缩放`--》`视频编码`,根据用户要求修改芯片deviceId、rtsp视频流地址、vdecChannelId、缩放大小(**D1**/**CIF**)等。 + +为了提升修改效率,提供了`./script/create_pipeline.sh`脚本,需要修改脚本里面的配置参数。 + +```bash +#!/bin/bash +file_path=$(cd `dirname $0`; pwd) + +#需要生成的多少路pipelibe +channel_nums=xxx + +#每路pipeline的rtsp流地址,数组长度跟${channel_nums}一致,请确保rtsp地址存在。 +#若使用live555推流工具构架rtsp流,rstp流格式为rtsp://${ip_addres}:${port}/${h264_file} +#${ip_addres}:起流的机器ip地址。如果是本地起流可以设置为127.0.0.1;如果是其他机器起流,那需要配置该台机器的ip地址 +#${port}:可使用的rtsp流的端口 +#${h264_file}:需要推流的h264视频文件,一般都是以.264结尾的文件 +rtsp_array=(xxx xxx xxx) + +#配置pipeline运行的npu编号 +device_id=xxx + +#输出图像尺寸. CIF(height: 288 width: 352),D1(height: 480 width: 720) +height=xxx +width=xxx + +#是否打印转码的帧率. 0:不打印,1:打印 +fps=xxx + +#I帧间隔.一般设置视频帧率大小,25或者30 +i_frame_interval=xxx +``` + +执行脚本,生成的pipeline文件在`./pipeline/`目录下,文件名类似`testxxx.pipeline`。 + +``` +./script/create_pipeline.sh +``` + +**注意**:解码模块`mxpi_videodecoder`的**vdecChannelId**配置项要保证不重用;缩放模块`mxpi_imageresize`的**resizeHeight**和**resizeWidth**要与编码模块的`mxpi_videoencoder`的**imageHeight**和**imageWidth**保持一致;`mxpi_videoencoder`编码模块的**fps**用于控制是否打印帧率,默认值是**0**表示不打印,若要打印,可设置为**1**;**deviceId**配置为需要运行的npu芯片编号,具体可以通过`npu-smi info`查看。 + +- 修改MindXSDK的日志配置文件 + +参考mxVision用户指南D.2章节,修改`${MX_SDK_HOME}/mxVision/config/logging.conf`,调节输出日志级别为info级别。 + +```bash +# will output to stderr, where level >= global_level,default is 0 +# Log level: -1-debug, 0-info, 1-warn, 2-error, 3-fatal。 +global_level=0 +``` + +### 5.2 运行 + +- 根据实际情况选择运行的转码路数,每路输出的日志会重定向到`./logs/output*.log`中。 + +```bash +./script/run.sh ${nums} +``` + +**注意:**调用`./script/run.sh`长时间跑生成的日志可能写爆磁盘。 + +- 显示每路运行情况 + +```bash +./script/show.sh +#可以使用定时打印结果(可选) +watch -n 1 ./script/show.sh +``` + +- 显示结果 + +**显示转码帧率** + +若设置了输出转码的帧率,会额外输出如下结果,可用于观察性能。若打印的fps和视频的帧率保持一致,说明性能满足要求。否则,说明性能达不到要求。 + +```bash + Plugin(mxpi_videoencoder*) in fps (xx) +``` + +**显示每帧结果(可选)** + +显示每一路每一帧的编码输出结果(需要修改日志级别为debug级别)。**mxpi_videodencoder***编码模块输出**frame id**为xxx 数据长度**stream size**为xxxx。 + +``` +Plugin(mxpi_videodencoder*) encode frameId(xxxx) stream size(xxxx) +``` + +- 停止运行 + +``` +./script/stop.sh +``` + +**注意**:受限于ACL接口约束,只能实现多进程多路视频转码,run.sh脚本实际上是启动了多个进程实现多路转码,每一路对应一个进程。 \ No newline at end of file diff --git a/mxVision/MediaCodec/dist/.gitkeep b/mxVision/MediaCodec/dist/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/mxVision/MediaCodec/logs/.gitkeep b/mxVision/MediaCodec/logs/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/mxVision/MediaCodec/main.cpp b/mxVision/MediaCodec/main.cpp new file mode 100644 index 000000000..98f84ca3a --- /dev/null +++ b/mxVision/MediaCodec/main.cpp @@ -0,0 +1,65 @@ +/* +* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
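+*
+* Loads the pipeline file passed as argv[1], keeps the created streams
+* running until SIGINT, then destroys all streams.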
+*/ + +#include +#include +#include "MxStream/StreamManager/MxStreamManager.h" + +namespace { + const int SIGNAL_CHECK_TIMESTEP = 10000; + static bool signalRecieved = false; +} + +static void SigHandler(int signal) +{ + if (signal == SIGINT) { + signalRecieved = true; + } +} + +APP_ERROR TestVideoEncoder(std::string &pipelinePath) +{ + // read image file and build stream input + std::cout << "Begin Load " << pipelinePath << std::endl; + // init stream manager + MxStream::MxStreamManager mxStreamManager; + APP_ERROR ret = mxStreamManager.InitManager(); + if (ret != APP_ERR_OK) { + std::cout << "Failed to init Stream manager, ret = " << ret << "." << std::endl; + return ret; + } + ret = mxStreamManager.CreateMultipleStreamsFromFile(pipelinePath); + if (ret != APP_ERR_OK) { + std::cout << "Failed to create Stream, ret = " << ret << "." << std::endl; + return ret; + } + + signal(SIGINT, SigHandler); + while (!signalRecieved) { + usleep(SIGNAL_CHECK_TIMESTEP); + } + + // destroy streams + mxStreamManager.DestroyAllStreams(); + return APP_ERR_OK; +} + +int main(int argc, char* argv[]) +{ + std::string pipelinePath(argv[1]); + TestVideoEncoder(pipelinePath); + return 0; +} diff --git a/mxVision/MediaCodec/pipeline/test.pipeline b/mxVision/MediaCodec/pipeline/test.pipeline new file mode 100644 index 000000000..a2e139f3a --- /dev/null +++ b/mxVision/MediaCodec/pipeline/test.pipeline @@ -0,0 +1,90 @@ +{ + "encoder": { + "stream_config": { + "deviceId": "xxx" + }, + "mxpi_rtspsrcxxx": { + "factory": "mxpi_rtspsrc", + "props": { + "rtspUrl": "xxx", + "channelId": "xxx" + }, + "next": "queue0" + }, + + "queue0":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next": "mxpi_videodecoderxxx" + }, + + "mxpi_videodecoderxxx": { + "factory": "mxpi_videodecoder", + "props": { + "inputVideoFormat": "H264", + "outputImageFormat": "YUV420SP_NV12", + "deviceId": "xxx", + "vdecChannelId": "xxx" + }, + "former": "mxpi_rtspsrcxxx", + "next": "queue1" + }, + + "queue1":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next": "mxpi_imageresizexxx" + }, + + "mxpi_imageresizexxx": { + "props": { + "dataSource": "mxpi_videodecoderxxx", + "resizeHeight": "xxx", + "resizeWidth": "xxx", + "deviceId": "xxx" + }, + "factory": "mxpi_imageresize", + "next": "queue2" + }, + + "queue2":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next": "mxpi_videoencoderxxx" + }, + + "mxpi_videoencoderxxx": { + "props": { + "dataSource": "mxpi_imageresizexxx", + "imageHeight": "xxx", + "imageWidth": "xxx", + "inputFormat": "YUV420SP_NV12", + "outputFormat": "H264", + "fps": "xxx", + "iFrameInterval": "xxx", + "deviceId": "xxx" + }, + "factory": "mxpi_videoencoder", + "next": "queue3" + }, + + "queue3":{ + "props":{ + "max-size-buffers":"50" + }, + "factory":"queue", + "next": "fakesinkxxx" + }, + + "fakesinkxxx": { + "factory": "fakesink", + "former": "mxpi_videoencoderxxx" + } + } +} \ No newline at end of file diff --git a/mxVision/MediaCodec/script/build.sh b/mxVision/MediaCodec/script/build.sh new file mode 100644 index 000000000..7a0f90e0d --- /dev/null +++ b/mxVision/MediaCodec/script/build.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +file_path=$(cd $(dirname $0); pwd) +build_type="Debug" + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +function build_mxvision() { + + build_path=$file_path/../build + if [ -d "$build_path" ]; then + rm -rf $build_path + else + echo "file $build_path is not exist." + fi + mkdir -p $build_path + cd $build_path + cmake -DCMAKE_BUILD_TYPE=$build_type .. + make -j + if [ $? -ne 0 ]; then + echo "Build Failed" + exit -1 + fi + cd .. + cp ./build/mxVisionMediaCodec ./dist/ + exit 0 +} + +build_mxvision +exit 0 \ No newline at end of file diff --git a/mxVision/MediaCodec/script/create_pipeline.sh b/mxVision/MediaCodec/script/create_pipeline.sh new file mode 100644 index 000000000..8a891e853 --- /dev/null +++ b/mxVision/MediaCodec/script/create_pipeline.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +file_path=$(cd $(dirname $0); pwd) + +#需要生成的多少路pipelibe +channel_nums=xxx + +#每路pipeline的rtsp流地址,数组长度跟${channel_nums}一致,请确保rtsp地址存在。 +#若使用live555推流工具构架rtsp流,rstp流格式为rtsp://${ip_addres}:${port}/${h264_file} +#${ip_addres}:起流的机器ip地址。如果是本地起流可以设置为127.0.0.1;如果是其他机器起流,那需要配置该台机器的ip地址 +#${port}:可使用的rtsp流的端口 +#${h264_file}:需要推流的h264视频文件,一般都是以.264结尾的文件 +rtsp_array=(xxx xxx xxx) + +#配置pipeline运行的npu编号 +device_id=xxx + +#输出图像尺寸. CIF(height: 288 width: 352),D1(height: 480 width: 720) +height=xxx +width=xxx + +#是否打印转码的帧率. 
0:不打印,1:打印 +fps=xxx + +#I帧间隔.一般设置视频帧率大小,25或者30 +i_frame_interval=xxx + + +pipeline_path=$file_path/../pipeline +rm -rf $pipeline_path/test?.pipeline +for((i=0;(i<${channel_nums});i++)); +do + cp $pipeline_path/test.pipeline $pipeline_path/test${i}.pipeline + + sed -i "s/\"mxpi_rtspsrcxxx\"/\"mxpi_rtspsrc${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"mxpi_videodecoderxxx\"/\"mxpi_videodecoder${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"mxpi_imageresizexxx\"/\"mxpi_imageresize${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"mxpi_videoencoderxxx\"/\"mxpi_videoencoder${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"fakesinkxxx\"/\"fakesink${i}\"/g" $pipeline_path/test${i}.pipeline + + sed -i "s/\"deviceId\"\: \"xxx\"/\"deviceId\"\: \"${device_id}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"vdecChannelId\"\: \"xxx\"/\"vdecChannelId\"\: \"${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"imageHeight\"\: \"xxx\"/\"imageHeight\"\: \"${height}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"imageWidth\"\: \"xxx\"/\"imageWidth\"\: \"${width}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"resizeHeight\"\: \"xxx\"/\"resizeHeight\"\: \"${height}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"resizeWidth\"\: \"xxx\"/\"resizeWidth\"\: \"${width}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"channelId\"\: \"xxx\"/\"channelId\"\: \"${i}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"fps\"\: \"xxx\"/\"fps\"\: \"${fps}\"/g" $pipeline_path/test${i}.pipeline + sed -i "s/\"iFrameInterval\"\: \"xxx\"/\"iFrameInterval\"\: \"${i_frame_interval}\"/g" $pipeline_path/test${i}.pipeline + + rtsp=$(echo ${rtsp_array[${i}]} | sed -e 's/\//\\\//g' | sed -e 's/\:/\\\:/g' | sed -e 's/\_/\\\_/g') + sed -i "s/\"rtspUrl\"\: \"xxx\"/\"rtspUrl\"\: \"${rtsp}\"/g" $pipeline_path/test${i}.pipeline +done +exit 0 diff --git a/mxVision/MediaCodec/script/run.sh b/mxVision/MediaCodec/script/run.sh new file mode 100644 index 000000000..172cddde9 --- /dev/null +++ b/mxVision/MediaCodec/script/run.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
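+
+# 用法: bash run.sh <路数>
+# <路数>应与create_pipeline.sh中生成pipeline的channel_nums保持一致。
+# 本脚本会为每一路启动一个mxVisionMediaCodec后台进程,分别加载
+# pipeline/test${i}.pipeline,并将日志写入logs/output${i}.log。
+# 示例(假设已用create_pipeline.sh生成4路pipeline): bash run.sh 4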
+ +set -e + +file_path=$(cd $(dirname $0); pwd) + +num=$1 + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +pipeline_path=${file_path}/../pipeline +log_path=${file_path}/../logs +cd ${file_path}/../ +rm -rf ${log_path}/* +for((i=0;i<(num);i++)); +do + nohup ./dist/mxVisionMediaCodec ${pipeline_path}/test${i}.pipeline > ${log_path}/output${i}.log 2>&1 & + sleep 0.05 +done +exit 0 \ No newline at end of file diff --git a/mxVision/MediaCodec/script/show.sh b/mxVision/MediaCodec/script/show.sh new file mode 100644 index 000000000..09536c8c9 --- /dev/null +++ b/mxVision/MediaCodec/script/show.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +file_path=$(cd $(dirname $0); pwd) + +log_path=${file_path}/../logs +num=$(ls ${log_path} | grep .log | grep output | wc -w) + +for((i=0;(i<${num});i++)); +do + cat ${log_path}/output${i}.log | tail -n 100 | grep "mxpi_videoencoder" | grep "fps" | tail -n 2 +done + +for((i=0;(i<${num});i++)); +do + cat ${log_path}/output${i}.log | tail -n 100 | grep "stream size" | tail -n 2 +done + +exit 0 \ No newline at end of file diff --git a/mxVision/MediaCodec/script/stop.sh b/mxVision/MediaCodec/script/stop.sh new file mode 100644 index 000000000..996319bca --- /dev/null +++ b/mxVision/MediaCodec/script/stop.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
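+
+# 说明: 通过kill -2向所有mxVisionMediaCodec进程发送SIGINT信号,
+# main.cpp中注册的SigHandler捕获SIGINT后置位退出标志,
+# 主程序随即调用DestroyAllStreams()销毁所有stream,实现优雅退出。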
+ +set -e + +ps -as | grep mxVisionMediaCodec +ps -ef | grep mxVisionMediaCodec | grep -v grep | cut -c 9-15 | xargs kill -2 +exit 0 \ No newline at end of file diff --git a/mxVision/MultiThread/C++/CMakeLists.txt b/mxVision/MultiThread/C++/CMakeLists.txt new file mode 100644 index 000000000..b242805d4 --- /dev/null +++ b/mxVision/MultiThread/C++/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.5.1) +project(Test) + +set(CMAKE_CXX_STANDARD 11) + +if(NOT DEFINED ENV{MX_SDK_HOME}) + message(FATAL_ERROR "MX_SDK_HOME is not defined, please set it first.") +else() + set(MX_SDK_HOME $ENV{MX_SDK_HOME}) + message("MX_SDK_HOME=$ENV{MX_SDK_HOME}") +endif() +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +add_definitions(-Dgoogle=mindxsdk_private) +set(TARGET_NAME mxVisionMultiThread) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -pthread") +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +include_directories(${MX_SDK_HOME}/include) +include_directories(${MX_SDK_HOME}/opensource/include) +include_directories(${MX_SDK_HOME}/opensource/include/opencv4) +include_directories(${MX_SDK_HOME}/opensource/include/gstreamer-1.0) +include_directories(${MX_SDK_HOME}/opensource/include/glib-2.0) +include_directories(${MX_SDK_HOME}/opensource/lib/glib-2.0/include) + +link_directories(${MX_SDK_HOME}/lib) +link_directories(${MX_SDK_HOME}/opensource/lib) + +link_directories(/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64) + +add_executable(${TARGET_NAME} main.cpp) +target_link_libraries(${TARGET_NAME} mxbase plugintoolkit mxpidatatype streammanager cpprest glog mindxsdk_protobuf opencv_world) diff --git a/mxVision/MultiThread/C++/EasyStream.pipeline b/mxVision/MultiThread/C++/EasyStream.pipeline new file mode 100644 index 000000000..72dc5944d --- /dev/null +++ b/mxVision/MultiThread/C++/EasyStream.pipeline @@ -0,0 +1,140 @@ +{ + "detection0": { + "stream_config":{ + "deviceId":"0" + }, + "appsrc0": { + "factory": "appsrc", + "next": "mxpi_imagedecoder0", + "props": { + "blocksize": "250806" + } + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_imagedecoder0", + "resizeHeight": "416", + "resizeWidth": "416" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection1": { + "stream_config":{ + "deviceId":"1" + }, + "appsrc0": { + "factory": "appsrc", + "next": "mxpi_imagedecoder0", + "props": { + "blocksize": "250806" + } + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_imagedecoder0", + "resizeHeight": "416", + "resizeWidth": "416" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + 
"postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection2": { + "stream_config":{ + "deviceId":"2" + }, + "appsrc0": { + "factory": "appsrc", + "next": "mxpi_imagedecoder0", + "props": { + "blocksize": "250806" + } + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_imagedecoder0", + "resizeHeight": "416", + "resizeWidth": "416" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + } +} \ No newline at end of file diff --git a/mxVision/MultiThread/C++/EasyStream_protobuf.pipeline b/mxVision/MultiThread/C++/EasyStream_protobuf.pipeline new file mode 100644 index 000000000..d94cbd3a8 --- /dev/null +++ b/mxVision/MultiThread/C++/EasyStream_protobuf.pipeline @@ -0,0 +1,255 @@ +{ + "detection0": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + + "mxpi_modelinfer0": { + "props": { + "parentName": "mxpi_datatransfer0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection1": { + "stream_config": { + "deviceId": "1" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + 
"factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "parentName": "mxpi_datatransfer0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection2": { + "stream_config": { + "deviceId": "2" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "parentName": "mxpi_datatransfer0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection3": { + "stream_config": { + "deviceId": "3" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "parentName": "mxpi_datatransfer0", + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + } +} diff --git a/mxVision/MultiThread/C++/README.zh.md b/mxVision/MultiThread/C++/README.zh.md new file mode 100644 index 000000000..c9cd13ffb --- /dev/null +++ b/mxVision/MultiThread/C++/README.zh.md @@ -0,0 +1,125 @@ +# Multi Thread + +## 1.介绍 + +- 多线程调用SDK的发送和接收数据接口函数 + +## 2.环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| 
------------------------------------ | -------------- |
+| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 |
+| x86_64+Atlas 300I 推理卡(型号3010) | CentOS 7.6 |
+| ARM+Atlas 300I 推理卡 (型号3000) | Ubuntu 18.04.1 |
+| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 |
+
+- 软件依赖
+
+| 软件名称 | 版本 |
+| -------- | ------ |
+| cmake | 3.5.1+ |
+| mxVision | 0.2 |
+
+## 3.配置
+
+- 当前测试用例包含2个样例:
+1号样例调用EasyStream.pipeline文件
+2号样例调用EasyStream_protobuf.pipeline文件
+
+### 3.1 修改业务及device ID
+
+以样例1为例:
+
+- 可以通过修改EasyStream.pipeline文件中插件的个数和属性,修改业务功能(业务名称为“detection0,detection1”,以此类推,方便调用),设定"deviceId":"0",绑定该业务使用的芯片号(不同的业务可以使用相同的芯片)
+- 在main.cpp的TestMultiThread函数中,修改threadCount变量的值,实现多线程功能;修改streamName[i]的值,则是调用不同芯片对应的业务
+
+注意:
+- 调用样例2时,需要修改TestSendProtobuf函数中width[i]和height[i]的值,使其与业务中的模型输入的宽高一致
+- 每次main.cpp文件被修改后,都需要重新执行`./build.sh`
+
+### 3.2 输入数据
+
+- 将测试图片拷贝到`MultiThread`下的`picture`目录
+
+注意:
+测试用例当前仅支持分辨率在(32,8192)之间的`.jpg`格式的文件,其他格式文件自动过滤
+
+### 3.3 预准备
+
+脚本转换为unix格式以及添加脚本执行权限
+
+```bash
+sed -i 's/\r$//' ./*.sh
+chmod +x ./*.sh
+```
+
+## 4.编译
+
+- 配置环境变量
+```bash
+export MX_SDK_HOME=${安装路径}/mxVision
+```
+- 执行`./build.sh`,在dist文件夹中会生成`mxVisionMultiThread`。
+```bash
+./build.sh
+```
+
+## 5.运行
+
+### 5.1 运行前配置
+
+- 在`MultiThread`下创建`models`目录
+```bash
+mkdir models
+```
+- 获取`EasyStream.pipeline`中推理所需的om等文件,并拷贝到`models`路径下
+
+### 5.2 运行
+
+- 进入C++目录,执行`./run.sh 0`,运行程序
+```bash
+./run.sh 0
+```
+注意:
+0 表示执行main.cpp中的1号样例,还可以输入`./run.sh 1`执行2号样例(其他参数默认执行2号样例)
+
+### 5.3 结果
+- 样例1运行结果格式如下:
+
+`GetResult: {"MxpiObject":[{"classVec":[{"classId":15,"className":"cat","confidence":0.98471146799999998,"headerVec":[]}],"x0":86.182579000000004,"x1":247.33078,"y0":86.406199999999998,"y1":442.07312000000002},{"classVec":[{"classId":16,"className":"dog","confidence":0.99579948200000001,"headerVec":[]}],"x0":220.453766,"x1":434.736786,"y0":132.42176799999999,"y1":466.86648600000001}]}`
+
+- 样例2运行结果格式如下:
+
+`value(detection2) = objectVec {
+  headerVec {
+    dataSource: "mxpi_datatransfer0"
+  }
+  x0: 57.241951
+  y0: 67.1379089
+  x1: 189.799133
+  y1: 374.405853
+  classVec {
+    classId: 15
+    className: "cat"
+    confidence: 0.550405
+  }
+}
+objectVec {
+  headerVec {
+    dataSource: "mxpi_datatransfer0"
+  }
+  x0: 180.270584
+  y0: 74.9994659
+  x1: 351.540314
+  y1: 401.286896
+  classVec {
+    classId: 16
+    className: "dog"
+    confidence: 0.994807
+  }
+}`
diff --git a/mxVision/MultiThread/C++/build.sh b/mxVision/MultiThread/C++/build.sh
new file mode 100644
index 000000000..7932723f1
--- /dev/null
+++ b/mxVision/MultiThread/C++/build.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
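+
+# Usage: ./build.sh
+# Requires MX_SDK_HOME to be exported beforehand (see README section 4).
+# The script recreates the build directory, runs cmake and make, and
+# copies the resulting mxVisionMultiThread binary into ./dist/.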
+
+set -e
+
+file_path=$(cd $(dirname $0); pwd)
+build_type="Debug"
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+function build_mxvision() {
+
+    build_path=$file_path/build
+    if [ -d "$build_path" ]; then
+        rm -rf $build_path
+    else
+        echo "file $build_path does not exist."
+    fi
+    mkdir -p $build_path
+    cd $build_path
+    cmake -DCMAKE_BUILD_TYPE=$build_type ..
+    make -j
+    if [ $? -ne 0 ]; then
+        echo "Build Failed"
+        exit 1
+    fi
+    cd ..
+    cp ./build/mxVisionMultiThread ./dist/
+    exit 0
+}
+
+build_mxvision
+exit 0
\ No newline at end of file
diff --git a/mxVision/MultiThread/C++/dist/.gitkeep b/mxVision/MultiThread/C++/dist/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/mxVision/MultiThread/C++/main.cpp b/mxVision/MultiThread/C++/main.cpp
new file mode 100644
index 000000000..db09474d7
--- /dev/null
+++ b/mxVision/MultiThread/C++/main.cpp
@@ -0,0 +1,329 @@
+/*
+* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <climits>
+#include <cstdlib>
+#include <fstream>
+#include <thread>
+#include <dirent.h>
+#include <sys/time.h>
+#include <opencv2/opencv.hpp>
+#include "MxBase/Log/Log.h"
+#include "MxStream/StreamManager/MxStreamManager.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+
+using namespace MxTools;
+using namespace MxStream;
+using namespace cv;
+
+namespace {
+    const int TIME_OUT = 15000;
+    const int INPUT_UINT8 = 1;
+}
+
+std::string ReadFileContent(const std::string filePath)
+{
+    std::ifstream file(filePath, std::ios::binary);
+    if (!file) {
+        LogError << "Invalid file. filePath(" << filePath << ")";
+        return "";
+    }
+
+    file.seekg(0, std::ifstream::end);
+    uint32_t fileSize = file.tellg();
+    file.seekg(0);
+    std::vector<char> buffer = {};
+    buffer.resize(fileSize);
+    file.read(buffer.data(), fileSize);
+    file.close();
+
+    return std::string(buffer.data(), fileSize);
+}
+
+static void GetDataBuf(std::vector<MxStream::MxstProtobufIn>& dataBufferVec, MxStream::MxstDataInput& dataInput,
+    int width, int height)
+{
+    std::shared_ptr<MxTools::MxpiVisionList> objectList = std::make_shared<MxTools::MxpiVisionList>();
+    MxTools::MxpiVision* mxpiVision = objectList->add_visionvec();
+    MxTools::MxpiVisionInfo *visionInfo = mxpiVision->mutable_visioninfo();
+    const int format = 12;
+    visionInfo->set_format(format);
+    visionInfo->set_width(width);
+    visionInfo->set_height(height);
+    visionInfo->set_heightaligned(height);
+    visionInfo->set_widthaligned(width);
+
+    MxTools::MxpiVisionData *visionData = mxpiVision->mutable_visiondata();
+    visionData->set_dataptr((uint64_t)dataInput.dataPtr);
+    visionData->set_datasize(dataInput.dataSize);
+    visionData->set_memtype(MxTools::MXPI_MEMORY_HOST_NEW);
+    MxStream::MxstProtobufIn dataBuffer;
+    dataBuffer.key = "appsrc1";
+    dataBuffer.messagePtr = std::static_pointer_cast<google::protobuf::Message>(objectList);
+    dataBufferVec.push_back(dataBuffer);
+}
+
+APP_ERROR GetOpenCVDataBuf(std::vector<MxStream::MxstProtobufIn>& dataBufferVec, int dataType, std::string filePath,
+    int modelWidth, int modelHeight)
+{
+    if (dataType <= 0) {
+        LogError << "The datatype must be larger than 0";
+        return APP_ERR_COMM_FAILURE;
+    }
+    char c[PATH_MAX + 1] = { 0x00 };
+    size_t count = filePath.copy(c, PATH_MAX + 1);
+    char realPath[PATH_MAX + 1] = { 0x00 };
+    if (count != filePath.length() || (realpath(c, realPath) == nullptr)) {
+        LogError << "Failed to get image, the image path is (" << filePath << ").";
+        return APP_ERR_COMM_NO_EXIST;
+    }
+    Mat img = imread(realPath, 1);
+    if (img.empty()) {
+        LogError << "Can not read this picture(" << realPath << ")";
+        return APP_ERR_COMM_NO_EXIST;
+    }
+    int height = img.rows;
+    int width = img.cols;
+    Mat shrink;
+    if (height >= modelHeight && width >= modelWidth) {
+        Size dsize = Size(round(modelWidth), round(modelHeight));
+        resize(img, shrink, dsize, 0, 0, INTER_AREA);
+    } else {
+        float fx = modelWidth / (float)width;
+        float fy = modelHeight / (float)height;
+        resize(img, shrink, Size(), fx, fy, INTER_CUBIC);
+    }
+    height = shrink.rows;
+    width = shrink.cols;
+    Mat yuvImg = {};
+    MxStream::MxstDataInput dataInput;
+    const int convert_3 = 3;
+    const int convert_2 = 2;
+    dataInput.dataSize = width * height * dataType * convert_3 / convert_2;
+    cvtColor(shrink, yuvImg, COLOR_RGB2YUV_I420);
+    dataInput.dataPtr = new (std::nothrow) uint32_t[dataInput.dataSize];
+    std::copy(yuvImg.data, yuvImg.data + dataInput.dataSize / dataType, (char*)dataInput.dataPtr);
+    GetDataBuf(dataBufferVec, dataInput, modelWidth, modelHeight);
+    LogDebug << "width: " << width << ", height: " << height << ", dataSize: " << dataInput.dataSize;
+    return APP_ERR_OK;
+}
+
+APP_ERROR GetPicture(std::string filePath, std::vector<std::string>& pictureName)
+{
+    pictureName.clear();
+    std::string filename;
+    DIR *pDir = nullptr;
+    struct dirent *ptr = nullptr;
+    if (!(pDir = opendir(filePath.c_str()))) {
+        LogError << "Folder doesn't Exist!";
+        return APP_ERR_COMM_NO_EXIST;
+    }
+    int strLen = 4;
+    while ((ptr = readdir(pDir)) != nullptr) {
+        std::string tmpStr = ptr->d_name;
+        if (tmpStr.length() < strLen) {
+            continue;
+        }
+        tmpStr = tmpStr.substr(tmpStr.length() - strLen, tmpStr.length());
+        if (tmpStr == ".jpg" || tmpStr == ".JPG") {
+            filename = filePath + "/" + ptr->d_name;
+            pictureName.push_back(filename);
+        }
+    }
+    closedir(pDir);
+    return APP_ERR_OK;
+}
+
+APP_ERROR streamCallback(MxStreamManager& mxStreamManager, std::string streamName, std::string picturePath)
+{
+    std::vector<std::string> pictureName = {};
+    auto ret = GetPicture(picturePath, pictureName);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to get picture";
+        return ret;
+    }
+    for (int i = 0; i < pictureName.size(); ++i) {
+        LogInfo << "Start to send picture(" << pictureName[i] << ").";
+        MxstDataInput mxstDataInput = {};
+        std::string catImage = ReadFileContent(pictureName[i]);
+        mxstDataInput.dataPtr = (uint32_t *) catImage.c_str();
+        mxstDataInput.dataSize = catImage.size();
+        ret = mxStreamManager.SendData(streamName, 0, mxstDataInput);
+        if (ret != APP_ERR_OK) {
+            LogError << "Failed to send data to stream";
+            continue;
+        }
+        MxstDataOutput *outputPtr = mxStreamManager.GetResult(streamName, 0, TIME_OUT);
+        if (outputPtr == nullptr || outputPtr->errorCode != 0) {
+            LogError << "Failed to get data from stream";
+            continue;
+        }
+        std::string dataStr = std::string((char *)outputPtr->dataPtr, outputPtr->dataSize);
+        LogInfo << "[" << streamName << "] GetResult: " << dataStr;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR TestMultiThread(std::string pipelinePath)
+{
+    LogInfo << "********case TestMultiThread********" << std::endl;
+    MxStream::MxStreamManager mxStreamManager;
+    APP_ERROR ret = mxStreamManager.InitManager();
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to init stream manager";
+        return ret;
+    }
+    ret = mxStreamManager.CreateMultipleStreamsFromFile(pipelinePath);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to create streams, pipeline file does not exist";
+        return ret;
+    }
+
+    const int threadCount = 3;
+    std::thread threadSendData[threadCount];
+    std::string streamName[threadCount];
+    std::string picturePath = "../picture";
+    for (int i = 0; i < threadCount; ++i) {
+        streamName[i] = "detection" + std::to_string(i);
+        threadSendData[i] = std::thread(streamCallback, std::ref(mxStreamManager), streamName[i], picturePath);
+    }
+    for (int j = 0; j < threadCount; ++j) {
+        threadSendData[j].join();
+    }
+
+    ret = mxStreamManager.DestroyAllStreams();
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to destroy stream";
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR sendDataCallback(MxStreamManager& mxStreamManager, std::string streamName,
+    std::vector<std::string>& pictureName, int width, int height)
+{
+    APP_ERROR ret;
+    MxBase::DeviceManager* m = MxBase::DeviceManager::GetInstance();
+    MxBase::DeviceContext deviceContext;
+    deviceContext.devId = 0;
+    m->InitDevices();
+    m->SetDevice(deviceContext);
+    for (int i = 0; i < pictureName.size(); ++i) {
+        std::vector<MxstProtobufIn> dataBufferVec;
+        ret = GetOpenCVDataBuf(dataBufferVec, INPUT_UINT8, pictureName[i], width, height);
+        if (ret != APP_ERR_OK) {
+            LogError << "Failed to get data buf";
+            m->DestroyDevices();
+            return ret;
+        }
+        LogInfo << "Start to send picture(" << pictureName[i] << ").";
+        ret = mxStreamManager.SendProtobuf(streamName, 0, dataBufferVec);
+        if (ret != APP_ERR_OK) {
+            LogError << "Failed to send protobuf";
+            m->DestroyDevices();
+            return ret;
+        }
+    }
+    m->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR getDataCallback(MxStreamManager& mxStreamManager, std::string streamName, int count)
+{
+    std::vector<std::string> strvec = {};
+    strvec.push_back("mxpi_modelinfer0");
+    for (int i = 0; i < count; ++i) {
+        std::vector<MxstProtobufOut> bufvec = mxStreamManager.GetProtobuf(streamName, 0, strvec);
+        if (bufvec[0].errorCode != APP_ERR_OK) {
+            LogError << "Failed to get protobuf";
+            continue;
+        }
+        for (int j = 0; j < bufvec.size(); ++j) {
+            LogInfo << "Value(" << streamName << ") = " << bufvec[j].messagePtr.get()->DebugString();
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR TestSendProtobuf(std::string pipelinePath)
+{
+    LogInfo << "********case TestSendProtobuf********";
+    MxStreamManager mxStreamManager;
+    APP_ERROR ret = mxStreamManager.InitManager();
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to init stream manager";
+        return ret;
+    }
+    ret = mxStreamManager.CreateMultipleStreamsFromFile(pipelinePath);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to create streams, pipeline file does not exist";
+        return ret;
+    }
+    std::vector<std::string> pictureName = {};
+    std::string picturePath = "../picture";
+    ret = GetPicture(picturePath, pictureName);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to get picture";
+        return ret;
+    }
+    const int threadCount = 4;
+    std::thread threadSendData[threadCount];
+    std::thread threadGetData[threadCount];
+    std::string streamName[threadCount];
+    int width[threadCount] = {416, 416, 416, 416};
+    int height[threadCount] = {416, 416, 416, 416};
+    for (int i = 0; i < threadCount; ++i) {
+        streamName[i] = "detection" + std::to_string(i);
+        threadGetData[i] = std::thread(getDataCallback, std::ref(mxStreamManager), streamName[i], pictureName.size());
+        threadSendData[i] = std::thread(sendDataCallback, std::ref(mxStreamManager), streamName[i],
+            std::ref(pictureName), width[i], height[i]);
+    }
+
+    for (int j = 0; j < threadCount; ++j) {
+        threadSendData[j].join();
+        threadGetData[j].join();
+    }
+
+    ret = mxStreamManager.DestroyAllStreams();
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to destroy stream";
+    }
+    return APP_ERR_OK;
+}
+
+int main(int argc, char *argv[])
+{
+    if (argc == 1) {
+        LogWarn << "Parameter cannot be empty";
+        return 0;
+    }
+    std::string type = argv[1];
+    struct timeval inferStartTime = { 0 };
+    struct timeval inferEndTime = { 0 };
+    gettimeofday(&inferStartTime, nullptr);
+    APP_ERROR ret;
+    if (type == "0") {
+        ret = TestMultiThread("EasyStream.pipeline");
+    } else {
+        ret = TestSendProtobuf("EasyStream_protobuf.pipeline");
+    }
+    if (ret == APP_ERR_OK) {
+        const float SEC2MS = 1000.0;
+        gettimeofday(&inferEndTime, nullptr);
+        double inferCostTime = SEC2MS * (inferEndTime.tv_sec - inferStartTime.tv_sec) +
+            (inferEndTime.tv_usec - inferStartTime.tv_usec) / SEC2MS;
+        LogInfo << "Total time: " << inferCostTime / SEC2MS;
+    }
+    return 0;
+}
diff --git a/mxVision/MultiThread/C++/run.sh b/mxVision/MultiThread/C++/run.sh
new file mode 100644
index 000000000..c4bea9129
--- /dev/null
+++ b/mxVision/MultiThread/C++/run.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
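+
+# Usage: ./run.sh 0   runs sample 1 (TestMultiThread, EasyStream.pipeline)
+#        ./run.sh 1   runs sample 2 (TestSendProtobuf, EasyStream_protobuf.pipeline)
+# Any other argument also falls through to sample 2; run ./build.sh first.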
+ +set -e + +file_path=$(cd $(dirname $0); pwd) + +num=$1 + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +./dist/mxVisionMultiThread $1 +exit 0 \ No newline at end of file diff --git a/mxVision/MultiThread/picture/.gitkeep b/mxVision/MultiThread/picture/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/mxVision/MultiThread/python/EasyStream.pipeline b/mxVision/MultiThread/python/EasyStream.pipeline new file mode 100644 index 000000000..393aa6c0d --- /dev/null +++ b/mxVision/MultiThread/python/EasyStream.pipeline @@ -0,0 +1,190 @@ +{ + "detection0": { + "stream_config":{ + "deviceId":"0" + }, + "appsrc0": { + "factory": "appsrc", + "next": "queue0", + "props": { + "blocksize": "250806" + } + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "factory": "appsink" + } + }, + "detection1": { + "stream_config":{ + "deviceId":"5" + }, + "appsrc0": { + "factory": "appsrc", + "next": "queue0", + "props": { + "blocksize": "250806" + } + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "factory": "appsink" + } + }, + "detection2": { + "stream_config":{ + "deviceId":"2" + }, + "appsrc0": { + "factory": "appsrc", + "next": "queue0", + "props": { + "blocksize": "250806" + } + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": 
"../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "factory": "appsink" + } + }, + "detection3": { + "stream_config":{ + "deviceId":"3" + }, + "appsrc0": { + "factory": "appsrc", + "next": "queue0", + "props": { + "blocksize": "250806" + } + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/yolov3/yolov3_tf_bs1_fp16.om", + "postProcessConfigPath": "../models/yolov3/yolov3_tf_bs1_fp16.cfg", + "labelPath": "../models/yolov3/coco.names", + "postProcessLibPath": "libMpYOLOv3PostProcessor.so" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "factory": "appsink" + } + } +} \ No newline at end of file diff --git a/mxVision/MultiThread/python/EasyStream_protobuf.pipeline b/mxVision/MultiThread/python/EasyStream_protobuf.pipeline new file mode 100644 index 000000000..8dfa0ea3e --- /dev/null +++ b/mxVision/MultiThread/python/EasyStream_protobuf.pipeline @@ -0,0 +1,254 @@ +{ + "detection0": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/cascadercnn/cascade_bs4.om", + "postProcessConfigPath": "../models/cascadercnn/faster_rcnn_coco_uncut.cfg", + "labelPath": "../models/cascadercnn/faster_rcnn_coco.names", + "tensorFormat":"1", + "waitingTime":"3000" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection1": { + "stream_config": { + "deviceId": "1" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + 
"modelPath": "../models/cascadercnn/cascade_bs4.om", + "postProcessConfigPath": "../models/cascadercnn/faster_rcnn_coco_uncut.cfg", + "labelPath": "../models/cascadercnn/faster_rcnn_coco.names", + "tensorFormat":"1", + "waitingTime":"3000" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection2": { + "stream_config": { + "deviceId": "2" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/cascadercnn/cascade_bs4.om", + "postProcessConfigPath": "../models/cascadercnn/faster_rcnn_coco_uncut.cfg", + "labelPath": "../models/cascadercnn/faster_rcnn_coco.names", + "tensorFormat":"1", + "waitingTime":"3000" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + }, + "detection3": { + "stream_config": { + "deviceId": "3" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "queue0" + }, + "appsink0": { + "factory": "appsink" + }, + "mxpi_datatransfer0": { + "props": { + "dataSource": "appsrc1", + "transferMode": "auto", + "removeSourceData": "yes" + }, + "factory": "mxpi_datatransfer", + "next": "queue2" + }, + "queue0": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_datatransfer0" + }, + "queue2": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "modelPath": "../models/cascadercnn/cascade_bs4.om", + "postProcessConfigPath": "../models/cascadercnn/faster_rcnn_coco_uncut.cfg", + "labelPath": "../models/cascadercnn/faster_rcnn_coco.names", + "tensorFormat":"1", + "waitingTime":"3000" + }, + "factory": "mxpi_modelinfer", + "next": "queue1" + }, + "queue1": { + "props": { + "max-size-buffers": "50" + }, + "factory": "queue", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + } + } +} diff --git a/mxVision/MultiThread/python/README.zh.md b/mxVision/MultiThread/python/README.zh.md new file mode 100644 index 000000000..fa181c23b --- /dev/null +++ b/mxVision/MultiThread/python/README.zh.md @@ -0,0 +1,99 @@ +# Multi Thread + +## 1.介绍 + +多线程调用SDK的发送和接收数据接口函数 + +## 2.环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| ------------------------------------ | -------------- | +| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 | +| x86_64+Atlas 300I 推理卡(型号3010) | CentOS 7.6 | +| ARM+Atlas 300I 推理卡 (型号3000) | 
Ubuntu 18.04.1 |
+| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 |
+
+- 软件依赖
+
+| 软件名称 | 版本 |
+| -------- | ------ |
+| cmake | 3.5.1+ |
+| mxVision | 0.2 |
+| Python | 3.7.5 |
+| opencv-python | 3.4+ |
+| mmcv | - |
+
+## 3.预准备
+
+脚本转换为unix格式以及添加脚本执行权限
+
+```bash
+sed -i 's/\r$//' ./*.sh
+chmod +x ./*.sh
+```
+
+配置环境变量
+
+```bash
+export MX_SDK_HOME=${安装路径}/mxVision
+```
+
+## 4.运行
+
+### 4.1 运行前配置
+- 在`MultiThread`下创建`models`目录
+```bash
+mkdir models
+```
+- 获取pipeline文件中推理所需的om等文件,并拷贝到`models`路径下
+
+- 将测试图片(仅支持.jpg格式)拷贝到`MultiThread`下的`picture`目录(必须包含test.jpg)
+
+- 修改run.sh文件的第32行,选择执行的py文件(不同的py文件中调用不同的SDK函数或者使用不同的调用方式)
+
+### 4.2 运行
+
+- 使用默认pipeline文件
+
+```bash
+./run.sh
+```
+注意:
+
+1.调用main.py文件时,对应EasyStream.pipeline文件(此样例只获取picture/test.jpg)
+
+2.调用main_sendprotobuf.py文件时,对应EasyStream_protobuf.pipeline文件(此样例获取picture目录下的所有.jpg)
+
+- 自定义pipeline文件(例如:mypipeline.pipeline)
+
+```bash
+./run.sh ./mypipeline.pipeline
+```
+
+### 4.3 结果
+
+main.py运行结果格式如下:
+
+`End to get data, threadId = 1, result = b'{"MxpiObject":[{"classVec":[{"classId":15,"className":"cat","confidence":0.98471146799999998,"headerVec":[]}],"x0":86.182579000000004,"x1":247.33078,"y0":86.406199999999998,"y1":442.07312000000002},{"classVec":[{"classId":16,"className":"dog","confidence":0.99579948200000001,"headerVec":[]}],"x0":220.453766,"x1":434.736786,"y0":132.42176799999999,"y1":466.86648600000001}]}'`
+
+main_sendprotobuf.py运行结果格式如下:
+
+`result: cat:0.9853515625
+ result: dog:0.978515625
+ result: dog:0.99658203125
+ result: dog:0.99609375
+ result: dog:0.99560546875
+ result: dog:0.9951171875
+ poss stream process finish
+`
+
+## 5.FAQ
+
+### 5.1 运行程序时,cv2报错
+
+参照`AllObjectsStructuring`样例的README.md文件中的`4 准备`的步骤8安装相关依赖库
+
+### 5.2 运行程序时,mmcv报错
+
+执行`pip3.7 install mmcv`安装相关依赖库
\ No newline at end of file
diff --git a/mxVision/MultiThread/python/main.py b/mxVision/MultiThread/python/main.py
new file mode 100644
index 000000000..5bc85c7f9
--- /dev/null
+++ b/mxVision/MultiThread/python/main.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import threading
+
+from StreamManagerApi import MxDataInput, StreamManagerApi
+
+
+def SendAndGetData(streamManagerApi, streamName, threadId, dataInput):
+    for i in range(5):
+        print("Start to send data, threadId = %d" % (threadId))
+        ret = streamManagerApi.SendData(streamName, 0, dataInput)
+        if ret != 0:
+            print("Failed to send data to stream.")
+            exit()
+
+        # Obtain the inference result by specifying streamName and uniqueId.
+        inferResult = streamManagerApi.GetResult(streamName, 0, 15000)
+        if inferResult.errorCode != 0:
+            print("GetResult error.
errorCode=%d" % (inferResult.errorCode)) + exit() + print("End to get data, threadId = %d, result = %s" % (threadId, inferResult.data)) + + +if __name__ == '__main__': + if len(sys.argv) > 1: + fileName = sys.argv[1] + else: + fileName = "./EasyStream.pipeline" + # init stream manager + streamManagerApi = StreamManagerApi() + ret = streamManagerApi.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + + # create streams by pipeline config file + with open(fileName, 'rb') as f: + pipelineStr = f.read() + ret = streamManagerApi.CreateMultipleStreams(pipelineStr) + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + + # Construct the input of the stream + dataInput = MxDataInput() + with open("../picture/test.jpg", 'rb') as f: + dataInput.data = f.read() + + # Inputs data to a specified stream based on streamName. + streamName = b'detection0' + # Multi thread + + thread1_1 = threading.Thread(target=SendAndGetData, name='SendAndGetData0', args=(streamManagerApi, + streamName, 1, dataInput)) + thread1_2 = threading.Thread(target=SendAndGetData, name='SendAndGetData1', args=(streamManagerApi, + streamName, 2, dataInput)) + streamName = b'detection1' + thread2_1 = threading.Thread(target=SendAndGetData, name='SendAndGetData2', args=(streamManagerApi, + streamName, 3, dataInput)) + thread2_2 = threading.Thread(target=SendAndGetData, name='SendAndGetData3', args=(streamManagerApi, + streamName, 4, dataInput)) + streamName = b'detection2' + thread3_1 = threading.Thread(target=SendAndGetData, name='SendAndGetData4', args=(streamManagerApi, + streamName, 5, dataInput)) + thread3_2 = threading.Thread(target=SendAndGetData, name='SendAndGetData5', args=(streamManagerApi, + streamName, 6, dataInput)) + thread1_1.start() + thread1_2.start() + thread2_1.start() + thread2_2.start() + thread3_1.start() + thread3_2.start() + + thread1_1.join() + thread1_2.join() + thread2_1.join() + thread2_2.join() + thread3_1.join() + thread3_2.join() + # destroy streams + streamManagerApi.DestroyAllStreams() diff --git a/mxVision/MultiThread/python/main_sendprotobuf.py b/mxVision/MultiThread/python/main_sendprotobuf.py new file mode 100644 index 000000000..793d8a599 --- /dev/null +++ b/mxVision/MultiThread/python/main_sendprotobuf.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import os +import cv2 +import MxpiDataType_pb2 as MxpiDataType +from StreamManagerApi import MxDataInput, StreamManagerApi, StringVector, InProtobufVector, MxProtobufIn +import numpy as np +import sys +import argparse +import mmcv +import threading +from multiprocessing import Process +from multiprocessing import Queue +import time + + +def preprocess_FasterRCNN_mmdet(input_image): + # define the output file name + one_img = mmcv.imread(os.path.join(input_image)) + two_img = one_img.copy() + one_img = mmcv.imresize(one_img, (1216, 800)) + mean = np.array([123.675, 116.28, 103.53], np.float32) + std = np.array([58.395, 57.12, 57.375], np.float32) + one_img = mmcv.imnormalize(one_img, mean, std) + one_img = one_img.transpose(2, 0, 1) + return one_img, two_img + + +def parse_arguments(argv): + parser = argparse.ArgumentParser() + + parser.add_argument('--src_pic_path', type=str, default='../picture', + help='ground truth, having default value ./test') + parser.add_argument('--des_pic_path', type=str, default='./output_multi/', + help='des box txt, having default value ./output_path/') + return parser.parse_args(argv) + + +def readlabels(labelconf): + labellist = dict() + linenum = 0 + with open(labelconf, 'rb') as fp: + while True: + line = fp.readline() + if not line: + break + labellist[linenum] = line.decode().replace('\n', '') + linenum = linenum + 1 + return labellist + + +class sendStreamThread(threading.Thread): + def __init__(self, deviceId, streamName, src_dir_name, streamManagerApi, qrecv, qsend, count): + threading.Thread.__init__(self) + self.deviceId = deviceId + self.streamName = streamName + self.src_dir_name = src_dir_name + self.streamManagerApi = streamManagerApi + self.qrecv = qrecv + self.qsend = qsend + self.count = count + + def __sendStream(self, img): + print("start sendStream") + start_time = time.time() + + img = img.astype(np.float32) + array_bytes = img.tobytes() + inPluginId = 0 + dataInput = MxDataInput() + dataInput.data = array_bytes + key = b'appsrc1' + protobufVec = InProtobufVector() + visionList = MxpiDataType.MxpiVisionList() + visionVec = visionList.visionVec.add() + visionVec.visionInfo.format = 1 + visionVec.visionInfo.width = 1216 + visionVec.visionInfo.height = 800 + visionVec.visionInfo.widthAligned = 1216 + visionVec.visionInfo.heightAligned = 800 + visionVec.visionData.deviceId = self.deviceId + visionVec.visionData.memType = 0 + visionVec.visionData.dataStr = dataInput.data + visionVec.visionData.dataSize = 11673600 + protobuf = MxProtobufIn() + protobuf.key = key + protobuf.type = b'MxTools.MxpiVisionList' + protobuf.protobuf = visionList.SerializeToString() + protobufVec.push_back(protobuf) + + uniqueId = self.streamManagerApi.SendProtobuf(self.streamName, inPluginId, protobufVec) + + end_time = time.time() + print("sendStream:", end_time - start_time) + if uniqueId < 0: + print("Failed to send data to stream.") + return False + return True + + def __getStream(self): + start_time = time.time() + inPluginId = 0 + keyVec = StringVector() + keyVec.push_back(b'mxpi_modelinfer0') + inferResult = self.streamManagerApi.GetProtobuf(self.streamName, inPluginId, keyVec) + if inferResult.size() == 0: + print("inferResult is null") + exit() + if inferResult[0].errorCode != 0: + print("GetProtobuf error. 
errorCode=%d" % ( + inferResult[0].errorCode)) + exit() + + result = MxpiDataType.MxpiTensorPackageList() + result.ParseFromString(inferResult[0].messageBuf) + bbox_conf = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, np.float32) + class_id = np.frombuffer(result.tensorPackageVec[0].tensorVec[1].dataStr, np.int64) + bbox_conf_vec = [] + for i in range(0, len(bbox_conf), 5): + temp = [] + for j in range(5): + temp.append(bbox_conf[j + i]) + temp.append(class_id[i // 5]) + bbox_conf_vec.append(temp) + bbox_conf_vec = np.array(bbox_conf_vec) + bbox = bbox_conf_vec[:, :4] + scores = bbox_conf_vec[:, 4] + class_id = bbox_conf_vec[:, 5] + order = scores.ravel().argsort()[::-1] + order = order[:10] + class_id = class_id[order] + scores = scores[order] + bbox = bbox[order, :] + keep = np.where(scores > 0.40)[0] + class_id = class_id[keep] + scores = scores[keep] + bbox = bbox[keep, :] + end_time = time.time() + print("recvStream:", end_time - start_time) + return class_id, scores, bbox + + def run(self): + q11 = Queue() + while True: + sendCount = 0 + while True: + file_name = self.qrecv.get() + if file_name == "NULL": + break + else: + print(file_name) + file_path = [self.src_dir_name, "/", file_name] + file_path = "".join(file_path) + img, src_img = preprocess_FasterRCNN_mmdet(file_path) + if self.__sendStream(img): + q11.put(file_name) + sendCount = sendCount + 1 + + if sendCount == self.count: + break + + for i in range(0, sendCount): + file_name = q11.get() + class_id, scores, bbox = self.__getStream() + self.qsend.put((file_name, class_id, scores, bbox)) + + if sendCount != self.count: + self.qsend.put(("NULL", "", "", "")) + break + + +class streamProcess(Process): + def __init__(self, deviceId, streamName, src_dir_name, qrecv, qsend, count): + Process.__init__(self) + self.deviceId = deviceId + self.streamName = streamName + self.src_dir_name = src_dir_name + self.qrecv = qrecv + self.qsend = qsend + self.count = count + + def run(self): + streamManagerApi = StreamManagerApi() + ret = streamManagerApi.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + + # create streams by pipeline config file + with open(pipeline_path, 'rb') as f: + pipelineStr = f.read() + ret = streamManagerApi.CreateMultipleStreams(pipelineStr) + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + + t1 = sendStreamThread(self.deviceId, self.streamName, self.src_dir_name, streamManagerApi, self.qrecv, + self.qsend, self.count) + t1.start() + t1.join() + + streamManagerApi.DestroyAllStreams() + + +class postStreamProcess(Process): + def __init__(self, labellist, src_dir_name, res_dir_name, qrecv): + Process.__init__(self) + self.labellist = labellist + self.src_dir_name = src_dir_name + self.res_dir_name = res_dir_name + self.qrecv = qrecv + + def __savePic(self, file_name, class_id, scores, bbox): + file_path = [self.src_dir_name, "/", file_name] + file_path = "".join(file_path) + portion = os.path.splitext(file_name) + img, src_img = preprocess_FasterRCNN_mmdet(file_path) + + img = img.astype(np.float32) + img = img.transpose(1, 2, 0) + for index, value in enumerate(bbox): # y1, x1, y2, x2 -> x1 y1 x2 y2 + src_shape = src_img.shape + dst_shape = img.shape + dw = dst_shape[1] / src_shape[1] + dh = dst_shape[0] / src_shape[0] + labelname = self.labellist[int(class_id[index])] + text = [labelname, ":", str(scores[index])] + text = "".join(text) + cv2.rectangle(src_img, (int(value[0] / dw), int(value[1] / dh)), (int(value[2] / dw), 
int(value[3] / dh)), + (0, 255, 0)) + font = cv2.FONT_HERSHEY_SIMPLEX + print("result: ", text) + cv2.putText(src_img, text, (int(value[0] / dw), int((value[1] / dh + 30))), font, 1, (0, 0, 255), 2) + cv2.imwrite(self.res_dir_name + portion[0] + '_res' + portion[1], src_img) + + def run(self): + while True: + file_name, class_id, scores, bbox = self.qrecv.get() + if file_name == "NULL": + print("poss stream process finish") + break + else: + self.__savePic(file_name, class_id, scores, bbox) + + +if __name__ == '__main__': + args = parse_arguments(sys.argv[1:]) + pipeline_path = "./EasyStream_protobuf.pipeline" + labelconf = "../models/cascadercnn/faster_rcnn_coco.names" + labellist = readlabels(labelconf) + src_dir_name = args.src_pic_path + res_dir_name = args.des_pic_path + + q0 = Queue() + q1 = Queue() + + p1 = streamProcess(0, b'detection0', src_dir_name, q0, q1, 4) + p2 = streamProcess(1, b'detection1', src_dir_name, q0, q1, 4) + p3 = streamProcess(2, b'detection2', src_dir_name, q0, q1, 4) + p4 = streamProcess(3, b'detection3', src_dir_name, q0, q1, 4) + + p5 = postStreamProcess(labellist, src_dir_name, res_dir_name, q1) + p6 = postStreamProcess(labellist, src_dir_name, res_dir_name, q1) + p7 = postStreamProcess(labellist, src_dir_name, res_dir_name, q1) + p8 = postStreamProcess(labellist, src_dir_name, res_dir_name, q1) + + p1.start() + p2.start() + p3.start() + p4.start() + p5.start() + p6.start() + p7.start() + p8.start() + + start_time = time.time() + # Inputs data to a specified stream based on streamName. + file_list = os.listdir(src_dir_name) + if not os.path.exists(res_dir_name): + os.makedirs(res_dir_name) + for file_name in file_list: + if file_name.endswith(".JPG") or file_name.endswith(".jpg"): + q0.put(file_name) + + q0.put("NULL") + q0.put("NULL") + q0.put("NULL") + q0.put("NULL") + + p1.join() + p2.join() + p3.join() + p4.join() + p5.join() + p6.join() + p7.join() + p8.join() + + end_time = time.time() + print('total:\n', end_time - start_time) diff --git a/mxVision/MultiThread/python/run.sh b/mxVision/MultiThread/python/run.sh new file mode 100644 index 000000000..5b218672b --- /dev/null +++ b/mxVision/MultiThread/python/run.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
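+
+# Usage: bash run.sh [pipeline_file]
+# main.py falls back to ./EasyStream.pipeline when no argument is given.
+# To run the protobuf sample instead, replace "main.py" with
+# "main_sendprotobuf.py" in the python3.7 line below.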
+ +set -e + +CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exit ; } ; pwd) + +# Simple log helper functions +info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } +warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +#to set PYTHONPATH, import the StreamManagerApi.py +export PYTHONPATH=${MX_SDK_HOME}/python:$PYTHONPATH + +python3.7 main.py $1 +exit 0 \ No newline at end of file diff --git a/mxVision/VideoQualityDetection/README.zh.md b/mxVision/VideoQualityDetection/README.zh.md new file mode 100644 index 000000000..b167e49a0 --- /dev/null +++ b/mxVision/VideoQualityDetection/README.zh.md @@ -0,0 +1,87 @@ +# VideoQualityDetection + +## 1 简介 + +VideoQualityDetection基于mxVision SDK开发的参考用例,以昇腾Atlas300卡为主要的硬件平台,用于对IPC视频进行不同的算法功能质量诊断,在日志中记录诊断信息。目前支持的算法功能有:视频亮度检测,视频遮挡检测,视频模糊检测,视频噪声检测,视频偏色检测,视频条纹检测,视频黑屏检测,视频冻结检测,视频抖动检测,视频突变检测,PTZ云台运动检测 +## 2 环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| ----------------------------------- | -------------- | +| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 | +| x86_64+Atlas 300I 推理卡(型号3010) | CentOS 7.6 | +| ARM+Atlas 300I 推理卡 (型号3000) | Ubuntu 18.04.1 | +| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 | + +- 软件依赖 + +| 软件名称 | 版本 | +| -------- | ------ | +| cmake | 3.5.1+ | +| mxVision | 0.2 | + +## 3 准备 + +**步骤1:** 参考安装教程《mxVision 用户指南》安装 mxVision SDK。 + +**步骤2:** 配置 mxVision SDK 环境变量。 + +`export MX_SDK_HOME=${安装路径}/mxVision ` + +注:本例中mxVision SDK安装路径为 /root/MindX_SDK。 + +**步骤3:** 修改项目根目录下 VideoQualityDetection/pipeline/VideoQualityDetection.pipeline文件: + +①:将所有“rtspUrl”字段值("rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264")替换为可用的 rtsp 流源地址(目前只支持264格式的rtsp流,264视频的分辨率范围最小为128*128,最大为4096*4096,不支持本地视频); + +②:将所有“deviceId”字段值替换为实际使用的device的id值,可用的 device id 值可以使用如下命令查看:`npu-smi info` + +③:如需配置多路输入视频流,需要配置多个拉流、解码、质量诊断插件。 + +④:用户可以自定义视频质量检测插件“qualityDetectionConfigContent“字段的值,用例会优先根据该字段的配置作为参数值运行,未配置到的参数会使用默认值,下面是该字段所有参数值的介绍,限制和默认值: +"FRAME_LIST_LEN":插件存放视频帧队列长度,当视频帧队列满了之后才开始质量诊断,必须是大于等于2的正整数,下面所有算法的帧间隔须小于该值,默认值“20” +"BRIGHTNESS_SWITCH":视频亮度检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"BRIGHTNESS_FRAME_INTERVAL":视频亮度检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"BRIGHTNESS_THRESHOLD":视频亮度检测算法阈值,用户可根据实际场景进行修改,默认值“1” +"OCCLUSION_SWITCH":视频遮挡检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"OCCLUSION_FRAME_INTERVAL":视频遮挡检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"OCCLUSION_THRESHOLD":视频遮挡检测算法阈值,阈值范围是0-1,用户可根据实际场景进行修改,默认值“0.32” +"BLUR_SWITCH":视频模糊检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"BLUR_FRAME_INTERVAL":视频模糊检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"BLUR_THRESHOLD":视频模糊检测算法阈值,用户可根据实际场景进行修改,默认值“2000” +"NOISE_SWITCH":视频噪声检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"NOISE_FRAME_INTERVAL":视频噪声检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"NOISE_THRESHOLD":视频噪声检测算法阈值,用户可根据实际场景进行修改,默认值“0.005” +"COLOR_CAST_SWITCH":视频偏色检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"COLOR_CAST_FRAME_INTERVAL":视频偏色检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"COLOR_CAST_THRESHOLD":视频偏色检测算法阈值,用户可根据实际场景进行修改,默认值“1.5” 
+"STRIPE_SWITCH":视频条纹检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"STRIPE_FRAME_INTERVAL":视频条纹检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"STRIPE_THRESHOLD":视频条纹检测算法阈值,用户可根据实际场景进行修改,默认值“0.0015” +"DARK_SWITCH":视频黑屏检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"DARK_FRAME_INTERVAL":视频黑屏检测帧间隔,每隔该间隔数就会抽取当前视频帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"DARK_THRESHOLD":视频黑屏检测算法阈值,阈值范围是0-1,用户可根据实际场景进行修改,默认值“0.72” +"VIDEO_FREEZE_SWITCH":视频冻结检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"VIDEO_FREEZE_FRAME_INTERVAL":视频冻结检测帧间隔,每隔该间隔数就会抽取当前视频帧与前一间隔帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"VIDEO_FREEZE_THRESHOLD":视频冻结检测算法阈值,阈值范围是0-1,用户可根据实际场景进行修改,默认值“0.1” +"VIEW_SHAKE_SWITCH":视频抖动检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"VIEW_SHAKE_FRAME_INTERVAL":视频抖动检测帧间隔,每隔该间隔数就会抽取当前视频帧与前一间隔帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"VIEW_SHAKE_THRESHOLD":视频抖动检测算法阈值,阈值范围是10-100,用户可根据实际场景进行修改,默认值“20” +"SCENE_MUTATION_SWITCH":视频突变检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"SCENE_MUTATION_FRAME_INTERVAL":视频突变检测帧间隔,每隔该间隔数就会抽取当前视频帧与前一间隔帧来进行质量诊断,必须是正整数并且小于FRAME_LIST_LEN,默认值“10” +"SCENE_MUTATION_THRESHOLD":视频突变检测算法阈值,阈值范围是0-1,用户可根据实际场景进行修改,默认值“0.5” +"PTZ_MOVEMENT_SWITCH":PTZ云台运动检测算法开关,“true”为进行该功能检测,“false”为不进行该功能检测,默认值为“false” +"PTZ_MOVEMENT_FRAME_INTERVAL":PTZ云台运动检测帧间隔,每隔该间隔数就会抽取该间隔的视频帧进行质量诊断,必须是大于1的正整数并且小于FRAME_LIST_LEN,默认值“10” +"PTZ_MOVEMENT_THRESHOLD":PTZ云台运动检测算法阈值,用户可根据实际场景进行修改,默认值“0.95” + +## 4 运行 + +运行 +`bash run.sh` + +正常启动后,控制台会输出视频质量检测结果,结果日志将保存到`${安装路径}/mxVision/logs`中 + +手动执行ctrl + C结束程序 + +若中途视频流获取异常(例如视频流中断),程序会等待处理,不会自动退出 diff --git a/mxVision/VideoQualityDetection/main.cpp b/mxVision/VideoQualityDetection/main.cpp new file mode 100644 index 000000000..cc9161b90 --- /dev/null +++ b/mxVision/VideoQualityDetection/main.cpp @@ -0,0 +1,65 @@ +/* +* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/
+
+#include <csignal>   // signal(), SIGINT
+#include <unistd.h>  // usleep()
+#include "MxBase/Log/Log.h"
+#include "MxStream/StreamManager/MxStreamManager.h"
+
+namespace {
+const int SIGNAL_CHECK_TIMESTEP = 10000;
+static bool signalReceived = false;
+}
+
+static void SigHandler(int signal)
+{
+    if (signal == SIGINT) {
+        signalReceived = true;
+    }
+}
+
+APP_ERROR TestVideoQualityDetection()
+{
+    std::string pipelineConfigPath = "./pipeline/VideoQualityDetection.pipeline";
+    // init stream manager
+    MxStream::MxStreamManager mxStreamManager;
+    APP_ERROR ret = mxStreamManager.InitManager();
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to init Stream manager, ret = " << ret << ".";
+        return ret;
+    }
+
+    ret = mxStreamManager.CreateMultipleStreamsFromFile(pipelineConfigPath);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to create Stream, ret = " << ret << ".";
+        return ret;
+    }
+
+    signal(SIGINT, SigHandler);
+    while (!signalReceived) {
+        usleep(SIGNAL_CHECK_TIMESTEP);
+    }
+
+    // destroy streams
+    mxStreamManager.DestroyAllStreams();
+    return APP_ERR_OK;
+}
+
+int main(int argc, char *argv[])
+{
+    TestVideoQualityDetection();
+    return 0;
+}
diff --git a/mxVision/VideoQualityDetection/pipeline/VideoQualityDetection.pipeline b/mxVision/VideoQualityDetection/pipeline/VideoQualityDetection.pipeline
new file mode 100644
index 000000000..e35095e0b
--- /dev/null
+++ b/mxVision/VideoQualityDetection/pipeline/VideoQualityDetection.pipeline
@@ -0,0 +1,70 @@
+{
+    "detection": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "mxpi_rtspsrc0": {
+            "factory": "mxpi_rtspsrc",
+            "props": {
+                "rtspUrl": "rtsp://xxx.xxx.xxx.xxx:xxxx/xxxx.264",
+                "channelId": "0"
+            },
+            "next": "mxpi_videodecoder0"
+        },
+        "mxpi_videodecoder0": {
+            "factory": "mxpi_videodecoder",
+            "props": {
+                "inputVideoFormat": "H264",
+                "outputImageFormat": "YUV420SP_NV12",
+                "vdecChannelId": "0"
+            },
+            "former": "mxpi_rtspsrc0",
+            "next": "mxpi_qualitydetection0"
+        },
+        "mxpi_qualitydetection0": {
+            "props": {
+                "qualityDetectionConfigContent": {
+                    "FRAME_LIST_LEN": "20",
+                    "BRIGHTNESS_SWITCH": "true",
+                    "BRIGHTNESS_FRAME_INTERVAL": "10",
+                    "BRIGHTNESS_THRESHOLD": "1",
+                    "OCCLUSION_SWITCH": "true",
+                    "OCCLUSION_FRAME_INTERVAL": "10",
+                    "OCCLUSION_THRESHOLD": "0.32",
+                    "BLUR_SWITCH": "true",
+                    "BLUR_FRAME_INTERVAL": "10",
+                    "BLUR_THRESHOLD": "2000",
+                    "NOISE_SWITCH": "true",
+                    "NOISE_FRAME_INTERVAL": "10",
+                    "NOISE_THRESHOLD": "0.005",
+                    "COLOR_CAST_SWITCH": "true",
+                    "COLOR_CAST_FRAME_INTERVAL": "10",
+                    "COLOR_CAST_THRESHOLD": "1.5",
+                    "STRIPE_SWITCH": "true",
+                    "STRIPE_FRAME_INTERVAL": "10",
+                    "STRIPE_THRESHOLD": "0.0015",
+                    "DARK_SWITCH": "true",
+                    "DARK_FRAME_INTERVAL": "10",
+                    "DARK_THRESHOLD": "0.72",
+                    "VIDEO_FREEZE_SWITCH": "true",
+                    "VIDEO_FREEZE_FRAME_INTERVAL": "10",
+                    "VIDEO_FREEZE_THRESHOLD": "0.1",
+                    "VIEW_SHAKE_SWITCH": "true",
+                    "VIEW_SHAKE_FRAME_INTERVAL": "10",
+                    "VIEW_SHAKE_THRESHOLD": "20",
+                    "SCENE_MUTATION_SWITCH": "true",
+                    "SCENE_MUTATION_FRAME_INTERVAL": "10",
+                    "SCENE_MUTATION_THRESHOLD": "0.5",
+                    "PTZ_MOVEMENT_SWITCH": "true",
+                    "PTZ_MOVEMENT_FRAME_INTERVAL": "10",
+                    "PTZ_MOVEMENT_THRESHOLD": "0.95"
+                }
+            },
+            "factory": "mxpi_qualitydetection",
+            "next": "fakesink0"
+        },
+        "fakesink0": {
+            "factory": "fakesink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/mxVision/VideoQualityDetection/run.sh b/mxVision/VideoQualityDetection/run.sh
new file mode 100644
index 000000000..5b010023b
--- /dev/null
+++ b/mxVision/VideoQualityDetection/run.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib":"${MX_SDK_HOME}/opensource/lib":"${MX_SDK_HOME}/opensource/lib64":"/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64":${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER="${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner" +export GST_PLUGIN_PATH="${MX_SDK_HOME}/opensource/lib/gstreamer-1.0":"${MX_SDK_HOME}/lib/plugins" + +# complie +g++ main.cpp -I "${MX_SDK_HOME}/include/" -I "${MX_SDK_HOME}/opensource/include/" -L "${MX_SDK_HOME}/lib/" \ +-L "${MX_SDK_HOME}/opensource/lib/" -std=c++11 -pthread -D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private -fPIC -fstack-protector-all \ +-g -Wl,-z,relro,-z,now,-z,noexecstack -pie -Wall -lglog -lmxbase -lstreammanager -lcpprest -lprotobuf -o main + +# run +./main +exit 0 diff --git a/tools/precision_analysis/README.md b/tools/precision_analysis/README.md new file mode 100644 index 000000000..1aa2e4c80 --- /dev/null +++ b/tools/precision_analysis/README.md @@ -0,0 +1,135 @@ +# 模型精度评估工具 + +## 1 内容简介 + +本样例是由昇腾系列Atlas服务器搭载,用于AI模型精度评估的实用工具。 + +此工具提供以下功能: + +1. 指定数据集和模型,进行自动推理,收集结果; +2. 指定推理结果和评估标准,进行精度评估。 + +此工具具有以下特点: + +1. 简易部署:部署Atlas服务器环境后,只需要根据模型文件的格式安装对应的推理框架即可使用。 +2. 应用场景广泛:此工具当前已支持多种框架下的模型推理,包括:MindSpore, Caffe, TensorFlow, PyTorch, 以及基于MindX SDK的离线推理框架;另外,此工具还支持多种数据集的自动处理和结果自动评估,包括:COCO, SVT等。 +3. 可扩展性:此工具提供简易的开发方式,用户可自行开发包括但不限于: + 1. 其他数据集的处理与评估方法; + 2. 基于其他框架的推理方法。 + + + +## 2 环境依赖 + +- 支持的硬件形态和操作系统版本 + +| 硬件形态 | 操作系统版本 | +| ------------------------------------- | -------------- | +| x86_64+Atlas 300I 推理卡(型号3010) | Ubuntu 18.04.1 | +| x86_64+Atlas 300I 推理卡 (型号3010) | CentOS 7.6 | +| ARM+Atlas 300I 推理卡 (型号3000) | Ubuntu 18.04.1 | +| ARM+Atlas 300I 推理卡 (型号3000) | CentOS 7.6 | + +- 软件依赖 + +| 软件名称 | 版本 | +| ----------- | ----- | +| pycocotools | 2.0 | +| MindX SDK | 2.0.1 | +| Python | 3.7.5 | + + + +## 3 项目目录 + +本代码仓名称为precision_analysis,工程目录如下图所示: + +``` +├── precision_analysis +│   ├── executor +│   | ├── data +| | | ├── dataloader.py +| | | └── image_loader.py +│   | ├── model +| | | ├── base.py +| | | ├── caffe_model.py +| | | ├── mindspore_model.py +| | | ├── onnx_model.py +| | | ├── pb_model.py +| | | └── pipeline.py +│   | ├── element.py +│   | ├── inference.py +│   | └── postprocess.py +│   ├── indicator +│   | ├── criterion.py +│   | └── metrics.py +│   ├── interface +│   | ├── compare.py +│   | ├── eval.py +│   | └── summary.py +│   ├── test +│   | ├── build.py +│   | ├── test_CRNN.py +│   | ├── test_Metrics.py +│   | └── test_ssd_mobilenet_fpn.py +│   ├── utils +│   | ├── checker.py +│   | ├── coding_conversion.py +│   | ├── collection.py +│   | ├── arguments.py +│   | ├── constants.py +│   | └── parser.py +│   ├── main.py +│   ├── test.py +│   ├── make_boarder_test.py +│   ├── README.md +│   └── requirements.txt +``` + + + +## 4 使用方法与自定义开发 + +通常情况下的使用方法可参考 main.py ,以下为各模块功能具体介绍。 + +此工具包含以下功能模块: + +1. ModelExecutor:覆盖多个框架推理功能的基类,另外还可完成基于MindX SDK的推理任务。 +2. 
DataLoader:提供数据集自动化处理功能,将数据集进行格式转换和适当预处理后,送入推理流程,同样作为流程化计算模块而存在;并且,该模块可同时生成数据集信息参数列表(命名为 shared_params),并传入后续流程,用户也可以根据自己的数据集生成相关参数,利用shared_params进行传递(常见参数有:图片宽高、图片格式等)。 +3. InferenceExecutor:可灵活使用以上三个流程化计算模块,并采用流程编排的方式组合而成。是多个计算流程的串行集合,并整体作为推理服务模块的提供者,供精度评估过程使用。 +4. EvalCriterion:提供多个常见的精度指标的计算方法。 +5. Evaluator:对外提供精度评估接口,对内通过调用InferenceExecutor完成推理计算,再通过调用EvalCriterion完成精度评估。 +6. PipeElement:可自定义函数继承该基类,并作为计算方法在InferenceExecutor中使用 + +以上所有模块均采用自定义开发方式,已提供多种常见方法,用户如需其他功能,可参考已有方法自行开发。 + + + +## 5 运行样例 + +此目录已包含 SSD_MobileNet_FPN 与 CRNN 两个模型的多个运行样例,这里以 SSD_MobileNet_FPN 的运行流程作为样例介绍。 + +1. 运行 + + `python main.py --mode test.ssd_mobilenet_fpn.pipeline -data-loading-path ${coco数据集路径} -label-loading-path ${coco数据集标签路径} -pipeline-cfg-path ${SDK_pipeline文件路径} -stream-name ${pipeline配置stream名称}` + + 可通过配置 --mode 参数的不同值选择不同的运行样例(当前仅提供 test 运行模式),其中包括(可参考 utils.constants): + + 1. test.ssd_mobilenet_fpn.pipeline 为运行 MindX SDK pipeline 推理样例 + 2. test.ssd_mobilenet_fpn.inference 为运行 推理流程模块 的样例 + 3. test.ssd_mobilenet_fpn.evaluation 为运行 模型精度评估 的样例 + + 配置 ${coco数据集路径}:下载coco数据集,配置数据集图片路径,例如:./coco/val2017 + + 配置 ${coco数据集标签路径}:配置数据集标签文件路径:例如:./coco/annotations/instances_val2017.json + + 配置 ${SDK_pipeline文件路径}:运行的pipeline的存放路径 + + 配置 ${pipeline配置stream名称}:运行的pipeline中的stream名称 + +2. 该函数调用 test 目录下的 test_ssd_mobilenet_fpn.py 中的 test_pipeline 功能得到样例运行结果 + +3. 可修改 ssd_mobilenet_fpn 为 crnn 来运行 CRNN 模型的样例 + + + diff --git a/tools/precision_analysis/executor/__init__.py b/tools/precision_analysis/executor/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/tools/precision_analysis/executor/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/tools/precision_analysis/executor/data/__init__.py b/tools/precision_analysis/executor/data/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/tools/precision_analysis/executor/data/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" diff --git a/tools/precision_analysis/executor/data/dataloader.py b/tools/precision_analysis/executor/data/dataloader.py new file mode 100644 index 000000000..21ec4b4a0 --- /dev/null +++ b/tools/precision_analysis/executor/data/dataloader.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from utils.checker import check_loading_path + + +class DataLoader(object): + def __init__(self, loading_path, *args, **kwargs): + check_loading_path(loading_path, "loading_path") + self.loading_path = loading_path + self.load_dataset(*args, **kwargs) + + self.batch_size = 1 + + def load_dataset(self, *args, **kwargs): + raise NotImplementedError("Please specify a data loading method.") + + def __iter__(self): + raise NotImplementedError("Please specify a data loading method.") + + def set_batch_size(self, batch_size): + self.batch_size = batch_size diff --git a/tools/precision_analysis/executor/data/image_loader.py b/tools/precision_analysis/executor/data/image_loader.py new file mode 100644 index 000000000..b4c387286 --- /dev/null +++ b/tools/precision_analysis/executor/data/image_loader.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os +import cv2 + +from executor.data.dataloader import DataLoader + + +class ImageLoaderDir(DataLoader): + def load_dataset(self, *args, **kwargs): + """ + + :param args: + :param kwargs: + image_format indicate 'RGB' or 'BGR' + suffix_set such as .jpg .json ... + shared_params storing the params such as image height, weight ... 
+        :return:
+        """
+        image_format = kwargs.get("image_format")
+        self.image_format = image_format if image_format else "BGR"
+        if self.image_format not in ["BGR", "RGB"]:
+            raise ValueError("Please specify the image format within ['BGR', "
+                             "'RGB']")
+
+        suffix_set = kwargs.get("suffix_set")
+        self.suffix_set = suffix_set if suffix_set else (".jpg", )
+        if not isinstance(self.suffix_set, (list, tuple)):
+            raise ValueError("You should assign one or one set of suffixes.")
+
+        for item in self.suffix_set:
+            if not isinstance(item, str):
+                raise ValueError("Suffix format looks like '.jpg', '.json'.")
+
+        shared_params = kwargs.get("shared_params")
+        self.shared_params = shared_params if shared_params is not None \
+            else {}
+
+        self.file_list = os.listdir(self.loading_path)
+        self.sample_cnt = len(self.file_list)
+
+    def __iter__(self):
+        idx = 0
+        while idx < self.sample_cnt:
+            file = self.file_list[idx]
+            print(f"filename: {file}")
+            _, suffix = os.path.splitext(file)
+            if self.suffix_set and suffix not in self.suffix_set:
+                idx += 1
+                continue
+            img_path = os.path.join(self.loading_path, file)
+            image = cv2.imread(img_path)
+            if self.image_format == "RGB":
+                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+            h, w, _ = image.shape
+            self.shared_params["height"] = h
+            self.shared_params["weight"] = w
+            self.shared_params["file_name"] = file
+
+            yield image
+            idx += 1
diff --git a/tools/precision_analysis/executor/element.py b/tools/precision_analysis/executor/element.py
new file mode 100644
index 000000000..dab6f143b
--- /dev/null
+++ b/tools/precision_analysis/executor/element.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class PipeElement(object):
+    def __init__(self, name="my_element", shard_params=None):
+        if isinstance(name, str):
+            self.name = name
+        else:
+            raise ValueError("Please specify a string as the instance name.")
+
+        if isinstance(shard_params, (type(None), dict)):
+            self.shard_params = shard_params
+        else:
+            raise ValueError("A valid shard_params should be 'dict'.")
+
+    def __call__(self, input_numpy):
+        # return the result so elements can be chained inside a pipeline
+        return self.run(input_numpy)
+
+    def run(self, input_numpy):
+        raise NotImplementedError("Please specify the run method.")
diff --git a/tools/precision_analysis/executor/inference.py b/tools/precision_analysis/executor/inference.py
new file mode 100644
index 000000000..12694bc0a
--- /dev/null
+++ b/tools/precision_analysis/executor/inference.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from collections.abc import Iterable
+
+
+class InferenceExecutor(object):
+    def __init__(self,
+                 elements,
+                 data_loader=None,
+                 name="my_inference",
+                 shared_params=None,
+                 verbose=False):
+        """
+
+        :param elements: callable component of inference process
+        :param data_loader:
+        :param name: name of this instance
+        :param shared_params: used for passing params throughout the whole
+        process
+        :param verbose: indicates whether to print the progress of inference
+        """
+        if isinstance(name, str):
+            self.name = name
+        else:
+            raise ValueError("Param name should be a string.")
+
+        # shared_params is used for transit throughout the process of
+        # inference.
+        if shared_params is None:
+            self.shared_params = {}
+        elif isinstance(shared_params, dict):
+            self.shared_params = shared_params
+        else:
+            raise ValueError("Param shared_params should be a dict.")
+
+        if isinstance(elements, list):
+            self.pipe = elements
+
+        else:
+            self.pipe = [elements]
+
+        for element in self.pipe:
+            if not hasattr(element, '__call__'):
+                raise ValueError("Param elements should be a callable "
+                                 "instance or a list of callable elements.")
+
+        self.verbose = bool(verbose)
+
+        self.set_data_loader(data_loader)
+
+        self.ret = None
+        self.ret_collection = []
+
+    def infer(self, input_numpy):
+        self.ret = input_numpy
+        for element in self.pipe:
+            self.ret = element(self.ret)
+
+        if self.verbose:
+            print(f"Inference result: {self.ret}")
+
+        return self.ret
+
+    def __call__(self, input_numpy):
+        return self.infer(input_numpy)
+
+    def execute(self):
+        if not self.data_loader:
+            raise RuntimeError("You have not specified a data loader, "
+                               "executing is forbidden.")
+
+        for data in self.data_loader:
+            self.infer(data)
+            self.collect_ret()
+
+        return self.ret_collection
+
+    def push(self, element):
+        self.check_element(element)
+        self.pipe.append(element)
+
+    def pop(self):
+        self.pipe.pop()
+
+    def lpush(self, element):
+        self.check_element(element)
+        self.pipe.insert(0, element)
+
+    def lpop(self):
+        self.pipe.pop(0)
+
+    @staticmethod
+    def check_element(element):
+        # reject non-callable elements; they cannot take part in the pipe
+        if not hasattr(element, '__call__'):
+            raise ValueError("Param element should be a callable.")
+
+    def set_data_loader(self, data_loader):
+        if data_loader and not isinstance(data_loader, Iterable):
+            raise ValueError("Data_loader should be iterable.")
+        else:
+            self.data_loader = data_loader
+
+    def collect_ret(self):
+        if isinstance(self.ret, list):
+            self.ret_collection.extend(self.ret)
+
+        else:
+            self.ret_collection.append(self.ret)
+
+
+if __name__ == "__main__":
+    pipeline = None
+    input_numpy = None
+    inference = InferenceExecutor(pipeline)
+    ret = inference(input_numpy)
diff --git a/tools/precision_analysis/executor/model/__init__.py b/tools/precision_analysis/executor/model/__init__.py
new file mode 100644
index 000000000..53bfa2473
--- /dev/null
+++ b/tools/precision_analysis/executor/model/__init__.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
diff --git a/tools/precision_analysis/executor/model/base.py b/tools/precision_analysis/executor/model/base.py
new file mode 100644
index 000000000..b9d04db73
--- /dev/null
+++ b/tools/precision_analysis/executor/model/base.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from utils.checker import check_loading_path, check_model_input
+
+
+class ModelExecutor(object):
+    def __init__(self, model_path):
+        self.input_name = None
+        self.output_name = None
+        self.input_cnt = 1
+        self.output_cnt = 1
+        check_loading_path(model_path, "model_path")
+        self.build_model(model_path)
+
+    def build_model(self, model_path):
+        raise NotImplementedError("Please specify a graph building method.")
+
+    def forward(self, input_numpy):
+        # subclasses (MSModel, ONNXModel, PBModel) implement forward()
+        raise NotImplementedError("Please specify a model forward method.")
+
+    def __call__(self, input_numpy):
+        check_model_input(input_numpy)
+        return self.forward(input_numpy)
diff --git a/tools/precision_analysis/executor/model/mindspore_model.py b/tools/precision_analysis/executor/model/mindspore_model.py
new file mode 100644
index 000000000..fa560c7c2
--- /dev/null
+++ b/tools/precision_analysis/executor/model/mindspore_model.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+""" + +import numpy as np + +from mindspore import Model, load_checkpoint, load_param_into_net, Tensor +from mindspore.nn import Cell + +from executor.model.base import ModelExecutor + + +class MSModel(ModelExecutor): + def build_model(self, model_path): + """ + :param model_path: + """ + network = self.build_graph() + if not isinstance(network, Cell): + raise ValueError("Please return a Cell instance.") + + self.model = Model(network) + param_dict = load_checkpoint(model_path) + load_param_into_net(network, param_dict) + + def build_graph(self, *args, **kwargs) -> Cell: + raise NotImplementedError("Please specify a graph building method.") + + def forward(self, input_numpy): + if isinstance(input_numpy, np.ndarray): + input_tensor = [input_numpy] + + else: + input_tensor = input_numpy + + input_tensor = [Tensor(tensor) for tensor in input_tensor] + output = self.model.predict(*input_tensor) + return output diff --git a/tools/precision_analysis/executor/model/onnx_model.py b/tools/precision_analysis/executor/model/onnx_model.py new file mode 100644 index 000000000..ee0f4405f --- /dev/null +++ b/tools/precision_analysis/executor/model/onnx_model.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import onnxruntime +import numpy as np + +from executor.model.base import ModelExecutor + + +class ONNXModel(ModelExecutor): + def build_model(self, model_path): + """ + :param model_path: + """ + self.onnx_session = onnxruntime.InferenceSession(model_path) + self.input_name = self.get_input_name() + self.output_name = self.get_output_name() + print("input_name:{}".format(self.input_name)) + print("output_name:{}".format(self.output_name)) + + def get_output_name(self): + self.output_name = [] + for node in self.onnx_session.get_outputs(): + self.output_name.append(node.name) + + self.output_cnt = len(self.output_name) + + def get_input_name(self): + self.input_name = [] + for node in self.onnx_session.get_inputs(): + self.input_name.append(node.name) + + self.input_cnt = len(self.input_name) + + def get_input_feed(self, image_numpy): + if isinstance(image_numpy, np.ndarray) and self.input_cnt == 1: + image_numpy = [image_numpy] + + input_feed = {} + for name, input_data in zip(self.input_name, image_numpy): + input_feed[name] = input_data + return input_feed + + def forward(self, input_numpy): + input_feed = self.get_input_feed(input_numpy) + output = self.onnx_session.run(self.output_name, input_feed=input_feed) + return output diff --git a/tools/precision_analysis/executor/model/pb_model.py b/tools/precision_analysis/executor/model/pb_model.py new file mode 100644 index 000000000..5bffd6d92 --- /dev/null +++ b/tools/precision_analysis/executor/model/pb_model.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import cv2
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.platform import gfile
+
+from executor.model.base import ModelExecutor
+
+
+class PBModel(ModelExecutor):
+    def __init__(self, model_path, input_name, output_name):
+        super(PBModel, self).__init__(model_path)
+        if isinstance(input_name, str):
+            self.input_name = [input_name]
+        elif isinstance(input_name, list):
+            self.input_name = input_name
+        else:
+            raise ValueError("Param input_name should be either str or list.")
+        self.input_cnt = len(self.input_name)
+
+        if isinstance(output_name, str):
+            self.output_name = [output_name]
+        elif isinstance(output_name, list):
+            self.output_name = output_name
+        else:
+            raise ValueError("Param output_name should be either str or list.")
+        self.output_cnt = len(self.output_name)
+
+        print("input_name:{}".format(self.input_name))
+        print("output_name:{}".format(self.output_name))
+
+        # resolve the graph tensors once so forward() can feed and fetch them
+        self.get_input_tensor()
+        self.get_output_tensor()
+
+    def build_model(self, model_path):
+        """
+        :param model_path:
+        """
+        config = tf.ConfigProto()
+        self.sess = tf.Session(config=config)
+        with gfile.FastGFile(model_path, "rb") as f:
+            graph_def = tf.GraphDef()
+            graph_def.ParseFromString(f.read())
+            self.sess.graph.as_default()
+            tf.import_graph_def(graph_def, name="")
+
+    def get_input_tensor(self):
+        self.input_tensor = []
+        for name in self.input_name:
+            tensor = tf.get_default_graph().get_tensor_by_name(name)
+            self.input_tensor.append(tensor)
+
+    def get_output_tensor(self):
+        self.output_tensor = []
+        for name in self.output_name:
+            tensor = tf.get_default_graph().get_tensor_by_name(name)
+            self.output_tensor.append(tensor)
+
+    def get_input_feed(self, image_numpy):
+        if isinstance(image_numpy, np.ndarray) and self.input_cnt == 1:
+            image_numpy = [image_numpy]
+
+        input_feed = {}
+        for tensor, input_data in zip(self.input_tensor, image_numpy):
+            input_feed[tensor] = input_data
+        return input_feed
+
+    def forward(self, input_numpy):
+        input_feed = self.get_input_feed(input_numpy)
+        return self.sess.run(self.output_tensor, feed_dict=input_feed)
+
+
+if __name__ == "__main__":
+    input_name = ""
+    output_name = [
+        "detector/yolo-v4-tiny/Conv_17/BiasAdd:0",
+        "detector/yolo-v4-tiny/Conv_20/BiasAdd:0"
+    ]
+    model_path = ""
+    img_path = ""
+    img = cv2.imread(img_path)
+    model = PBModel(model_path, input_name, output_name)
+    out = model(img)
+    print(out)
diff --git a/tools/precision_analysis/executor/model/pipeline.py b/tools/precision_analysis/executor/model/pipeline.py
new file mode 100644
index 000000000..c200f0922
--- /dev/null
+++ b/tools/precision_analysis/executor/model/pipeline.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os +import cv2 +import numpy as np +from PIL import Image + +from StreamManagerApi import MxDataInput, StreamManagerApi, StringVector + +from utils.coding_conversion import string2binary + + +class Pipeline: + def __init__(self, + pipeline_cfg_file=None, + stream_name=None, + in_plugin_id=0, + out_plugin_id=None, + encoding_format=None, + keys=None, + parser=None, + shared_params=None): + self.stream_name = string2binary(stream_name) + self.in_plugin_id = in_plugin_id + self.out_plugin_id = out_plugin_id + self.key_vec = None + self.dataInput = MxDataInput() + self.encoding_format = encoding_format if encoding_format else "cv" + self.parser = parser + self.shared_params = shared_params if shared_params is not None \ + else {} + self.set_fetch_keys(keys) + + if not os.path.exists(pipeline_cfg_file): + raise IsADirectoryError("Given pipeline config path is invalid.") + + with open(pipeline_cfg_file, 'rb') as f: + pipeline_str = f.read() + + self.streams = StreamManagerApi() + ret = self.streams.InitManager() + if ret != 0: + raise SystemError("Failed to init Stream manager, ret=%s" % + str(ret)) + + if not isinstance(self.encoding_format, str) or self.encoding_format\ + not in ['cv']: + raise ValueError("Param encoding_format must be a string among " + "['cv'].") + + if self.parser and not hasattr(self.parser, "__call__"): + raise ValueError("Param parser must be callable.") + + ret = self.streams.CreateMultipleStreams(pipeline_str) + if ret != 0: + raise IOError("Failed to create Stream, ret=%s" % str(ret)) + + def set_fetch_keys(self, keys): + if not keys: + return + + if isinstance(keys, (list, tuple)): + self.key_vec = StringVector() + for key in keys: + self.key_vec.push_back(string2binary(key)) + + def parse_ret(self): + if self.parser: + return self.parser(self.ret, self.shared_params) + else: + return self.ret + + def put(self, input_b): + self.dataInput.data = input_b + self.out_plugin_id = self.streams.SendDataWithUniqueId( + self.stream_name, self.in_plugin_id, self.dataInput) + + def get(self): + inferResult = self.streams.GetResultWithUniqueId( + self.stream_name, self.out_plugin_id, 3000) + if inferResult.errorCode != 0: + print("GetResultWithUniqueId error. 
errorCode=%d, " + "errorMsg=%s" % + (inferResult.errorCode, inferResult.data.decode())) + + else: + return inferResult.data.decode() + + def infer(self, input_numpy): + input_bin = self.encode(input_numpy) + self.put(input_bin) + self.ret = self.get() + return self.parse_ret() + + def destroy_stream(self): + self.streams.DestroyAllStreams() + + def encode(self, input_numpy): + if self.encoding_format == "cv": + _, buf = cv2.imencode(".jpg", input_numpy) + return Image.fromarray(np.uint8(buf)).tobytes() + + def __call__(self, input_numpy): + return self.infer(input_numpy) diff --git a/tools/precision_analysis/indicator/__init__.py b/tools/precision_analysis/indicator/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/tools/precision_analysis/indicator/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/tools/precision_analysis/indicator/criterion.py b/tools/precision_analysis/indicator/criterion.py new file mode 100644 index 000000000..4bf83d12b --- /dev/null +++ b/tools/precision_analysis/indicator/criterion.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os +import numpy as np +import subprocess +from pycocotools.cocoeval import COCOeval +from pycocotools.coco import COCO + +from collections import Iterable + +from utils.checker import check_loading_path + + +class Criterion(object): + def __init__(self, loading_path, *args, **kwargs): + """ + All configurable params should be set here. 
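+        Extra positional and keyword arguments are forwarded to
+        init_criterion, so each subclass reads its own settings there.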
+ """ + check_loading_path(loading_path, "loading_path for label") + self.loading_path = loading_path + self.init_criterion(*args, **kwargs) + + def init_criterion(self, *args, **kwargs): + raise NotImplementedError("Please specify a criterion initialization " + "function") + + def eval(self, pred: Iterable) -> int or float: + raise NotImplementedError("Please the evaluation function.") + + def __call__(self, pred): + if not isinstance(pred, Iterable): + raise ValueError("Param pred must be an iterable object.") + + return self.eval(pred) + + +class COCOEvaluation(Criterion): + def init_criterion(self, *args, **kwargs): + self.coco_gt = COCO(self.loading_path) + self.img_list = kwargs.get("img_list") + if self.img_list and not isinstance(self.img_list, list): + raise ValueError("Please specify the image list which is used " + "for evaluating.") + + def eval(self, pred: Iterable or str) -> int or float: + + if isinstance(pred, str): + check_loading_path(pred, "prediction file path") + coco_dt = self.coco_gt.loadRes(pred) + + elif isinstance(pred, np.ndarray): + coco_dt = self.coco_gt.loadRes(pred) + + elif isinstance(pred, list): + obj_pred = self.convert_format(pred) + coco_dt = self.coco_gt.loadRes(obj_pred) + else: + raise ValueError("Wrong pred format was given.") + + E = COCOeval(self.coco_gt, coco_dt, iouType='bbox') + if self.img_list: + E.params.imgIds = self.img_list + E.evaluate() + E.accumulate() + E.summarize() + print("mAP: ", E.stats[0]) + return E.stats[0] + + @staticmethod + def convert_format(pred): + # Todo 解析模型推理获得的结果 + ret = [] + for item in pred: + obj = [item["image_id"]] + obj.extend(item["bbox"]) + obj.append(item["score"]) + obj.append(item["category_id"]) + ret.append(obj) + + return np.array(ret) + + +class PerCharPrecision(Criterion): + def init_criterion(self, *args, **kwargs): + self.label_list = [] + if not os.path.isdir(self.loading_path): + raise ValueError("Given loading_path should be a direction.") + + file_list = os.listdir(self.loading_path) + for file in file_list: + if file.endswith(".jpg"): + label = file.split(".")[0].lower() + label = label.split('_')[-1] + self.label_list.append(label) + + def eval(self, pred: Iterable) -> int or float: + accuracy = [] + + for index, (label, prediction) in enumerate(zip(self.label_list, + pred)): + if not isinstance(prediction, str): + raise ValueError("Please check your predictions, " + "all predictions should be string.") + + prediction = prediction.lower() + sample_count = len(label) + correct_count = 0 + for i, tmp in enumerate(label): + if i >= len(prediction): + break + elif tmp == prediction[i]: + correct_count += 1 + + try: + accuracy.append(correct_count / sample_count) + except ZeroDivisionError: + if len(prediction) == 0: + accuracy.append(1) + else: + accuracy.append(0) + + temp = np.array(accuracy).astype(np.float32) + avg_accuracy = np.mean(temp, axis=0) + + print('PerChar Precision is {:5f}'.format(avg_accuracy)) + return avg_accuracy + + +class FullSequencePrecision(Criterion): + def init_criterion(self, *args, **kwargs): + self.label_list = [] + if not os.path.isdir(self.loading_path): + raise ValueError("Given loading_path should be a direction.") + + file_list = os.listdir(self.loading_path) + for file in file_list: + if file.endswith(".jpg"): + label = file.split(".")[0].lower() + label = label.split('_')[-1] + self.label_list.append(label) + + def eval(self, pred: Iterable) -> int or float: + try: + correct_count = 0 + for index, (label, prediction) in enumerate(zip( + self.label_list, pred)): 
+ if not isinstance(prediction, str): + raise ValueError("Please check your predictions, " + "all predictions should be string.") + + prediction = prediction.lower() + if prediction == label: + correct_count += 1 + else: + print("mistake index: " + str(index)) + print(prediction + " :: " + label) + avg_accuracy = correct_count / len(self.label_list) + except ZeroDivisionError: + if not pred: + avg_accuracy = 1 + else: + avg_accuracy = 0 + print('Full Sequence Precision is {:5f}'.format(avg_accuracy)) + + return avg_accuracy + + +class TextDetectEvaluation(Criterion): + def init_criterion(self, *args, **kwargs): + self.gt_path = self.loading_path + self.result_path = kwargs.get("result_path") + self.result_zip_name = kwargs.get("result_zip_name") + self.dt_path = os.path.join(self.result_path, self.result_zip_name) + self.input_info = {'g': self.gt_path, 's': self.dt_path, 'o': './'} + + def prepare_dt_data(self): + zip_command = "cd {};/usr/bin/zip -qr {} res_*.txt".format(self.result_path, self.result_zip_name) + if subprocess.call(zip_command, shell=True) == 0: + print("Successful zip {}".format(self.result_zip_name)) + else: + raise ValueError("zip {} failed".format(self.result_zip_name)) + + def eval(self, pred: Iterable or str): + self.prepare_dt_data() + return True + + +if __name__ == "__main__": + loading_path = "../models/COCO/instances_val2017.json" + criterion = COCOEvaluation(loading_path=loading_path) + + print("Done !") diff --git a/tools/precision_analysis/indicator/metrics.py b/tools/precision_analysis/indicator/metrics.py new file mode 100644 index 000000000..1d4f80714 --- /dev/null +++ b/tools/precision_analysis/indicator/metrics.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from easydict import EasyDict as edict +import numpy as np + + +class Metrics(object): + def __init__(self, cfg: dict or None = None): + """ + All configurable params should be set here. 
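+        Supported key: multi_sample (bool). When it is True, ref and tgt are
+        reshaped to (batch_size, -1) and each metric is reduced per sample
+        along axis 1 instead of over the whole array.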
+ """ + self.multi_sample = False + self.batch_size = None + self.cfg = None + self.axis = None + self.load_cfg(cfg) + + def load_cfg(self, cfg): + if cfg is None: + cfg = edict() + + if not isinstance(cfg, (type(None), dict)): + raise TypeError(f"Param 'cfg' must be None or an easydict.") + + self.cfg = cfg + + try: + self.multi_sample = self.cfg.multi_sample + if not isinstance(self.multi_sample, bool): + raise TypeError(f"Param multi_sample should be a value of " + f"bool.") + + except AttributeError: + pass + + def compare(self, ref: np.ndarray, tgt: np.ndarray) -> int or float: + raise NotImplementedError + + def __call__(self, ref, tgt): + if not isinstance(self.cfg, edict): + raise TypeError(f"Params must be passed by a easydict instance.") + + if not isinstance(ref, type(tgt)): + raise TypeError(f"Given ref and tgt must be the same type.") + + if isinstance(ref, list): + ref, tgt = np.array(ref), np.array(tgt) + + if not isinstance(ref, np.ndarray): + raise TypeError(f"Given params ref and tgt can only be a type " + f"within list and np.ndarray.") + + if ref.shape != tgt.shape: + raise AssertionError(f"The shape of ref and tgt should be the " + f"same.") + self.shape = ref.shape + self.batch_size = self.shape[0] + if len(self.shape) == 1: + self.multi_sample = False + + if self.multi_sample: + ref = np.reshape(ref, (self.batch_size, -1)) + tgt = np.reshape(tgt, (self.batch_size, -1)) + + self.axis = 1 if self.multi_sample else None + + return self.compare(ref, tgt) + + +class RE(Metrics): + """ + relative error + """ + + def __init__(self, cfg=None): + super(RE, self).__init__(cfg) + try: + self.epsilon = self.cfg.epsilon + if not isinstance(self.epsilon, float) or self.epsilon <= 0: + raise ValueError("Param epsilon must a float larger than 0.") + except AttributeError: + self.epsilon = 1e-6 + + +class MRE(RE): + """ + Mean relative error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.mean(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon), + axis=1) + + else: + return np.mean(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon)) + + +class MaxRE(RE): + """ + Max relative error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.max(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon), + axis=1) + + else: + return np.max(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon)) + + +class MinRE(RE): + """ + Min relative error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.min(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon), + axis=1) + + else: + return np.min(np.abs(tgt - ref) / (np.abs(ref) + self.epsilon)) + + +class MAE(Metrics): + """ + Mean absolute error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.mean(np.abs(tgt - ref), axis=1) + + else: + return np.mean(np.abs(tgt - ref)) + + +class MaxAE(Metrics): + """ + Max absolute error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.max(np.abs(tgt - ref), axis=1) + + else: + return np.max(np.abs(tgt - ref)) + + +class MinAE(Metrics): + """ + Min absolute error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.min(np.abs(tgt - ref), axis=1) + + else: + return np.min(np.abs(tgt - ref)) + + +class CAE(Metrics): + """ + Cumulative absolute error + """ + + def compare(self, ref, tgt): + if self.multi_sample: + return np.sum(np.abs(tgt - ref), axis=1) + + else: + return np.sum(np.abs(tgt - ref)) + + +class MSE(Metrics): + """ + Mean square error + """ + + def compare(self, ref, tgt): + if 
self.multi_sample: + return np.mean(np.square(tgt - ref), axis=1) + + else: + return np.mean(np.square(tgt - ref)) + + +class RMSE(Metrics): + """ + Root mean square error + """ + + def compare(self, ref, tgt): + return np.sqrt(np.mean(np.square(tgt - ref)), axis=self.axis) + + +class RatioAlmostEqual(Metrics): + def __init__(self, cfg): + super(RatioAlmostEqual).__init__(cfg) + try: + self.err_thresh = self.cfg.err_thresh + + except AttributeError: + raise AttributeError(f"Please specify the threshold of error.") + + +class RelativeRatioAlmostEqual(RE, RatioAlmostEqual): + def compare(self, ref, tgt): + relative_error = np.abs(tgt - ref) / (np.abs(ref) + self.epsilon) + element_cnt = self.shape[1] + numerator = np.sum(relative_error > self.err_thresh, axis=self.axis) + return numerator / element_cnt + + +class AbsoluteRatioAlmostEqual(RatioAlmostEqual): + def compare(self, ref, tgt): + absolute_error = np.abs(tgt - ref) + element_cnt = self.shape[1] + numerator = np.sum(absolute_error > self.err_thresh, axis=self.axis) + return numerator / element_cnt + + +class CosineDistance(Metrics): + def compare(self, ref, tgt): + ref_norm = np.linalg.norm(ref, axis=self.axis) + tgt_norm = np.linalg.norm(tgt, axis=self.axis) + if self.multi_sample: + return np.dot(ref, tgt.T).diagonal() / (tgt_norm * ref_norm) + + else: + return np.dot(ref, tgt.T) / (tgt_norm * ref_norm) + + +if __name__ == "__main__": + pass diff --git a/tools/precision_analysis/interface/__init__.py b/tools/precision_analysis/interface/__init__.py new file mode 100644 index 000000000..53bfa2473 --- /dev/null +++ b/tools/precision_analysis/interface/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/tools/precision_analysis/interface/eval.py b/tools/precision_analysis/interface/eval.py new file mode 100644 index 000000000..b28ae5348 --- /dev/null +++ b/tools/precision_analysis/interface/eval.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from executor.inference import InferenceExecutor +from indicator.criterion import Criterion + + +class Evaluator(object): + def __init__(self, + criterion, + inference_executor=None, + inference_result=None): + if not isinstance(criterion, (list, tuple)): + criterion = [criterion] + + self.inference_executor = inference_executor + self.inference_result = inference_result + self.criterion = criterion + + for ctn in self.criterion: + if not isinstance(ctn, Criterion): + raise ValueError("Given criterion must be an instance of the " + "class of Criterion in package " + "indicator.criterion.") + + if not (inference_executor or inference_result): + raise IOError("Your must assign one inference method.") + + if inference_executor and inference_result: + raise IOError("Only one inference method can be assigned.") + + if not isinstance(inference_executor, InferenceExecutor): + raise ValueError("Param inference_executor must be the instance " + "of InferenceExecutor in package " + "executor.inference") + + def eval(self): + if self.inference_executor: + ret = self.inference_executor.execute() + + else: + ret = self.inference_result + + eval_score = {} + for ctn in self.criterion: + score = ctn(ret) + ctn_name = type(ctn).__name__ + eval_score[ctn_name] = score + + print(f"The score of {ctn_name} is {score} .") + + return eval_score diff --git a/tools/precision_analysis/main.py b/tools/precision_analysis/main.py new file mode 100644 index 000000000..17b98f867 --- /dev/null +++ b/tools/precision_analysis/main.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from test.build import TEST_MAP +from utils.arguments import get_args +from utils import constants + + +def main(): + args = get_args() + mode_str = args.mode + data_loading_path = args.data_loading_path + label_loading_path = args.label_loading_path + pipeline_cfg_path = args.pipeline_cfg_path + stream_name = args.stream_name + + cfg = {"data_loading_path": data_loading_path, + "label_loading_path": label_loading_path, + "pipeline_cfg_path": pipeline_cfg_path, + "stream_name": stream_name} + + mode_list = mode_str.split(".") + if len(mode_list) <= 1: + raise NotImplementedError("Only test mode is supported.") + + elif len(mode_list) != 3: + raise ValueError(f"Please offer a format like " + f"'test.{constants.ModelName.CRNN.value}" + f".{constants.UnitName.PIPELINE.value}'.") + + mode = mode_list[0].lower() + model_name = mode_list[1].lower() + testing_unit = mode_list[2].lower() + + if mode != "test": + raise ValueError("Only test mode is supported.") + + if model_name not in [ + constants.ModelName.CRNN.value, constants.ModelName.SMF.value + ]: + raise ValueError(f"We can only offer demos with respect to models [" + f"{constants.ModelName.CRNN.value}, " + f"{constants.ModelName.SMF.value}].") + + if testing_unit not in [ + constants.UnitName.PIPELINE.value, + constants.UnitName.INFERENCE.value, + constants.UnitName.EVALUATION.value + ]: + raise ValueError(f"Only 3 unit, {constants.UnitName.PIPELINE.value} " + f"{constants.UnitName.INFERENCE.value} and " + f"{constants.UnitName.EVALUATION.value}, " + f"can be tested.") + + exec_func = TEST_MAP.get(model_name).get(testing_unit) + exec_func(cfg) + + +if __name__ == '__main__': + main() diff --git a/tools/precision_analysis/requirements.txt b/tools/precision_analysis/requirements.txt new file mode 100644 index 000000000..8785f5e15 --- /dev/null +++ b/tools/precision_analysis/requirements.txt @@ -0,0 +1,9 @@ +easydict +pycoco +pycocotools +mindspore +numpy +opencv-python +Pillow +pytorch +tensorflow==1.15 \ No newline at end of file diff --git a/tools/precision_analysis/test/__init__.py b/tools/precision_analysis/test/__init__.py new file mode 100644 index 000000000..7a6cbaed1 --- /dev/null +++ b/tools/precision_analysis/test/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +__import__("test.test_CRNN") +__import__("test.test_ssd_mobilenet_fpn") diff --git a/tools/precision_analysis/test/build.py b/tools/precision_analysis/test/build.py new file mode 100644 index 000000000..088902482 --- /dev/null +++ b/tools/precision_analysis/test/build.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
diff --git a/tools/precision_analysis/test/build.py b/tools/precision_analysis/test/build.py
new file mode 100644
index 000000000..088902482
--- /dev/null
+++ b/tools/precision_analysis/test/build.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from utils.collection import Collection
+from utils import constants
+
+CRNN_TEST_COLLECTION = Collection("crnn_test")
+SSD_MOBILENET_FPN_TEST_COLLECTION = Collection("ssd_mobilenet_fpn_test")
+CTPN_TEST_COLLECTION = Collection("ctpn_test")  # imported by test_ctpn.py
+
+TEST_MAP = {constants.ModelName.CRNN.value: CRNN_TEST_COLLECTION,
+            constants.ModelName.SMF.value: SSD_MOBILENET_FPN_TEST_COLLECTION}
diff --git a/tools/precision_analysis/test/test_CRNN.py b/tools/precision_analysis/test/test_CRNN.py
new file mode 100644
index 000000000..0e2ae82bf
--- /dev/null
+++ b/tools/precision_analysis/test/test_CRNN.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from executor.data.image_loader import ImageLoaderDir
+from executor.inference import InferenceExecutor
+from executor.model.pipeline import Pipeline
+from indicator.criterion import PerCharPrecision, FullSequencePrecision
+from interface.eval import Evaluator
+from test.build import CRNN_TEST_COLLECTION
+from utils.parser import crnn_parsing_func
+from utils import constants
+
+
+@CRNN_TEST_COLLECTION.register(constants.UnitName.PIPELINE.value)
+def test_pipeline(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    crnn_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=crnn_parsing_func)
+
+    for image in crnn_data_loader:
+        ret = pipeline(image)
+        print(ret)
+
+
+@CRNN_TEST_COLLECTION.register(constants.UnitName.INFERENCE.value)
+def test_inference(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    crnn_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=crnn_parsing_func)
+
+    crnn_inference = InferenceExecutor(pipeline,
+                                       data_loader=crnn_data_loader,
+                                       name="crnn_inference",
+                                       shared_params=shared_params,
+                                       verbose=True)
+    pred = crnn_inference.execute()
+    print(pred)
+
+
+@CRNN_TEST_COLLECTION.register(constants.UnitName.EVALUATION.value)
+def test_evaluation(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    crnn_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=crnn_parsing_func)
+
+    crnn_inference = InferenceExecutor(pipeline,
+                                       data_loader=crnn_data_loader,
+                                       name="crnn_inference",
+                                       shared_params=shared_params,
+                                       verbose=True)
+    label_loading_path = cfg.get("label_loading_path")
+    criterion_per_char = PerCharPrecision(loading_path=label_loading_path)
+    criterion_full_seq = FullSequencePrecision(loading_path=label_loading_path)
+    crnn_evaluator = Evaluator([criterion_per_char, criterion_full_seq],
+                               inference_executor=crnn_inference)
+
+    eval_score = crnn_evaluator.eval()
+    print(f"Eval score is {eval_score}.")
diff --git a/tools/precision_analysis/test/test_ctpn.py b/tools/precision_analysis/test/test_ctpn.py
new file mode 100644
index 000000000..0e8ebe597
--- /dev/null
+++ b/tools/precision_analysis/test/test_ctpn.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from executor.data.image_loader import ImageLoaderDir
+from executor.inference import InferenceExecutor
+from executor.model.pipeline import Pipeline
+from indicator.criterion import TextDetectEvaluation
+from interface.eval import Evaluator
+from test.build import CTPN_TEST_COLLECTION
+from utils.parser import ctpn_parse_and_save_func
+from utils import constants
+
+
+@CTPN_TEST_COLLECTION.register(constants.UnitName.EVALUATION.value)
+def test_evaluation(cfg=None):  # paths below are hardcoded for this demo
+    loading_path = "./test/models/icdar2013/data"
+    result_path = "./test/models/icdar2013/result"
+    result_zip_name = "icdar2013_test.zip"
+    shared_params = dict()
+    shared_params['result_path'] = result_path
+    shared_params['result_zip_name'] = result_zip_name
+    icdar_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                       shared_params=shared_params)
+    pipeline_cfg_file_path = "./test/models/ctpn_single_cv.pipeline"
+    stream_name_str = "detection"
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=ctpn_parse_and_save_func,
+                        shared_params=shared_params)
+
+    ctpn_inference = InferenceExecutor([pipeline],
+                                       data_loader=icdar_data_loader,
+                                       name="ctpn_mindspore_inference",
+                                       shared_params=shared_params,
+                                       verbose=False)
+    ctpn_gt_path = "./test/models/icdar2013/icdar2013_gt.zip"
+    criterion_coco = TextDetectEvaluation(loading_path=ctpn_gt_path, result_path=result_path,
+                                          result_zip_name=result_zip_name)
+    ctpn_evaluator = Evaluator(criterion_coco, inference_executor=ctpn_inference)
+    eval_score = ctpn_evaluator.eval()
+    print(f"Eval score is {eval_score}.")
diff --git a/tools/precision_analysis/test/test_ssd_mobilenet_fpn.py b/tools/precision_analysis/test/test_ssd_mobilenet_fpn.py
new file mode 100644
index 000000000..0350719fa
--- /dev/null
+++ b/tools/precision_analysis/test/test_ssd_mobilenet_fpn.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from executor.data.image_loader import ImageLoaderDir
+from executor.inference import InferenceExecutor
+from executor.model.pipeline import Pipeline
+from indicator.criterion import COCOEvaluation
+from interface.eval import Evaluator
+from test.build import SSD_MOBILENET_FPN_TEST_COLLECTION
+from utils.parser import ssd_mobilenet_fpn_parsing_func, label_mapping_for_coco
+from utils import constants
+
+
+def transform_coco_cat_id(annotations):
+    for ann in annotations:
+        ann["category_id"] = label_mapping_for_coco(ann["category_id"])
+
+    return annotations
+
+
+@SSD_MOBILENET_FPN_TEST_COLLECTION.register(constants.UnitName.PIPELINE.value)
+def test_pipeline(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    coco_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=ssd_mobilenet_fpn_parsing_func,
+                        shared_params=shared_params)
+
+    for image in coco_data_loader:
+        ret = pipeline(image)
+        ret = transform_coco_cat_id(ret)
+        print(ret)
+
+    print("Done!")
+
+
+@SSD_MOBILENET_FPN_TEST_COLLECTION.register(constants.UnitName.INFERENCE.value)
+def test_inference(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    coco_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=ssd_mobilenet_fpn_parsing_func,
+                        shared_params=shared_params)
+
+    ssd_inference = InferenceExecutor([pipeline, transform_coco_cat_id],
+                                      data_loader=coco_data_loader,
+                                      name="ssd_mobilenet_fpn_inference",
+                                      shared_params=shared_params,
+                                      verbose=True)
+    pred = ssd_inference.execute()
+    print(pred)
+
+
+@SSD_MOBILENET_FPN_TEST_COLLECTION.register(constants.UnitName.EVALUATION.value)
+def test_evaluation(cfg):
+    loading_path = cfg.get("data_loading_path")
+    shared_params = dict()
+    coco_data_loader = ImageLoaderDir(loading_path=loading_path,
+                                      shared_params=shared_params)
+
+    pipeline_cfg_file_path = cfg.get("pipeline_cfg_path")
+    stream_name_str = cfg.get("stream_name")
+    pipeline = Pipeline(pipeline_cfg_file=pipeline_cfg_file_path,
+                        stream_name=stream_name_str,
+                        parser=ssd_mobilenet_fpn_parsing_func,
+                        shared_params=shared_params)
+
+    ssd_inference = InferenceExecutor([pipeline, transform_coco_cat_id],
+                                      data_loader=coco_data_loader,
+                                      name="ssd_mobilenet_fpn_inference",
+                                      shared_params=shared_params,
+                                      verbose=False)
+    coco_annotation_path = cfg.get("label_loading_path")
+    criterion_coco = COCOEvaluation(loading_path=coco_annotation_path)
+    ssd_evaluator = Evaluator(criterion_coco, inference_executor=ssd_inference)
+    eval_score = ssd_evaluator.eval()
+    print(f"Eval score is {eval_score}.")
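A note on the category remapping used above: detectors trained on COCO commonly emit contiguous class ids 1-80, while the official annotation files use sparse ids 1-90, so transform_coco_cat_id bridges the two via label_mapping_for_coco (defined in utils/parser.py later in this patch). A quick check of the mapping:

    from utils.parser import label_mapping_for_coco

    # Contiguous detector ids -> sparse official COCO category ids.
    print([label_mapping_for_coco(k) for k in (1, 12, 27, 80)])  # [1, 13, 31, 90]
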
diff --git a/tools/precision_analysis/utils/__init__.py b/tools/precision_analysis/utils/__init__.py
new file mode 100644
index 000000000..53bfa2473
--- /dev/null
+++ b/tools/precision_analysis/utils/__init__.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
diff --git a/tools/precision_analysis/utils/arguments.py b/tools/precision_analysis/utils/arguments.py
new file mode 100644
index 000000000..30af5fd1a
--- /dev/null
+++ b/tools/precision_analysis/utils/arguments.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import argparse
+
+
+def get_args():
+    parser = argparse.ArgumentParser(description='Precision analysis arguments.')
+    parser.add_argument('--mode',
+                        type=str,
+                        default='test.crnn.pipeline',
+                        help='to specify the running mode',
+                        dest='mode')
+    parser.add_argument('--data-loading-path',
+                        type=str,
+                        required=True,
+                        help='to specify the data loading path',
+                        dest='data_loading_path')
+    parser.add_argument('--label-loading-path',
+                        type=str,
+                        required=True,
+                        help='to specify the label loading path',
+                        dest='label_loading_path')
+    parser.add_argument('--pipeline-cfg-path',
+                        type=str,
+                        required=True,
+                        help='to specify the pipeline config path',
+                        dest='pipeline_cfg_path')
+    parser.add_argument('--stream-name',
+                        type=str,
+                        required=True,
+                        help='to specify the stream name',
+                        dest='stream_name')
+
+    return parser.parse_args()
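A sketch of how these flags map to the attributes main.py reads, with argv simulated in-process (all paths and the stream name are placeholders):

    import sys
    from utils.arguments import get_args

    sys.argv = ["main.py", "--mode", "test.crnn.evaluation",
                "--data-loading-path", "./data/crnn_images",        # placeholder
                "--label-loading-path", "./data/crnn_labels.txt",   # placeholder
                "--pipeline-cfg-path", "./pipeline/crnn.pipeline",  # placeholder
                "--stream-name", "recognition"]                     # placeholder
    args = get_args()
    print(args.mode, args.data_loading_path)
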
+""" + +import os +import numpy as np + + +def check_loading_path(loading_path, path_name, suffix_set=()): + if not isinstance(suffix_set, (list, tuple)): + suffix_set = [suffix_set] + + for item in suffix_set: + if not isinstance(item, str): + raise TypeError("Wrong suffix type.") + + if not isinstance(loading_path, str): + raise ValueError(f"please specify a {path_name} as string.") + + _, suffix = os.path.splitext(loading_path) + if suffix_set and suffix not in suffix_set: + raise ValueError(f"Please specify a {path_name} with suffix " + f"{suffix_set}.") + + if not os.path.exists(loading_path): + raise NotADirectoryError(f"Given {path_name} does not exist.") + + +def check_saving_path(saving_path, path_name, suffix_set=()): + if not isinstance(suffix_set, (list, tuple)): + suffix_set = [suffix_set] + + for item in suffix_set: + if not isinstance(item, str): + raise TypeError("Wrong suffix type.") + + if not isinstance(saving_path, str): + raise ValueError(f"please assign a string as the {path_name}") + + root_dir, file_name = os.path.split(saving_path) + root_dir = "./" if root_dir == "" else root_dir + + os.makedirs(root_dir, exist_ok=True) + + _, suffix = os.path.splitext(file_name) + + if suffix not in suffix_set: + raise ValueError(f"Please assign suffix '.index' to {path_name}.") + + +def check_model_input(input_numpy): + if isinstance(input_numpy, np.ndarray): + pass + + else: + if not isinstance(input_numpy, list): + raise ValueError(f"Model input should be np.ndarry or a list of " + f"np.ndarry.") + + for element in input_numpy: + if not isinstance(element, np.ndarray): + raise ValueError(f"Model input should be np.ndarry or a list " + f"of np.ndarry.") diff --git a/tools/precision_analysis/utils/coding_conversion.py b/tools/precision_analysis/utils/coding_conversion.py new file mode 100644 index 000000000..3a1bd03f7 --- /dev/null +++ b/tools/precision_analysis/utils/coding_conversion.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +def binary2string(string): + """ + 二进制转字符串 + :param string: + :return: + """ + if isinstance(string, str): + return string + + return string.decode() + + +def string2binary(string): + """ + 字符串转二进制 + :param string: + :return: + """ + if isinstance(string, str): + return string.encode() + + return string diff --git a/tools/precision_analysis/utils/collection.py b/tools/precision_analysis/utils/collection.py new file mode 100644 index 000000000..2fcf40845 --- /dev/null +++ b/tools/precision_analysis/utils/collection.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Copyright 2020 Huawei Technologies Co., Ltd +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
diff --git a/tools/precision_analysis/utils/collection.py b/tools/precision_analysis/utils/collection.py
new file mode 100644
index 000000000..2fcf40845
--- /dev/null
+++ b/tools/precision_analysis/utils/collection.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class Collection(object):
+    """
+    A collection keeps a name -> class mapping to support customized
+    implementations of derivative classes.
+    To create a collection (e.g. a Metrics collection):
+    Step 1:
+        METRICS_COLLECTION = Collection('METRICS')
+    Step 2:
+        @METRICS_COLLECTION.register()
+        class MyMetrics():
+            ...
+    """
+
+    def __init__(self, table_name) -> None:
+        """
+        Args:
+            table_name (str): the name of this collection
+        """
+        self.table_name = table_name
+        self.object_map = {}
+
+    def _do_register(self, name: str, obj: object) -> None:
+        if name in self.object_map:
+            raise KeyError(f"The class named '{name}' was already registered!")
+
+        self.object_map[name] = obj
+
+    def register(self, name=None):
+        """
+        Register the given class or function under `name`, or under its
+        `__name__` when no name is given. Please use it as a decorator.
+        """
+        def wrapper(cls_or_func):
+            if name and isinstance(name, str):
+                register_name = name
+            else:
+                register_name = cls_or_func.__name__
+
+            self._do_register(register_name, cls_or_func)
+            return cls_or_func
+
+        return wrapper
+
+    def get(self, name):
+        ret = self.object_map.get(name)
+        if ret is None:
+            raise KeyError(f"The class named {name} does not exist!")
+
+        return ret
diff --git a/tools/precision_analysis/utils/constants.py b/tools/precision_analysis/utils/constants.py
new file mode 100644
index 000000000..017fca92f
--- /dev/null
+++ b/tools/precision_analysis/utils/constants.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from enum import Enum
+
+
+# Model name
+class ModelName(Enum):
+    CRNN = "crnn"
+    SMF = "ssd_mobilenet_fpn"
+
+
+# Unit name
+class UnitName(Enum):
+    PIPELINE = "pipeline"
+    INFERENCE = "inference"
+    EVALUATION = "evaluation"
+
+
+if __name__ == "__main__":
+    print("ModelName.CRNN: ", ModelName.CRNN.value)
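These enums define the vocabulary of main.py's mode string; composing one from the constants guards against typos:

    from utils import constants

    mode = (f"test.{constants.ModelName.CRNN.value}"
            f".{constants.UnitName.EVALUATION.value}")
    print(mode)  # -> "test.crnn.evaluation", the format main.py expects
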
diff --git a/tools/precision_analysis/utils/parser.py b/tools/precision_analysis/utils/parser.py
new file mode 100644
index 000000000..bbf26e263
--- /dev/null
+++ b/tools/precision_analysis/utils/parser.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright 2020 Huawei Technologies Co., Ltd
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+from json import loads
+
+
+def crnn_parsing_func(ret, shared_params):
+    try:
+        ret = loads(ret)
+        return ret["MxpiTextsInfo"][0]["text"][0]
+    except (ValueError, KeyError, IndexError, TypeError):
+        # Return an empty string when the inference result is malformed.
+        return ""
+
+
+def ssd_mobilenet_fpn_parsing_func(ret, shared_params):
+    """
+    Basic format is as follows:
+    [{"bbox": [225.7, 207.6, 128.7, 140.2], "score": 0.999, "image_id": 581929, "category_id": 17},
+     {"bbox": [231.1, 110.6, 33.5, 36.7], "score": 0.992, "image_id": 581929, "category_id": 17}, ...]
+    :param ret:
+    :param shared_params:
+    :return:
+    """
+    ret = loads(ret)
+    objs = ret.get("MxpiObject")
+    if not isinstance(objs, list):
+        return []
+
+    annotations = []
+    for obj in objs:
+        x0, y0 = obj.get("x0"), obj.get("y0")
+        x1, y1 = obj.get("x1"), obj.get("y1")
+        w, h = x1 - x0, y1 - y0
+        bbox = [x0, y0, w, h]
+        score = obj.get("classVec")[0].get("confidence")
+        file_name = shared_params.get("file_name")
+        image_id = int(os.path.splitext(file_name)[0])
+        category_id = obj.get("classVec")[0].get("classId")
+        ann = {
+            "bbox": bbox,
+            "score": score,
+            "image_id": image_id,
+            "category_id": category_id
+        }
+
+        annotations.append(ann)
+
+    return annotations
+
+
+def label_mapping_for_coco(k):
+    if 1 <= k <= 11:
+        class_id = k
+    elif 12 <= k <= 24:
+        class_id = k + 1
+    elif 25 <= k <= 26:
+        class_id = k + 2
+    elif 27 <= k <= 40:
+        class_id = k + 4
+    elif 41 <= k <= 60:
+        class_id = k + 5
+    elif k == 61:
+        class_id = k + 6
+    elif k == 62:
+        class_id = k + 8
+    elif 63 <= k <= 73:
+        class_id = k + 9
+    elif 74 <= k <= 80:
+        class_id = k + 10
+    else:
+        raise ValueError("Category id is out of range.")
+
+    return class_id
+
+
+def ctpn_parse_and_save_func(ret, shared_params):
+    """
+    Basic format is as follows:
+    {"MxpiTextObject": [{"confidence": 0.985, "text": "",
+                         "x0": 341.3, "x1": 1109.3, "x2": 1109.3, "x3": 341.3,
+                         "y0": 766.5, "y1": 766.5, "y2": 795.0, "y3": 795.0},
+                        {"confidence": 0.966, "text": "",
+                         "x0": 405.3, "x1": 1066.7, "x2": 1066.7, "x3": 405.3,
+                         "y0": 741.3, "y1": 741.3, "y2": 768.0, "y3": 768.0}]}
+    :param ret:
+    :param shared_params:
+    :return: None
+    """
+    ret = loads(ret)
+    result_path = shared_params['result_path']
+    file_name = shared_params['file_name'].strip().split('.')[0]
+    if not os.path.exists(result_path):
+        os.makedirs(result_path)
+    objs = ret.get("MxpiTextObject")
+    if objs is None:
+        with open(os.path.join(result_path, 'res_' + file_name + '.txt'), 'w') as f_write:
+            f_write.write("")
+        return True
+    boxes = []
+    for res in objs:
+        boxes.append([int(res['x0']), int(res['y0']), int(res['x2']), int(res['y2'])])
+    with open(os.path.join(result_path, 'res_' + file_name + '.txt'), 'w') as f_write:
+        for box in boxes:
+            line = ",".join(str(coord) for coord in box)
+            f_write.write(line)
+            f_write.write('\n')
+    return True
--
Gitee
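Taken together: main.py parses a `test.<model>.<unit>` mode string, looks the model up in TEST_MAP, fetches the registered function from its Collection, and calls it with the cfg dict built from the CLI flags. A minimal sketch of driving the same machinery directly, mirroring the registered CRNN evaluation path; every path and the stream name below are placeholders, not files shipped by this patch:

    from executor.data.image_loader import ImageLoaderDir
    from executor.inference import InferenceExecutor
    from executor.model.pipeline import Pipeline
    from indicator.criterion import FullSequencePrecision, PerCharPrecision
    from interface.eval import Evaluator
    from utils.parser import crnn_parsing_func

    shared_params = dict()
    # Iterates images in a directory, tracking per-file state in shared_params.
    loader = ImageLoaderDir(loading_path="./data/crnn_images",  # placeholder
                            shared_params=shared_params)
    # Wraps a stream from a .pipeline file; the parser turns raw JSON into text.
    pipeline = Pipeline(pipeline_cfg_file="./pipeline/crnn.pipeline",  # placeholder
                        stream_name="recognition",  # placeholder
                        parser=crnn_parsing_func)
    inference = InferenceExecutor(pipeline,
                                  data_loader=loader,
                                  name="crnn_inference",
                                  shared_params=shared_params,
                                  verbose=True)
    label_path = "./data/crnn_labels.txt"  # placeholder
    criteria = [PerCharPrecision(loading_path=label_path),
                FullSequencePrecision(loading_path=label_path)]
    print(Evaluator(criteria, inference_executor=inference).eval())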