diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt index ba47bdc44cc888b46fd5d692270ee63842772b9e..ce0b9beda560b8719300414326a69f7e7641edd5 100644 --- a/.jenkins/check/config/filter_cpplint.txt +++ b/.jenkins/check/config/filter_cpplint.txt @@ -1,8 +1,66 @@ -# Overall -"models/" "build/header_guard" -"models/" "build/c++11" +# MindSpore +"mindspore/" "build/header_guard" +"mindspore/" "build/c++11" +"mindspore/include/inference.h" "runtime/references" +"mindspore/include/infer_tensor.h" "runtime/references" +"mindspore/include/inference.h" "runtime/explicit" +"mindspore/mindspore/core/abstract/utils.cc" "build/include_what_you_use" +"mindspore/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc" "build/include_what_you_use" +"mindspore/mindspore/ccsrc/runtime/device/ascend/profiling/profiling_callback_register.cc" "runtime/references" +"mindspore/mindspore/core/mindrt/src/actor/actormgr.h" "runtime/references" +"mindspore/mindspore/core/mindrt/src/actor/actorpolicyinterface.h" "runtime/references" +"mindspore/mindspore/core/mindrt/src/actor/actorthread.h" "runtime/references" +"mindspore/mindspore/core/mindrt/src/actor/actorpolicy.h" "runtime/references" +"mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/" "readability/casting" +"mindspore/mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc" "build/include_what_you_use" +"mindspore/mindspore/ccsrc/utils/convert_utils_py.cc" "whitespace/indent" # Modelzoo "mindspore/model_zoo/official/cv/yolov4_tiny/infer/mxbase/src/Yolov4TinyDetection.h" "runtime/references" "mindspore/model_zoo/official/cv/yolov4_tiny/infer/mxbase/src/PostProcess/Yolov4TinyMindsporePost.h" "runtime/references" -"mindspore/model_zoo/official/cv/resnet/infer/ResNet18/mxbase/Resnet18ClassifyOpencv.h" "runtime/references" +"mindspore/model_zoo/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.h" "runtime/references" +"mindspore/model_zoo/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.cpp" "runtime/references" + + +# MindData +"mindspore/mindspore/ccsrc/minddata/mindrecord/include/shard_page.h" "runtime/string" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.h" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/engine/tdt/tdt_plugin.h" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/util/bit.h" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc" "runtime/int" +"mindspore/mindspore/ccsrc/minddata/dataset/util" "build/include" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/image_process.h" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/image_process.cc" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/lite_mat.h" "runtime/explicit" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/resize_cubic_op.h" "runtime/references" +"mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/resize_cubic_op.cc" "runtime/references" + +# Lite +"mindspore/mindspore/lite/include/lite_utils.h" "build/include_what_you_use" +"mindspore/mindspore/lite/nnacl/" "readability/casting" +"mindspore/mindspore/lite/micro/coder/wrapper/" "readability/casting" +"mindspore/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h" "runtime/references" +"mindspore/mindspore/lite/tools/converter/model_parser.h" "build/namespaces" +"mindspore/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc" 
"readability/casting" +"mindspore/mindspore/lite/tools/converter/optimizer.h" "build/namespaces" +"mindspore/mindspore/lite/tools/converter/quantizer/quantize_util.h" "runtime/references" +"mindspore/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h" "runtime/references" +"mindspore/mindspore/lite/tools/benchmark/benchmark.cc" "runtime/threadsafe_fn" +"mindspore/mindspore/lite/tools/benchmark/benchmark_base.cc" "runtime/threadsafe_fn" +"mindspore/mindspore/lite/tools/benchmark/benchmark_unified_api.cc" "runtime/threadsafe_fn" +"mindspore/mindspore/lite/tools/benchmark/run_benchmark.cc" "runtime/threadsafe_fn" +"mindspore/mindspore/lite/src/executor.h" "runtime/references" +"mindspore/mindspore/lite/src/lite_kernel.h" "runtime/references" +"mindspore/mindspore/lite/src/runtime/opencl/opencl_runtime.h" "runtime/references" +"mindspore/mindspore/lite/src/runtime/opencl/opencl_executor.h" "runtime/references" +"mindspore/mindspore/lite/src/runtime/opencl/opencl_wrapper.h" "readability/casting" +"mindspore/mindspore/lite/src/runtime/kernel/opencl/cl/" "legal/copyright" +"mindspore/mindspore/lite/src/runtime/kernel/opencl/cl/" "readability/casting" +"mindspore/mindspore/lite/src/runtime/kernel/opencl/cl/" "readability/fn_size" +"mindspore/mindspore/lite/src/runtime/thread_pool.c" "readability/casting" +"mindspore/mindspore/lite/src/runtime/thread_pool.c" "runtime/arrays" +"mindspore/mindspore/lite/src/runtime/thread_pool.c" "runtime/int" +"mindspore/mindspore/lite/src/ops/ops_def.cc" "runtime/int" +"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "legal/copyright" +"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "readability/casting" +"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "readability/fn_size" diff --git a/.jenkins/check/config/filter_pylint.txt b/.jenkins/check/config/filter_pylint.txt index 0be60f38a67d76e2bcd6217e64e1b5bc6b80c2ba..3df60c7abf15f41232dbbd40d9747c6aa3b6fc43 100644 --- a/.jenkins/check/config/filter_pylint.txt +++ b/.jenkins/check/config/filter_pylint.txt @@ -1,11 +1,123 @@ # Overall -"models/" "invalid-name" -"models/" "arguments-differ" -"models/" "unused-argument" -"models/" "no-value-for-parameter" -"models/" "import-self" +"mindspore/" "invalid-name" +"mindspore/" "arguments-differ" +"mindspore/" "unused-argument" +"mindspore/model_zoo/" "no-value-for-parameter" +"mindspore/model_zoo/" "import-self" +"mindspore/tests/" "missing-docstring" +"mindspore/tests/" "import-self" -# official -"models/official/cv" "missing-docstring" -"models/official/cv" "c-extension-no-member" -"models/official/nlp/bert_thor/src/bert_model.py" "redefined-outer-name" +# Mindspore +"mindspore/mindspore/_check_deps_version.py" "broad-except" +"mindspore/mindspore/_check_version.py" "unused-import" +"mindspore/mindspore/_check_version.py" "broad-except" +"mindspore/mindspore/common/parameter.py" "protected-access" +"mindspore/mindspore/context.py" "protected-access" +"mindspore/mindspore/ops/operations" "super-init-not-called" +"mindspore/mindspore/ops/operations/_quant_ops.py" "unused-import" +"mindspore/mindspore/ops/operations/nn_ops.py" "redefined-builtin" +"mindspore/mindspore/ops/operations/_thor_ops.py" "dangerous-default-value" +"mindspore/mindspore/ops/operations/_thor_ops.py" "redefined-outer-name" +"mindspore/mindspore/ops/operations/_thor_ops.py" "unused-import" +"mindspore/mindspore/ops/_op_impl/_custom_op" "dangerous-default-value +"mindspore/mindspore/ops/_op_impl/_custom_op" "simplifiable-if-expression" 
+"mindspore/mindspore/ops/_op_impl/_custom_op" "unused-variable" +"mindspore/mindspore/ops/composite/base.py" "protected-acces" +"mindspore/mindspore/ops/primitive.py" "assignment-from-none" +"mindspore/mindspore/nn/cell.py" "assignment-from-none" +"mindspore/mindspore/_extends/parse/resources.py" "bad-whitespace" +"mindspore/mindspore/_extends/parse/parser.py" "broad-except" +"mindspore/mindspore/_extends/parse/parser.py" "eval-used" +"mindspore/mindspore/nn/cell.py" "protected-access" +"mindspore/mindspore/nn/optim/ftrl.py" "unused-import" +"mindspore/mindspore/train/amp.py" "protected-access" +"mindspore/mindspore/train/serialization.py" "protected-access" +"mindspore/mindspore/train/model.py" "protected-access" +"mindspore/mindspore/log.py" "protected-access" +"mindspore/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py" "unsupported-assignment-operation" +"mindspore/model_zoo/official/cv" "missing-docstring" +"mindspore/model_zoo/official/cv" "c-extension-no-member" +"mindspore/model_zoo/official/nlp/bert_thor/src/bert_model.py" "redefined-outer-name" +"mindspore/mindspore/_extends/parallel_compile/akg_compiler/akg_process.py" "Catching too general exception BaseException" +"mindspore/mindspore/_extends/graph_kernel/model/model.py" "super-on-old-class" +"mindspore/model_zoo/official/cv/vgg16/infer/sdk/main.py" "unused-variable" + +# MindData +"mindspore/mindspore/dataset/__init__.py" "redefined-builtin" +"mindspore/mindspore/dataset/engine/__init__.py" "redefined-builtin" +"mindspore/mindspore/dataset/engine/datasets.py" "redefined-builtin" +"mindspore/mindspore/dataset/engine/datasets.py" "broad-except" +"mindspore/mindspore/dataset/transforms/py_transforms_util.py" "broad-except" + +# Tests +"mindspore/tests/vm_impl/array_ops_vm_impl.py" "unused-variable" +"mindspore/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_compile.py" "unused-import" +"mindspore/tests/ut/cpp/python_input/gtest_input/pipeline/infer/primitive_test.py" "super-init-not-called" +"mindspore/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_primitive.py" "super-init-not-called" +"mindspore/tests/ut/cpp/python_input/gtest_input/pre_activate" "unused-variable" +"mindspore/tests/ut/cpp/python_input/gtest_input/tbe" "unused-variable" +"mindspore/tests/ut/python/train/summary/test_summary_abnormal_input.py" "bare-except" +"mindspore/tests/ut/python/train/summary/test_graph_summary.py" "protected-access" +"mindspore/tests/ut/python/parameter_feature/test_parameter.py" "unused-variable" +"mindspore/tests/ut/python/parameter_feature/test_parameter.py" "singleton-comparison" +"mindspore/tests/ut/python/optimizer/test_debug_location.py" "no-value-for-parameter" +"mindspore/tests/ut/python/ops/test_control_ops.py" "superfluous-parens" +"mindspore/tests/ut/python/ops/test_control_ops.py" "unused-variable" +"mindspore/tests/ut/python/keep_order/test_keep_order.py" "redefined-outer-name" +"mindspore/tests/ut/python/ir/test_tensor.py" "protected-access" +"mindspore/tests/ut/python/dataset/test_sampler.py" "unused-variable" +"mindspore/tests/ut/python/dataset/test_minddataset_padded.py" "redefined-outer-name" +"mindspore/tests/ut/python/dataset/test_minddataset_padded.py" "unused-variable" +"mindspore/tests/ut/python/dataset/test_minddataset_padded.py" "expression-not-assigned" +"mindspore/tests/ut/python/dataset/test_batch.py" "broad-except" +"mindspore/tests/ut/python/dataset/test_config.py" "broad-except" +"mindspore/tests/ut/python/dataset/test_minddataset.py" "redefined-outer-name" 
+"mindspore/tests/ut/python/dataset/test_minddataset_sampler.py" "redefined-outer-name" +"mindspore/tests/ut/python/dataset/test_serdes_dataset.py" "redefined-outer-name" +"mindspore/tests/ut/python/dataset/test_serdes_dataset.py" "unused-import" +"mindspore/tests/ut/python/dataset/test_shuffle.py" "broad-except" +"mindspore/tests/ut/python/dataset/test_to_type.py" "broad-except" +"mindspore/tests/ut/python/dataset/test_uniform_augment.py" "broad-except" +"mindspore/tests/ut/python/dataset/test_zip.py" "broad-except" +"mindspore/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py" "redefined-outer-name" +"mindspore/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py" "redefined-outer-name" +"mindspore/tests/ut/python/mindrecord/test_imagenet_to_mindrecord.py" "redefined-outer-name" +"mindspore/tests/ut/python/mindrecord/test_mindrecord_exception.py" "redefined-outer-name" +"mindspore/tests/ut/python/mindrecord/test_mnist_to_mr.py" "redefined-outer-name" +"mindspore/tests/ut/python/nn/test_batchnorm.py" "no-value-for-parameter" +"mindspore/tests/ut/python/onnx/test_onnx.py" "unused-variable" +"mindspore/tests/ut/python/ops" "super-init-not-called" +"mindspore/tests/ut/python/ops/test_tensor_slice.py" "redefined-outer-name" +"mindspore/tests/ut/python/optimizer/test_debug_location.py" "super-init-not-called" +"mindspore/tests/ut/python/parallel/" "protected-access" +"mindspore/tests/ut/python/parameter_feature/test_var_grad.py" "bad-super-call" +"mindspore/tests/ut/python/parameter_feature/test_var_grad.py" "redefined-outer-name" +"mindspore/tests/ut/python/pipeline/parse/test_cont_break.py" "unused-variable" +"mindspore/tests/ut/python/pynative_mode" "no-else-return" +"mindspore/tests/ut/python/pynative_mode" "superfluous-parens" +"mindspore/tests/ut/python/pynative_mode" "unused-variable" +"mindspore/tests/ut/python/pynative_mode/test_stop_gradient.py" "redefined-outer-name" +"mindspore/tests/ut/python/pynative_mode/test_stop_gradient.py" "super-init-not-called" +"mindspore/tests/ut/python/test_log.py" "possibly-unused-variable" +"mindspore/tests/ut/python/test_log.py" "protected-access" +"mindspore/tests/ut/python/train/summary/test_summary_collector.py" "protected-access" +"mindspore/tests/ut/python/pipeline/parse/test_super.py" "bad-super-call" +"mindspore/tests/ut/python/pipeline/parse/test_super.py" "assignment-from-none" +"mindspore/tests/ut/python/pipeline/parse/test_dictionary.py" "consider-iterating-dictionary" +"mindspore/tests/ut/python/pipeline/parse/test_use_undefined_name_or_unsupported_builtin_function.py" "pointless-statement" +"mindspore/tests/st/networks/test_gpu_resnet.py" "superfluous-parens" +"mindspore/tests/st/networks/test_gpu_resnet.py" "unused-variable" +"mindspore/tests/st/networks/test_gpu_lstm.py" "unused-variable" +"mindspore/tests/st/networks/test_gpu_lstm.py" "redefined-outer-name" +"mindspore/tests/st/networks/test_gpu_lstm.py" "superfluous-parens" +"mindspore/tests/st/networks/test_gpu_alexnet.py" "unused-variable" +"mindspore/tests/st/networks/test_gpu_lenet.py" "unused-variable" +"mindspore/tests/st/ops/custom_ops_tbe/cus_add3.py" "unused-import" +"mindspore/tests/st/ops/ascend/test_aicpu_ops/test_strided_slice.py" "redefined-outer-name" +"mindspore/tests/st/ops/ascend/test_aicpu_ops/test_strided_slice.py" "redefined-builtin" +"mindspore/tests/st/ops/ascend/test_aicpu_ops/test_strided_slice_grad.py" "redefined-outer-name" +"mindspore/tests/st/pynative/parser/test_parser_construct.py" "bad-super-call" 
+"mindspore/tests/st/explainer/benchmark/_attribution/test_localization.py" "protected-access" +"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_gradcam.py" "not-callable" +"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_gradient.py" "not-callable" +"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_modified_relu.py" "not-callable" diff --git a/official/cv/vgg16/infer/Dockerfile b/official/cv/vgg16/infer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..fcf82bb3c154f114293f8f84a61465de446b540a --- /dev/null +++ b/official/cv/vgg16/infer/Dockerfile @@ -0,0 +1,10 @@ +ARG FROM_IMAGE_NAME +#Base mirror +FROM $FROM_IMAGE_NAME +ARG SDK_PKG +#Copy the SDK installation package to the image +COPY ./$SDK_PKG . +#Install SDK +RUN ./$SDK_PKG --install +#Make environment variables take effect +RUN /bin/bash -c "source ~/.bashrc" \ No newline at end of file diff --git a/official/cv/vgg16/infer/README.md b/official/cv/vgg16/infer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..00d195a35b3f8cbce3f09b41507c368b3341e172 --- /dev/null +++ b/official/cv/vgg16/infer/README.md @@ -0,0 +1,504 @@ +# 离线推理过程 + +## 准备容器环境 + +1、将源代码(vgg16_mindspore_1.3.0_code)上传至服务器任意目录(如:/home/data/),并进入该目录。 + +源码目录结构如下图所示: + +```bash +/home/data/vgg16_mindspore_1.3.0_code +├── infer # MindX高性能预训练模型新增 +│ └── README.md # 离线推理文档 +│ ├── convert # 转换om模型命令,AIPP +│ │ ├──aipp_vgg16_rgb.config +│ │ └──atc.sh +│ ├── data # 包括模型文件、模型输入数据集、模型相关配置文件(如label、SDK的pipeline) +│ │ ├── model +│ │ ├── input +│ │ └── config +│ │ │ ├──imagenet1000_clsidx_to_labels.names +│ │ │ ├──vgg16.cfg +│ │ │ └──vgg16.pipeline +│ ├── mxbase # 基于mxbase推理 +│ │ ├── build +│ │ ├── src +│ │ │ ├── Vgg16Classify.cpp +│ │ │ ├── Vgg16Classify.h +│ │ │ ├── main.cpp +│ │ │ └── include #包含运行所需库 +│ │ ├── CMakeLists.txt +│ │ └── build.sh +│ └── sdk # 基于sdk run包推理;如果是C++实现,存放路径一样 +│ │ ├── main.py +│ │ └── run.sh +│ └── util # 精度验证脚本 +│ │ ├──imagenet2012_val.txt +│ │ └──task_metric.py +│ ├──Dockerfile #容器文件 +│ └──docker_start_infer.sh # 启动容器脚本 +``` + +2、下载SDK安装包,将其上传至infer文件夹内(如/home/data/vgg16_mindspore_1.3.0_code/infer/) + +- SDK版本号:2.0.1 + +- 下载路径:[mxManufacture-昇腾社区 (hiascend.com)](https://www.hiascend.com/software/mindx-sdk/sdk-detail) + + Dockerfile文件内容: + + ~~~ dockerfile + ARG FROM_IMAGE_NAME + #基础镜像 + FROM $FROM_IMAGE_NAME + ARG SDK_PKG + #将SDK安装包拷贝到镜像中 + COPY ./$SDK_PKG . + #安装SDK + RUN ./$SDK_PKG --install + #使环境变量生效 + RUN /bin/bash -c "source ~/.bashrc" + ~~~ + +3、编译推理镜像 + +非root权限,需在指令前面加"sudo" + +```bash +docker build -t infer_image --build-arg FROM_IMAGE_NAME=base_image:tag --build-arg SDK_PKG=sdk_pkg . 
+
+4. Start the container.
+
+Run the following command to start a container instance.
+
+```bash
+bash docker_start_infer.sh docker_image:tag model_dir
+```
+
+| Parameter      | Description                                  |
+| -------------- | -------------------------------------------- |
+| *docker_image* | Name of the inference image; download it from Ascend Hub. |
+| *tag*          | Image tag; set it according to your environment, e.g. 21.0.2. |
+| *model_dir*    | Mount path inside the inference container; /home/data in this example. |
+
+Starting the container mounts the inference chips and the data path into the container.
+The content of docker_start_infer.sh (vgg16_mindspore_1.3.0_code/infer/docker_start_infer.sh) is shown below.
+
+```shell
+#!/bin/bash
+docker_image=$1
+model_dir=$2
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+if [ ! -d "${model_dir}" ]; then
+    echo "please input model_dir"
+    exit 1
+fi
+# change --device=/dev/davinci0 according to your NPU devices
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${model_dir}:${model_dir} \
+  ${docker_image} \
+  /bin/bash
+```
+
+## MindX SDK Inference
+
+1. Place the AIR model in the /vgg16_mindspore_1.3.0_code/infer/data/model directory.
+
+2. Prepare the AIPP configuration file.
+
+AIPP requires an aipp.config file. The AIPP operator is inserted during ATC conversion so that the model connects seamlessly to DVPP-processed data. For the AIPP parameters, see "ATC Tool Instructions" in the [CANN Auxiliary Development Tool Guide (Inference)](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=developer-documents&subcategory=auxiliary-development-tools).
+
+The content of aipp.config is shown below; the file is placed in the /vgg16_mindspore_1.3.0_code/infer/convert directory.
+
+~~~ config
+aipp_op {
+    aipp_mode: static
+    input_format: RGB888_U8
+
+    rbuv_swap_switch: true
+
+    min_chn_0: 123.675
+    min_chn_1: 116.28
+    min_chn_2: 103.33
+    var_reci_chn_0: 0.0171247538316637
+    var_reci_chn_1: 0.0175070028011204
+    var_reci_chn_2: 0.0174291938997821
+}
+~~~
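+
+The normalization constants above appear to be the common ImageNet statistics scaled to the 0-255 pixel range: min_chn_* match the per-channel means (for example, 0.485 × 255 = 123.675) and var_reci_chn_* are the reciprocals of the per-channel standard deviations (for example, 1 / (0.229 × 255) ≈ 0.0171248). The sketch below shows that derivation as an assumption about where the values come from; the config file itself is authoritative, and note that min_chn_2 = 103.33 is close to, but not exactly, 0.406 × 255 = 103.53:
+
+```python
+# Sketch: deriving the AIPP constants from the usual ImageNet mean/std.
+means = [0.485, 0.456, 0.406]  # per-channel means on [0, 1]
+stds = [0.229, 0.224, 0.225]   # per-channel standard deviations on [0, 1]
+for m, s in zip(means, stds):
+    print(f"min_chn ~ {m * 255:.6g}, var_reci_chn ~ {1 / (s * 255):.16f}")
+# min_chn ~ 123.675, var_reci_chn ~ 0.0171247538316637
+# min_chn ~ 116.28,  var_reci_chn ~ 0.0175070028011204
+# min_chn ~ 103.53,  var_reci_chn ~ 0.0174291938997821
+```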
"mxpi_imageresize", + "next": "mxpi_imagecrop0:1" + }, + "mxpi_imagecrop0": { + "props": { + "dataSource": "appsrc1", + "dataSourceImage": "mxpi_imageresize0", + "handleMethod": "opencv" + }, + "factory": "mxpi_imagecrop", + "next": "mxpi_tensorinfer0" + }, + "mxpi_tensorinfer0": { + "props": { + "dataSource": "mxpi_imagecrop0", + "modelPath": "../data/model/vgg16.om", + "waitingTime": "1", + "outputDeviceId": "-1" + }, + "factory": "mxpi_tensorinfer", + "next": "mxpi_classpostprocessor0" + }, + "mxpi_classpostprocessor0": { + "props": { + "dataSource": "mxpi_tensorinfer0", + "postProcessConfigPath": "../data/config/vgg16.cfg", + "labelPath": "../data/config/imagenet1000_clsidx_to_labels.names", + "postProcessLibPath": "../../../mxManufacture/lib/modelpostprocessors/libresnet50postprocess.so" + }, + "factory": "mxpi_classpostprocessor", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_classpostprocessor0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsrc1": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagecrop0:0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } +} +``` + +参数说明: + +- resizeHeight:图片缩放后高度,请根据实际需求尺寸输入。 +- resizeWidth:图片缩放后宽度,请根据实际需求尺寸输入。 +- modelPath:模型路径,请根据模型实际路径修改。 +- postProcessConfigPath:模型配置文件路径,请根据模型配置文件的实际路径修改。 +- labelPath:标签文件路径,请根据标签文件的实际路径修改。 + +(2)在“/home/data/vgg16_mindspore_1.3.0_code/infer/data/config/”目录下编写vgg16.cfg配置文件。 + +配置文件vgg16.cfg内容如下。 + +```cfg +CLASS_NUM=1000 +SOFTMAX=false +TOP_K=5 +``` + +(3)进入“/home/data/vgg16_mindspore_1.3.0_code/infer/sdk/”目录。 + +根据实际情况修改main.py文件中裁剪图片的位置和**pipeline**文件路径。 + +```pipeline + ... + def _predict_gen_protobuf(self): + object_list = MxpiDataType.MxpiObjectList() + object_vec = object_list.objectVec.add() + object_vec.x0 = 16 + object_vec.y0 = 16 + object_vec.x1 = 240 + object_vec.y1 = 240 +... +def main(): + pipeline_conf = "../data/config/vgg16.pipeline" + stream_name = b'im_vgg16' + + args = parse_args() + result_fname = get_file_name(args.result_file) + pred_result_file = f"{result_fname}.txt" + dataset = GlobDataLoader(args.glob, limit=None) + with ExitStack() as stack: + predictor = stack.enter_context(Predictor(pipeline_conf, stream_name)) + result_fd = stack.enter_context(open(pred_result_file, 'w')) + + for fname, pred_result in predictor.predict(dataset): + result_fd.write(result_encode(fname, pred_result)) + + print(f"success, result in {pred_result_file}") +... 
+
+6. Go to the vgg16_mindspore_1.3.0_code/infer/sdk folder and run **bash run.sh**. The inference results are saved to vgg16_sdk_pred_result.txt in the current directory.
+
+run.sh
+
+~~~ shell
+set -e
+
+CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exit ; } ; pwd)
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# to set PYTHONPATH, import the StreamManagerApi.py
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3.7 main.py "../data/input/*" vgg16_sdk_pred_result.txt
+exit 0
+~~~
+
+Inference results:
+
+~~~bash
+ILSVRC2012_val_00047110 221,267,266,206,220
+ILSVRC2012_val_00014552 505,550,804,899,859
+ILSVRC2012_val_00006604 276,287,289,275,285
+ILSVRC2012_val_00016859 2,3,4,148,5
+ILSVRC2012_val_00020009 336,649,350,371,972
+ILSVRC2012_val_00025515 917,921,446,620,692
+ILSVRC2012_val_00046794 427,504,463,412,686
+ILSVRC2012_val_00035447 856,866,595,730,603
+ILSVRC2012_val_00016392 54,67,68,60,66
+......
+~~~
+
+7. Validate the accuracy: go to the vgg16_mindspore_1.3.0_code/infer/util directory and run **python3.7 task_metric.py ../sdk/vgg16_sdk_pred_result.txt imagenet2012_val.txt vgg16_sdk_pred_result_acc.json 5** (the expected input file formats are sketched after the parameter list below).
+
+Parameter description:
+
+- First argument (../sdk/vgg16_sdk_pred_result.txt): path of the saved inference results.
+
+- Second argument (imagenet2012_val.txt): validation set label file.
+
+- Third argument (vgg16_sdk_pred_result_acc.json): result file.
+
+- Fourth argument (5): "1" means top-1 accuracy, "5" means top-5 accuracy.
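+
+Both input files are plain text with one image per line. A minimal sketch of the formats that task_metric.py parses (the label index shown here is illustrative; see load_gt and load_pred in util/task_metric.py for the actual parsing):
+
+```python
+# One line from each file (illustrative values):
+gt_line = "ILSVRC2012_val_00047110.JPEG 221"                # <image> <label index>
+pred_line = "ILSVRC2012_val_00047110 221,267,266,206,220"   # <image> <top-k class ids>
+
+img_name, label = gt_line.strip().split(" ", 1)             # mirrors load_gt()
+name, ids = pred_line.strip().split(" ", 1)                 # mirrors load_pred()
+print(name, [x.strip() for x in ids.split(",")])
+```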
+
+8. View the accuracy results.
+
+~~~ bash
+cat vgg16_sdk_pred_result_acc.json
+~~~
+
+Top-5 accuracy:
+
+~~~bash
+"total": 50000,
+  "accuracy": [
+    0.73328,
+    0.83924,
+    0.8786,
+    0.90034,
+    0.91496
+]
+...
+~~~
+
+## mxBase Inference
+
+1. Add the environment variables.
+
+Open the ~/.bashrc file with **vi ~/.bashrc**, add the environment variables below to the current environment, save and exit, then run **source ~/.bashrc** to make them take effect.
+
+```bash
+export ASCEND_HOME="/usr/local/Ascend"
+export ASCEND_VERSION="nnrt/latest"
+export ARCH_PATTERN="."
+export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib/modelpostprocessors:${LD_LIBRARY_PATH}"
+export MXSDK_OPENSOURCE_DIR="${MX_SDK_HOME}/opensource"
+```
+
+2. Go to the vgg16_mindspore_1.3.0_code/infer/mxbase directory and run **bash build.sh**.
+
+build.sh
+
+~~~ shell
+path_cur=$(dirname $0)
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+function build_vgg16()
+{
+    cd $path_cur
+    rm -rf build
+    mkdir -p build
+    cd build
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build vgg16."
+        exit ${ret}
+    fi
+    make install
+}
+check_env
+build_vgg16
+~~~
+
+3. Run **./vgg16 ../data/input 50000**. The inference results are saved to mx_pred_result.txt in the current directory.
+
+- First argument (../data/input): image input path.
+- Second argument (50000): number of input images.
+
+Inference results:
+
+~~~bash
+ILSVRC2012_val_00047110 221,267,266,206,220,
+ILSVRC2012_val_00014552 505,550,804,899,859,
+ILSVRC2012_val_00006604 276,287,289,275,285,
+ILSVRC2012_val_00016859 2,3,4,148,5,
+ILSVRC2012_val_00020009 336,649,350,371,972,
+ILSVRC2012_val_00025515 917,921,446,620,692,
+ILSVRC2012_val_00046794 427,504,463,412,686,
+ILSVRC2012_val_00035447 856,866,595,730,603,
+ILSVRC2012_val_00016392 54,67,68,60,66,
+ILSVRC2012_val_00023902 50,49,44,39,62,
+ILSVRC2012_val_00000719 268,151,171,158,104,
+......
+~~~
+
+4. Validate the accuracy: go to the vgg16_mindspore_1.3.0_code/infer/util directory and run **python3.7 task_metric.py ../mxbase/mx_pred_result.txt imagenet2012_val.txt vgg16_mx_pred_result_acc.json 5**.
+
+Parameter description:
+
+- First argument (../mxbase/mx_pred_result.txt): path of the saved inference results.
+- Second argument (imagenet2012_val.txt): validation set label file.
+- Third argument (vgg16_mx_pred_result_acc.json): result file.
+- Fourth argument (5): "1" means top-1 accuracy, "5" means top-5 accuracy.
+
+5. View the accuracy results.
+
+~~~ bash
+cat vgg16_mx_pred_result_acc.json
+~~~
+
+Top-5 accuracy:
+
+~~~ bash
+ "accuracy": [
+    0.73328,
+    0.83924,
+    0.8786,
+    0.90034,
+    0.91496
+]
+...
+~~~
+
diff --git a/official/cv/vgg16/infer/convert/aipp_vgg16_rgb.config b/official/cv/vgg16/infer/convert/aipp_vgg16_rgb.config
new file mode 100644
index 0000000000000000000000000000000000000000..49e00dfd45942375ac54c2ebfc95345e0bb58bb2
--- /dev/null
+++ b/official/cv/vgg16/infer/convert/aipp_vgg16_rgb.config
@@ -0,0 +1,13 @@
+aipp_op {
+    aipp_mode: static
+    input_format: RGB888_U8
+
+    rbuv_swap_switch: true
+
+    min_chn_0: 123.675
+    min_chn_1: 116.28
+    min_chn_2: 103.33
+    var_reci_chn_0: 0.0171247538316637
+    var_reci_chn_1: 0.0175070028011204
+    var_reci_chn_2: 0.0174291938997821
+}
\ No newline at end of file
diff --git a/official/cv/vgg16/infer/convert/atc.sh b/official/cv/vgg16/infer/convert/atc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f50c69119273413a1d97e3157ae67638f031bde3
--- /dev/null
+++ b/official/cv/vgg16/infer/convert/atc.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +model=$1 +/usr/local/Ascend/atc/bin/atc \ + --model=$model \ + --framework=1 \ + --output=../data/model/vgg16 \ + --input_shape="input:1,224,224,3" \ + --enable_small_channel=1 \ + --log=error \ + --soc_version=Ascend310 \ + --insert_op_conf=aipp_vgg16_rgb.config +exit 0 diff --git a/official/cv/vgg16/infer/data/config/vgg16.cfg b/official/cv/vgg16/infer/data/config/vgg16.cfg new file mode 100644 index 0000000000000000000000000000000000000000..581fc76d3d75445323ea9a387f7152a72bedd1d3 --- /dev/null +++ b/official/cv/vgg16/infer/data/config/vgg16.cfg @@ -0,0 +1,3 @@ +CLASS_NUM=1000 +SOFTMAX=false +TOP_K=5 diff --git a/official/cv/vgg16/infer/data/config/vgg16.pipeline b/official/cv/vgg16/infer/data/config/vgg16.pipeline new file mode 100644 index 0000000000000000000000000000000000000000..d6be782e5c620e66e5a380f9afc8b34babc36119 --- /dev/null +++ b/official/cv/vgg16/infer/data/config/vgg16.pipeline @@ -0,0 +1,80 @@ +{ + "im_vgg16": { + "stream_config": { + "deviceId": "0" + }, + "mxpi_imagedecoder0": { + "props": { + "handleMethod": "opencv" + }, + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "handleMethod": "opencv", + "resizeHeight": "256", + "resizeWidth": "256", + "resizeType": "Resizer_Stretch" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_imagecrop0:1" + }, + "mxpi_imagecrop0": { + "props": { + "dataSource": "appsrc1", + "dataSourceImage": "mxpi_imageresize0", + "handleMethod": "opencv" + }, + "factory": "mxpi_imagecrop", + "next": "mxpi_tensorinfer0" + }, + "mxpi_tensorinfer0": { + "props": { + "dataSource": "mxpi_imagecrop0", + "modelPath": "../data/model/vgg16.om", + "waitingTime": "1", + "outputDeviceId": "-1" + }, + "factory": "mxpi_tensorinfer", + "next": "mxpi_classpostprocessor0" + }, + "mxpi_classpostprocessor0": { + "props": { + "dataSource": "mxpi_tensorinfer0", + "postProcessConfigPath": "../data/config/vgg16.cfg", + "labelPath": "../data/config/imagenet1000_clsidx_to_labels.names", + "postProcessLibPath": "/usr/local/sdk_home/mxManufacture/lib/modelpostprocessors/libresnet50postprocess.so" + }, + "factory": "mxpi_classpostprocessor", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_classpostprocessor0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsrc1": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagecrop0:0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } +} \ No newline at end of file diff --git a/official/cv/vgg16/infer/data/input/ILSVRC2012_val_00000001.JPEG b/official/cv/vgg16/infer/data/input/ILSVRC2012_val_00000001.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..fd3a93f59385d6ff632483646e6caee300b56d09 Binary files /dev/null and b/official/cv/vgg16/infer/data/input/ILSVRC2012_val_00000001.JPEG differ diff --git a/official/cv/vgg16/infer/docker_start_infer.sh b/official/cv/vgg16/infer/docker_start_infer.sh new file mode 100644 index 0000000000000000000000000000000000000000..a8aceaa0bd51aa57e3fdd0197e4cccb335ccc7bf --- /dev/null +++ b/official/cv/vgg16/infer/docker_start_infer.sh @@ -0,0 +1,23 @@ +#!/bin/bash +docker_image=$1 +model_dir=$2 + +if [ -z "${docker_image}" ]; then + echo "please input docker_image" + exit 
1 +fi + +if [ ! -d "${model_dir}" ]; then + echo "please input model_dir" + exit 1 +fi + +docker run -it \ + --device=/dev/davinci0 \ + --device=/dev/davinci_manager \ + --device=/dev/devmm_svm \ + --device=/dev/hisi_hdc \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v ${model_dir}:${model_dir} \ + ${docker_image} \ + /bin/bash \ No newline at end of file diff --git a/official/cv/vgg16/infer/mxbase/CMakeLists.txt b/official/cv/vgg16/infer/mxbase/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2dea1407d6fd882b87bae20fd0226258179bca74 --- /dev/null +++ b/official/cv/vgg16/infer/mxbase/CMakeLists.txt @@ -0,0 +1,53 @@ +cmake_minimum_required(VERSION 3.10.0) +project(vgg16) + +set(TARGET vgg16) + +add_definitions(-DENABLE_DVPP_INTERFACE) + +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +add_definitions(-Dgoogle=mindxsdk_private) + +add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall) +add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie) + +# Check environment variable +if(NOT DEFINED ENV{ASCEND_HOME}) + message(FATAL_ERROR "please define environment variable:ASCEND_HOME") +endif() +if(NOT DEFINED ENV{ASCEND_VERSION}) + message(WARNING "please define environment variable:ASCEND_VERSION") +endif() +if(NOT DEFINED ENV{ARCH_PATTERN}) + message(WARNING "please define environment variable:ARCH_PATTERN") +endif() +set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include) +set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64) + +set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME}) +set(MXBASE_INC ${MXBASE_ROOT_DIR}/include) +set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib) +set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors) +set(MXBASE_POST_PROCESS_DIR ${PROJECT_SOURCE_DIR}/src/include) +if(DEFINED ENV{MXSDK_OPENSOURCE_DIR}) + set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR}) +else() + set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource) +endif() + +include_directories(${ACL_INC_DIR}) +include_directories(${OPENSOURCE_DIR}/include) +include_directories(${OPENSOURCE_DIR}/include/opencv4) + +include_directories(${MXBASE_INC}) +include_directories(${MXBASE_POST_PROCESS_DIR}) + +link_directories(${ACL_LIB_DIR}) +link_directories(${OPENSOURCE_DIR}/lib) +link_directories(${MXBASE_LIB_DIR}) +link_directories(${MXBASE_POST_LIB_DIR}) + +add_executable(${TARGET} ./src/main.cpp ./src/Vgg16Classify.cpp) +target_link_libraries(${TARGET} glog cpprest mxbase resnet50postprocess opencv_world stdc++fs) + +install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/) \ No newline at end of file diff --git a/official/cv/vgg16/infer/mxbase/build.sh b/official/cv/vgg16/infer/mxbase/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..76534223dd3dee09b8ccd66a23015dbfa6140377 --- /dev/null +++ b/official/cv/vgg16/infer/mxbase/build.sh @@ -0,0 +1,41 @@ +#!/usr/bin/bash +path_cur=$(dirname $0) + +function check_env() +{ + # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user + if [ ! "${ASCEND_VERSION}" ]; then + export ASCEND_VERSION=ascend-toolkit/latest + echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}" + else + echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user" + fi + + if [ ! 
"${ARCH_PATTERN}" ]; then + # set ARCH_PATTERN to ./ when it was not specified by user + export ARCH_PATTERN=./ + echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}" + else + echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user" + fi +} + +function build_vgg16() +{ + cd $path_cur + rm -rf build + mkdir -p build + cd build + cmake .. + make + ret=$? + if [ ${ret} -ne 0 ]; then + echo "Failed to build vgg16." + exit ${ret} + fi + make install +} + +check_env +build_vgg16 +exit 0 \ No newline at end of file diff --git a/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.cpp b/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a20b9b2c71a1d1cbd38c1768d75d59d8b9035f14 --- /dev/null +++ b/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.cpp @@ -0,0 +1,230 @@ +/* + * Copyright 2021. Huawei Technologies Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include for map<> +#include for make_shared<> +#include for vector<> +#include for string +#include "Vgg16Classify.h" +#include "MxBase/DeviceManager/DeviceManager.h" +#include "MxBase/Log/Log.h" + +namespace { + const uint32_t YUV_BYTE_NU = 3; + const uint32_t YUV_BYTE_DE = 2; + const uint32_t VPC_H_ALIGN = 2; +} + +APP_ERROR Vgg16Classify::Init(const InitParam &initParam) { + deviceId_ = initParam.deviceId; + APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices(); + if (ret != APP_ERR_OK) { + LogError << "Init devices failed, ret=" << ret << "."; + return ret; + } + ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId); + if (ret != APP_ERR_OK) { + LogError << "Set context failed, ret=" << ret << "."; + return ret; + } + dvppWrapper_ = std::make_shared(); + ret = dvppWrapper_->Init(); + if (ret != APP_ERR_OK) { + LogError << "DvppWrapper init failed, ret=" << ret << "."; + return ret; + } + model_ = std::make_shared(); + ret = model_->Init(initParam.modelPath, modelDesc_); + if (ret != APP_ERR_OK) { + LogError << "ModelInferenceProcessor init failed, ret=" << ret << "."; + return ret; + } + MxBase::ConfigData configData; + const std::string softmax = initParam.softmax ? "true" : "false"; + const std::string checkTensor = initParam.checkTensor ? 
"true" : "false"; + + configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum)); + configData.SetJsonValue("TOP_K", std::to_string(initParam.topk)); + configData.SetJsonValue("SOFTMAX", softmax); + configData.SetJsonValue("CHECK_MODEL", checkTensor); + + auto jsonStr = configData.GetCfgJson().serialize(); + std::map> config; + config["postProcessConfigContent"] = std::make_shared(jsonStr); + config["labelPath"] = std::make_shared(initParam.labelPath); + + post_ = std::make_shared(); + ret = post_->Init(config); + if (ret != APP_ERR_OK) { + LogError << "Resnet50PostProcess init failed, ret=" << ret << "."; + return ret; + } + tfile_.open("mx_pred_result.txt"); + if (!tfile_) { + LogError << "Open result file failed."; + return APP_ERR_COMM_OPEN_FAIL; + } + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::DeInit() { + dvppWrapper_->DeInit(); + model_->DeInit(); + post_->DeInit(); + MxBase::DeviceManager::GetInstance()->DestroyDevices(); + tfile_.close(); + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::ReadImage(const std::string &imgPath, cv::Mat &imageMat) { + imageMat = cv::imread(imgPath, cv::IMREAD_COLOR); + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::Resize(const cv::Mat &srcImageMat, cv::Mat &dstImageMat) { + static constexpr uint32_t resizeHeight = 256; + static constexpr uint32_t resizeWidth = 256; + cv::resize(srcImageMat, dstImageMat, cv::Size(resizeHeight, resizeWidth)); + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::Crop(const cv::Mat &srcMat, cv::Mat &dstMat) { + static cv::Rect rectOfImg(16, 16, 224, 224); + dstMat = srcMat(rectOfImg).clone(); + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) { + const uint32_t dataSize = imageMat.cols * imageMat.rows * imageMat.channels(); + MxBase::MemoryData MemoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_); + MxBase::MemoryData MemoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC); + APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(MemoryDataDst, MemoryDataSrc); + if (ret != APP_ERR_OK) { + LogError << GetError(ret) << "Memory malloc failed."; + return ret; + } + std::vector shape = {static_cast(imageMat.rows), + static_cast(imageMat.cols), static_cast(imageMat.channels())}; + tensorBase = MxBase::TensorBase(MemoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8); + return APP_ERR_OK; +} + +APP_ERROR Vgg16Classify::Inference(const std::vector &inputs, + std::vector &outputs) { + auto dtypes = model_->GetOutputDataType(); + for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) { + std::vector shape = {}; + for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) { + shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]); + } + MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_); + APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor); + if (ret != APP_ERR_OK) { + LogError << "TensorBaseMalloc failed, ret=" << ret << "."; + return ret; + } + outputs.push_back(tensor); + } + MxBase::DynamicInfo dynamicInfo = {}; + dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH; + auto startTime = std::chrono::high_resolution_clock::now(); + APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo); + auto endTime = std::chrono::high_resolution_clock::now(); + double costMs = std::chrono::duration(endTime - startTime).count(); + g_inferCost.push_back(costMs); + if (ret != APP_ERR_OK) { 
+APP_ERROR Vgg16Classify::Process(const std::string &imgPath) {
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = Resize(imageMat, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "Resize failed, ret=" << ret << ".";
+        return ret;
+    }
+    cv::Mat cropImage;
+    ret = Crop(imageMat, cropImage);
+    if (ret != APP_ERR_OK) {
+        LogError << "Crop failed, ret=" << ret << ".";
+        return ret;
+    }
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(cropImage, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    inputs.push_back(tensorBase);
+    ret = Inference(inputs, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<std::vector<MxBase::ClassInfo>> BatchClsInfos = {};
+    ret = PostProcess(outputs, BatchClsInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = SaveResult(imgPath, BatchClsInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "Export result to file failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Vgg16Classify::SaveResult(const std::string &imgPath,
+    const std::vector<std::vector<MxBase::ClassInfo>> &BatchClsInfos) {
+    uint32_t batchIndex = 0;
+    std::string fileName = imgPath.substr(imgPath.find_last_of("/") + 1);
+    size_t dot = fileName.find_last_of(".");
+    for (const auto &clsInfos : BatchClsInfos) {
+        std::string resultStr;
+        for (const auto &clsInfo : clsInfos) {
+            resultStr += std::to_string(clsInfo.classId) + ",";
+        }
+        tfile_ << fileName.substr(0, dot) << " " << resultStr << std::endl;
+        if (tfile_.fail()) {
+            LogError << "Failed to write the result to file.";
+            return APP_ERR_COMM_WRITE_FAIL;
+        }
+        batchIndex++;
+    }
+    return APP_ERR_OK;
+}
diff --git a/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.h b/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.h
new file mode 100644
index 0000000000000000000000000000000000000000..7512c33eef5542ba0c6261be0beb4ce2b5961ee5
--- /dev/null
+++ b/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2021. Huawei Technologies Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_VGG16CLASSIFY_H
+#define MXBASE_VGG16CLASSIFY_H
+
+#include <fstream>   // for ofstream
+#include <string>    // for string
+#include <vector>    // for vector<>
+#include <memory>    // for shared_ptr<>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "Resnet50PostProcess.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_inferCost;
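+
+// InitParam mirrors the post-processing settings in data/config/vgg16.cfg
+// (CLASS_NUM, TOP_K, SOFTMAX) plus the device id, label file and .om model
+// path; main.cpp fills it in before calling Vgg16Classify::Init().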
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    uint32_t classNum;
+    uint32_t topk;
+    bool softmax;
+    bool checkTensor;
+    std::string modelPath;
+};
+
+class Vgg16Classify {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat);
+    APP_ERROR Resize(const cv::Mat &srcImageMat, cv::Mat &dstImageMat);
+    APP_ERROR Crop(const cv::Mat &srcMat, cv::Mat &dstMat);
+    APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase);
+    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+    APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &inputs,
+        std::vector<std::vector<MxBase::ClassInfo>> &clsInfos);
+    APP_ERROR Process(const std::string &imgPath);
+    APP_ERROR SaveResult(const std::string &imgPath,
+        const std::vector<std::vector<MxBase::ClassInfo>> &BatchClsInfos);
+
+ private:
+    std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    std::shared_ptr<MxBase::Resnet50PostProcess> post_;
+    MxBase::ModelDesc modelDesc_;
+    uint32_t deviceId_ = 0;
+    std::ofstream tfile_;
+};
+#endif
diff --git a/official/cv/vgg16/infer/mxbase/src/include/Resnet50PostProcess.h b/official/cv/vgg16/infer/mxbase/src/include/Resnet50PostProcess.h
new file mode 100644
index 0000000000000000000000000000000000000000..0140642f5399700bf41ed1186146c985a02cfcb4
--- /dev/null
+++ b/official/cv/vgg16/infer/mxbase/src/include/Resnet50PostProcess.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2021. Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef RESNET50_POST_PROCESS_H
+#define RESNET50_POST_PROCESS_H
+
+#include <map>       // for map<>
+#include <string>    // for string
+#include <vector>    // for vector<>
+#include <memory>    // for shared_ptr<>
+#include "MxBase/PostProcessBases/ClassPostProcessBase.h"
+
+namespace MxBase {
+class Resnet50PostProcess : public ClassPostProcessBase {
+ public:
+    Resnet50PostProcess() = default;
+    ~Resnet50PostProcess() = default;
+    Resnet50PostProcess(const Resnet50PostProcess &other) = default;
+    APP_ERROR Init(const std::map<std::string, std::shared_ptr<void>> &postConfig) override;
+    APP_ERROR DeInit() override;
+    APP_ERROR Process(const std::vector<TensorBase> &tensors, std::vector<std::vector<ClassInfo>> &classInfos,
+        const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override;
+    bool IsValidTensors(const std::vector<TensorBase> &tensors) const override;
+    Resnet50PostProcess &operator=(const Resnet50PostProcess &other);
+    uint64_t GetCurrentVersion() override {
+        return CURRENT_VERSION;
+    }
+
+ private:
+    const int CURRENT_VERSION = 2000001;
+    uint32_t classNum_ = 0;
+    bool softmax_ = false;
+    uint32_t topK_ = 1;
+};
+
+#ifdef ENABLE_POST_PROCESS_INSTANCE
+extern "C" {
+std::shared_ptr<MxBase::Resnet50PostProcess> GetClassInstance();
+}
+#endif
+}  // namespace MxBase
+#endif
diff --git a/official/cv/vgg16/infer/mxbase/src/main.cpp b/official/cv/vgg16/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ada7e5eb8f30fb80f81c66bc10a37096b4810325
--- /dev/null
+++ b/official/cv/vgg16/infer/mxbase/src/main.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2021. Huawei Technologies Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdlib>
+#include <experimental/filesystem>
+#include "Vgg16Classify.h"
+#include "MxBase/Log/Log.h"
+
+namespace fs = std::experimental::filesystem;
+namespace {
+const uint32_t CLASS_NUM = 1000;
+}
+std::vector<double> g_inferCost;
+
+int main(int argc, char* argv[]) {
+    if (argc <= 2) {
+        LogWarn << "Please input image path and image count, such as './vgg16 ../data/input 10'.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.classNum = CLASS_NUM;
+    initParam.labelPath = "../data/config/imagenet1000_clsidx_to_labels.names";
+    initParam.topk = 5;
+    initParam.softmax = false;
+    initParam.checkTensor = true;
+    initParam.modelPath = "../data/model/vgg16.om";
+    auto vgg16 = std::make_shared<Vgg16Classify>();
+    APP_ERROR ret = vgg16->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Vgg16Classify init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::string imgDir = argv[1];
+    int limit = std::strtol(argv[2], nullptr, 0);
+    int index = 0;
+    for (auto & entry : fs::directory_iterator(imgDir)) {
+        if (index == limit) {
+            break;
+        }
+        index++;
+        LogInfo << "read image path " << entry.path();
+        ret = vgg16->Process(entry.path());
+        if (ret != APP_ERR_OK) {
+            LogError << "Vgg16Classify process failed, ret=" << ret << ".";
+            vgg16->DeInit();
+            return ret;
+        }
+    }
+    vgg16->DeInit();
+    double costSum = 0;
+    for (unsigned int i = 0; i < g_inferCost.size(); i++) {
+        costSum += g_inferCost[i];
+    }
+    LogInfo << "Infer images sum " << g_inferCost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_inferCost.size() * 1000 / costSum << " images/sec.";
+    return APP_ERR_OK;
+}
diff --git a/official/cv/vgg16/infer/sdk/main.py b/official/cv/vgg16/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..8002328bea54f93bab4b2590cbdc04bd98a7ab0b
--- /dev/null
+++ b/official/cv/vgg16/infer/sdk/main.py
@@ -0,0 +1,167 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ + +import argparse +import glob +import json +import os +from contextlib import ExitStack + +from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, \ + MxProtobufIn +import MxpiDataType_pb2 as MxpiDataType + + +class GlobDataLoader(): + def __init__(self, glob_pattern, limit=None): + self.glob_pattern = glob_pattern + self.limit = limit + self.file_list = self.get_file_list() + self.cur_index = 0 + + def get_file_list(self): + return glob.iglob(self.glob_pattern) + + def __iter__(self): + return self + + def __next__(self): + if self.cur_index == self.limit: + raise StopIteration() + label = None + file_path = next(self.file_list) + with open(file_path, 'rb') as fd: + data = fd.read() + + self.cur_index += 1 + return get_file_name(file_path), label, data + + +class Predictor(): + def __init__(self, pipeline_conf, stream_name): + self.pipeline_conf = pipeline_conf + self.stream_name = stream_name + + def __enter__(self): + self.stream_manager_api = StreamManagerApi() + ret = self.stream_manager_api.InitManager() + if ret != 0: + raise Exception(f"Failed to init Stream manager, ret={ret}") + + # create streams by pipeline config file + with open(self.pipeline_conf, 'rb') as f: + pipeline_str = f.read() + ret = self.stream_manager_api.CreateMultipleStreams(pipeline_str) + if ret != 0: + raise Exception(f"Failed to create Stream, ret={ret}") + self.data_input = MxDataInput() + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # destroy streams + self.stream_manager_api.DestroyAllStreams() + + def predict(self, dataset): + print("Start predict........") + print('>' * 30) + for name, label, data in dataset: + self.data_input.data = data + yield self._predict(name, self.data_input) + print("predict end.") + print('<' * 30) + + def _predict(self, name, data): + plugin_id = 0 + protobuf_data = self._predict_gen_protobuf() + self._predict_send_protobuf(self.stream_name, 1, protobuf_data) + unique_id = self._predict_send_data(self.stream_name, plugin_id, data) + result = self._predict_get_result(self.stream_name, unique_id) + return name, json.loads(result.data.decode()) + + def _predict_gen_protobuf(self): + object_list = MxpiDataType.MxpiObjectList() + object_vec = object_list.objectVec.add() + object_vec.x0 = 16 + object_vec.y0 = 16 + object_vec.x1 = 240 + object_vec.y1 = 240 + + protobuf = MxProtobufIn() + protobuf.key = b'appsrc1' + protobuf.type = b'MxTools.MxpiObjectList' + protobuf.protobuf = object_list.SerializeToString() + protobuf_vec = InProtobufVector() + protobuf_vec.push_back(protobuf) + return protobuf_vec + + def _predict_send_protobuf(self, stream_name, in_plugin_id, data): + self.stream_manager_api.SendProtobuf(stream_name, in_plugin_id, data) + + def _predict_send_data(self, stream_name, in_plugin_id, data_input): + unique_id = self.stream_manager_api.SendData(stream_name, in_plugin_id, + data_input) + if unique_id < 0: + raise Exception("Failed to send data to stream") + return unique_id + + def _predict_get_result(self, stream_name, unique_id): + result = self.stream_manager_api.GetResult(stream_name, unique_id) + if result.errorCode != 0: + raise Exception( + f"GetResultWithUniqueId error." 
+ f"errorCode={result.errorCode}, msg={result.data.decode()}") + return result + + +def get_file_name(file_path): + return os.path.splitext(os.path.basename(file_path.rstrip('/')))[0] + + +def result_encode(file_name, result): + sep = ',' + pred_class_ids = sep.join( + str(i.get('classId')) for i in result.get("MxpiClass", [])) + return f"{file_name} {pred_class_ids}\n" + + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('glob', help='img pth glob pattern.') + parser.add_argument('result_file', help='result file') + return parser.parse_args() + + +def main(): + pipeline_conf = "../data/config/vgg16.pipeline" + stream_name = b'im_vgg16' + + args = parse_args() + result_fname = get_file_name(args.result_file) + pred_result_file = f"{result_fname}.txt" + dataset = GlobDataLoader(args.glob, limit=None) + with ExitStack() as stack: + predictor = stack.enter_context(Predictor(pipeline_conf, stream_name)) + result_fd = stack.enter_context(open(pred_result_file, 'w')) + + for fname, pred_result in predictor.predict(dataset): + result_fd.write(result_encode(fname, pred_result)) + + print(f"success, result in {pred_result_file}") + + +if __name__ == "__main__": + main() diff --git a/official/cv/vgg16/infer/sdk/run.sh b/official/cv/vgg16/infer/sdk/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4581c561b7a19687679b8e62626d1ea03244312 --- /dev/null +++ b/official/cv/vgg16/infer/sdk/run.sh @@ -0,0 +1,16 @@ +#!/usr/bin/bash +set -e + +# Simple log helper functions +info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } +warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +#to set PYTHONPATH, import the StreamManagerApi.py +export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python + +python3.7 main.py "../data/input/*.JPEG" vgg16_sdk_pred_result.txt +exit 0 diff --git a/official/cv/vgg16/infer/util/task_metric.py b/official/cv/vgg16/infer/util/task_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..d395381bbd27c485381424acdf5884140a911aad --- /dev/null +++ b/official/cv/vgg16/infer/util/task_metric.py @@ -0,0 +1,108 @@ +# coding: utf-8 +""" +Copyright 2021 Huawei Technologies Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import argparse +import json +import os + +import numpy as np + + +def get_file_name(file_path): + return os.path.splitext(os.path.basename(file_path.rstrip('/')))[0] + + +def load_gt(gt_file): + gt = {} + with open(gt_file, 'r') as fd: + for line in fd.readlines(): + img_name, img_label_index = line.strip().split(" ", 1) + gt[get_file_name(img_name)] = img_label_index + return gt + + +def load_pred(pred_file): + pred = {} + with open(pred_file, 'r') as fd: + for line in fd.readlines(): + ret = line.strip().split(" ", 1) + if len(ret) < 2: + print(f"Warning: load pred, no result, line:{line}") + continue + img_name, ids = ret + img_name = get_file_name(img_name) + pred[img_name] = [x.strip() for x in ids.split(',')] + return pred + + +def calc_accuracy(gt_map, pred_map, top_k=5): + hits = [0] * top_k + miss_match = [] + total = 0 + for img, preds in pred_map.items(): + gt = gt_map.get(img) + if gt is None: + print(f"Warning: {img}'s gt is not exists.") + continue + try: + index = preds.index(gt, 0, top_k) + hits[index] += 1 + except ValueError: + miss_match.append({'img': img, 'gt': gt, 'prediction': preds}) + finally: + total += 1 + + top_k_hit = np.cumsum(hits) + accuracy = top_k_hit / total + return { + 'total': total, + 'accuracy': [acc for acc in accuracy], + 'miss': miss_match, + } + + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('prediction', help='prediction result file') + parser.add_argument('gt', help='ground true result file') + parser.add_argument('result_json', help='metric result file') + parser.add_argument('top_k', help='top k', type=int) + return parser.parse_args() + + +def main(): + args = parse_args() + prediction_file = args.prediction + gt_file = args.gt + top_k = args.top_k + result_json = args.result_json + + gt = load_gt(gt_file) + prediction = load_pred(prediction_file) + result = calc_accuracy(gt, prediction, top_k) + result.update({ + 'prediction_file': prediction_file, + 'gt_file': gt_file, + }) + with open(result_json, 'w') as fd: + json.dump(result, fd, indent=2) + print(f"\nsuccess, result in {result_json}") + + +if __name__ == '__main__': + main()