diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt index ba47bdc44cc888b46fd5d692270ee63842772b9e..9ef1486c9ddaf7685fb05ef1f2f4df65f289a8d1 100644 --- a/.jenkins/check/config/filter_cpplint.txt +++ b/.jenkins/check/config/filter_cpplint.txt @@ -6,3 +6,5 @@ "mindspore/model_zoo/official/cv/yolov4_tiny/infer/mxbase/src/Yolov4TinyDetection.h" "runtime/references" "mindspore/model_zoo/official/cv/yolov4_tiny/infer/mxbase/src/PostProcess/Yolov4TinyMindsporePost.h" "runtime/references" "mindspore/model_zoo/official/cv/resnet/infer/ResNet18/mxbase/Resnet18ClassifyOpencv.h" "runtime/references" +"models/official/cv/alexnet/infer/mxbase/src/Alexnet.h" "runtime/references" +"models/official/cv/alexnet/infer/mxbase/src/main.cpp" "runtime/references" \ No newline at end of file diff --git a/official/cv/alexnet/README.md b/official/cv/alexnet/README.md index 86bda455d1c6485e2e0bcc333e1ec2152c82cfce..1ab7d4b7f67080eb3b82ff1b98d7c0cebd2b2db8 100644 --- a/official/cv/alexnet/README.md +++ b/official/cv/alexnet/README.md @@ -59,8 +59,8 @@ Dataset used: [CIFAR-10]() - Framework - [MindSpore](https://www.mindspore.cn/install/en) - For more information, please check the resources below: - - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/en/r1.3/index.html) - - [MindSpore Python API](https://www.mindspore.cn/docs/api/en/r1.3/index.html) + - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/en/master/index.html) + - [MindSpore Python API](https://www.mindspore.cn/docs/api/en/master/index.html) ## [Quick Start](#contents) @@ -68,9 +68,12 @@ After installing MindSpore via the official website, you can start training and ```python # enter script dir, train AlexNet -sh run_standalone_train_ascend.sh [DATA_PATH] [CKPT_SAVE_PATH] +bash run_standalone_train_ascend.sh [DATA_PATH] [CKPT_SAVE_PATH] +# example: bash run_standalone_train_ascend.sh /home/DataSet/Cifar10/cifar-10-batches-bin/ /home/model/alexnet/ckpt/ + # enter script dir, evaluate AlexNet -sh run_standalone_eval_ascend.sh [DATA_PATH] [CKPT_NAME] +bash run_standalone_eval_ascend.sh [DATA_PATH] [CKPT_NAME] +# example: bash run_standalone_eval_ascend.sh /home/DataSet/cifar10/cifar-10-verify-bin /home/model/cv/alxnet/ckpt/checkpoint_alexnet-1_1562.ckpt ``` - Running on [ModelArts](https://support.huaweicloud.com/modelarts/) @@ -228,7 +231,7 @@ Major parameters in train.py and config.py as follows: ```bash python train.py --config_path default_config.yaml --data_path cifar-10-batches-bin --ckpt_path ckpt > log 2>&1 & # or enter script dir, and run the script - sh run_standalone_train_ascend.sh cifar-10-batches-bin ckpt + bash run_standalone_train_ascend.sh /home/DataSet/Cifar10/cifar-10-batches-bin/ /home/model/alexnet/ckpt/ ``` After training, the loss value will be achieved as follows: @@ -250,7 +253,7 @@ Major parameters in train.py and config.py as follows: ```bash python train.py --config_path default_config.yaml --device_target "GPU" --data_path cifar-10-batches-bin --ckpt_path ckpt > log 2>&1 & # or enter script dir, and run the script - sh run_standalone_train_for_gpu.sh cifar-10-batches-bin ckpt + bash run_standalone_train_for_gpu.sh cifar-10-batches-bin ckpt ``` After training, the loss value will be achieved as follows: @@ -275,7 +278,7 @@ Before running the command below, please check the checkpoint path used for eval ```bash python eval.py --config_path default_config.yaml --data_path cifar-10-verify-bin --ckpt_path ckpt/checkpoint_alexnet-1_1562.ckpt > eval_log.txt 2>&1 & # or enter 
script dir, and run the script - sh run_standalone_eval_ascend.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-1_1562.ckpt + bash run_standalone_eval_ascend.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-1_1562.ckpt ``` You can view the results through the file "eval_log". The accuracy of the test dataset will be as follows: @@ -290,7 +293,7 @@ Before running the command below, please check the checkpoint path used for eval ```bash python eval.py --config_path default_config.yaml --device_target "GPU" --data_path cifar-10-verify-bin --ckpt_path ckpt/checkpoint_alexnet-30_1562.ckpt > eval_log 2>&1 & # or enter script dir, and run the script - sh run_standalone_eval_for_gpu.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-30_1562.ckpt + bash run_standalone_eval_for_gpu.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-30_1562.ckpt ``` You can view the results through the file "eval_log". The accuracy of the test dataset will be as follows: diff --git a/official/cv/alexnet/README_CN.md b/official/cv/alexnet/README_CN.md index 3eac8b30773f5e4a8d352357fb392b0e710fcaff..3ab37b04fa8c6b19b5307872b9eb397f8440e12b 100644 --- a/official/cv/alexnet/README_CN.md +++ b/official/cv/alexnet/README_CN.md @@ -61,8 +61,8 @@ AlexNet由5个卷积层和3个全连接层组成。多个卷积核用于提取 - 框架 - [MindSpore](https://www.mindspore.cn/install) - 如需查看详情,请参见如下资源: - - [MindSpore教程](https://www.mindspore.cn/tutorials/zh-CN/r1.3/index.html) - - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/r1.3/index.html) + - [MindSpore教程](https://www.mindspore.cn/tutorials/zh-CN/master/index.html) + - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html) ## 快速入门 @@ -70,9 +70,14 @@ AlexNet由5个卷积层和3个全连接层组成。多个卷积核用于提取 ```python # 进入脚本目录,训练AlexNet -sh run_standalone_train_ascend.sh [DATA_PATH] [CKPT_SAVE_PATH] +bash run_standalone_train_ascend.sh [DATA_PATH] [CKPT_SAVE_PATH] +# example: bash run_standalone_train_ascend.sh /home/DataSet/Cifar10/cifar-10-batches-bin/ /home/model/alexnet/ckpt/ + +# 分布式训练AlexNet + # 进入脚本目录,评估AlexNet -sh run_standalone_eval_ascend.sh [DATA_PATH] [CKPT_NAME] +bash run_standalone_eval_ascend.sh [DATA_PATH] [CKPT_NAME] +# example: bash run_standalone_eval_ascend.sh /home/DataSet/cifar10/cifar-10-verify-bin /home/model/cv/alxnet/ckpt/checkpoint_alexnet-1_1562.ckpt ``` - 在 ModelArts 进行训练 (如果你想在modelarts上运行,可以参考以下文档 [modelarts](https://support.huaweicloud.com/modelarts/)) @@ -216,7 +221,7 @@ train.py和config.py中主要参数如下: ```bash python train.py --config_path default_config.yaml --data_path cifar-10-batches-bin --ckpt_path ckpt > log 2>&1 & # 或进入脚本目录,执行脚本 - sh run_standalone_train_ascend.sh cifar-10-batches-bin ckpt + bash run_standalone_train_ascend.sh /home/DataSet/Cifar10/cifar-10-batches-bin/ /home/model/alexnet/ckpt/ ``` 经过训练后,损失值如下: @@ -238,7 +243,7 @@ train.py和config.py中主要参数如下: ```bash python train.py --config_path default_config.yaml --device_target "GPU" --data_path cifar-10-batches-bin --ckpt_path ckpt > log 2>&1 & # 或进入脚本目录,执行脚本 - sh run_standalone_train_for_gpu.sh cifar-10-batches-bin ckpt + bash run_standalone_train_for_gpu.sh cifar-10-batches-bin ckpt ``` 经过训练后,损失值如下: @@ -263,7 +268,7 @@ train.py和config.py中主要参数如下: ```bash python eval.py --config_path default_config.yaml --data_path cifar-10-verify-bin --ckpt_path ckpt/checkpoint_alexnet-1_1562.ckpt > eval_log.txt 2>&1 & #或进入脚本目录,执行脚本 - sh run_standalone_eval_ascend.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-1_1562.ckpt + bash run_standalone_eval_ascend.sh /home/DataSet/cifar10/cifar-10-verify-bin 
/home/model/cv/alxnet/ckpt/checkpoint_alexnet-1_1562.ckpt ``` 可通过"eval_log”文件查看结果。测试数据集的准确率如下: @@ -278,7 +283,7 @@ train.py和config.py中主要参数如下: ```bash python eval.py --config_path default_config.yaml --device_target "GPU" --data_path cifar-10-verify-bin --ckpt_path ckpt/checkpoint_alexnet-30_1562.ckpt > eval_log 2>&1 & #或进入脚本目录,执行脚本 - sh run_standalone_eval_for_gpu.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-30_1562.ckpt + bash run_standalone_eval_for_gpu.sh cifar-10-verify-bin ckpt/checkpoint_alexnet-30_1562.ckpt ``` 可通过"eval_log”文件查看结果。测试数据集的准确率如下: diff --git a/official/cv/alexnet/ascend310_infer/src/utils.cc b/official/cv/alexnet/ascend310_infer/src/utils.cc index d545acd312fbd7dc3ae2da3a7d29aea3c6db86e3..2c11078eaaa99822578aa37135dd46bf7730540b 100644 --- a/official/cv/alexnet/ascend310_infer/src/utils.cc +++ b/official/cv/alexnet/ascend310_infer/src/utils.cc @@ -101,8 +101,8 @@ std::vector GetAllFiles(std::string_view dirName) { int WriteResult(const std::string& imageFile, const std::vector &outputs) { std::string homePath = "./result_Files"; - const int INVALID_POINTER = -1; - const int ERROR = -2; + int INVALID_POINTER = -1; + int ERROR = -2; for (size_t i = 0; i < outputs.size(); ++i) { size_t outputSize; std::shared_ptr netOutput; diff --git a/official/cv/alexnet/default_config.yaml b/official/cv/alexnet/default_config.yaml index c254a8af2b28b08b5804354020942d50b4c4aa2f..1b3988f7e293dfc881799e807d906ed8aa95292a 100644 --- a/official/cv/alexnet/default_config.yaml +++ b/official/cv/alexnet/default_config.yaml @@ -53,4 +53,4 @@ device_target: 'Target device type' enable_profiling: 'Whether enable profiling while training, default: False' --- -device_target: ['Ascend', 'GPU', 'CPU'] \ No newline at end of file +device_target: ['Ascend', 'GPU', 'CPU'] diff --git a/official/cv/alexnet/infer/Dockerfile b/official/cv/alexnet/infer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..cf41421187f59658a8343fe4981494edff62f43a --- /dev/null +++ b/official/cv/alexnet/infer/Dockerfile @@ -0,0 +1,20 @@ +ARG FROM_IMAGE_NAME +FROM ${FROM_IMAGE_NAME} + +ARG SDK_PKG + +RUN ln -s /usr/local/python3.7.5/bin/python3.7 /usr/bin/python + +RUN apt-get update && \ + apt-get install libglib2.0-dev -y || \ + rm -rf /var/lib/dpkg/info && \ + mkdir /var/lib/dpkg/info && \ + apt-get install libglib2.0-dev dos2unix -y && \ + pip install pytest-runner==5.3.0 + +COPY requirements.txt . +RUN pip3.7 install -r requirements.txt + +# pip install sdk_run +COPY $SDK_PKG . +RUN ls -hrlt diff --git a/official/cv/alexnet/infer/convert/aipp.cfg b/official/cv/alexnet/infer/convert/aipp.cfg new file mode 100644 index 0000000000000000000000000000000000000000..84886ae1a2432b993d0990221de2f76424f679b4 --- /dev/null +++ b/official/cv/alexnet/infer/convert/aipp.cfg @@ -0,0 +1,16 @@ +aipp_op { + aipp_mode: static + input_format : RGB888_U8 + + rbuv_swap_switch : true + + mean_chn_0 : 0 + mean_chn_1 : 0 + mean_chn_2 : 0 + min_chn_0 : 123.675 + min_chn_1 : 116.28 + min_chn_2 : 103.53 + var_reci_chn_0 : 0.0171247538316637 + var_reci_chn_1 : 0.0175070028011204 + var_reci_chn_2 : 0.0174291938997821 +} diff --git a/official/cv/alexnet/infer/convert/convert_om.sh b/official/cv/alexnet/infer/convert/convert_om.sh new file mode 100644 index 0000000000000000000000000000000000000000..d2d5edaf10ff6b9a9463ccc6e7f1ffe9fae3c00f --- /dev/null +++ b/official/cv/alexnet/infer/convert/convert_om.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ $# -ne 3 ] +then + echo "Wrong parameter format." 
+ echo "Usage:" + echo " bash $0 [INPUT_AIR_PATH] [AIPP_PATH] [OUTPUT_OM_PATH_NAME]" + echo "Example: " + echo " bash convert_om.sh xxx.air ./aipp.cfg xx" + + exit 1 +fi + +input_air_path=$1 +aipp_cfg_file=$2 +output_om_path=$3 + +export install_path=/usr/local/Ascend/ + +export ASCEND_ATC_PATH=${install_path}/atc +export LD_LIBRARY_PATH=${install_path}/atc/lib64:$LD_LIBRARY_PATH +export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH +export PYTHONPATH=${install_path}/atc/python/site-packages:${install_path}/latest/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/atc/python/site-packages/schedule_search.egg +export ASCEND_OPP_PATH=${install_path}/opp +echo "Input AIR file path: ${input_air_path}" +echo "Output OM file path: ${output_om_path}" + +atc --input_format=NCHW \ + --framework=1 \ + --model="${input_air_path}" \ + --input_shape="actual_input_1:1,3,227,227" \ + --output="${output_om_path}" \ + --insert_op_conf="${aipp_cfg_file}" \ + --enable_small_channel=1 \ + --log=error \ + --soc_version=Ascend310 \ + --op_select_implmode=high_precision diff --git a/official/cv/alexnet/infer/data/config/cifar10.names b/official/cv/alexnet/infer/data/config/cifar10.names new file mode 100644 index 0000000000000000000000000000000000000000..fa30c22b95d76d0dc0466cce95d10f3eb8ddad53 --- /dev/null +++ b/official/cv/alexnet/infer/data/config/cifar10.names @@ -0,0 +1,10 @@ +airplane +automobile +bird +cat +deer +dog +frog +horse +ship +truck diff --git a/official/cv/alexnet/infer/data/config/infer.txt b/official/cv/alexnet/infer/data/config/infer.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3ea5f60e0060e767859fd0a5fd0259e5e71d621 --- /dev/null +++ b/official/cv/alexnet/infer/data/config/infer.txt @@ -0,0 +1 @@ +"../data/images/cifar-10-verify-bin" diff --git a/official/cv/alexnet/infer/data/images/cifar10.py b/official/cv/alexnet/infer/data/images/cifar10.py new file mode 100644 index 0000000000000000000000000000000000000000..d1954822a07978daa9c1c5355de4af40810d450e --- /dev/null +++ b/official/cv/alexnet/infer/data/images/cifar10.py @@ -0,0 +1,58 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+import os
+import cv2
+import numpy as np
+
+loc_1 = './train_cifar10/'
+loc_2 = './test_cifar10/'
+
+def unpickle(file):
+    import pickle
+    with open(file, 'rb') as fo:
+        dict_res = pickle.load(fo, encoding='bytes')
+    return dict_res
+
+def cifar10_img():
+    file_dir = './cifar-10-batches-py'
+    for i in range(1, 6):
+        data_name = os.path.join(file_dir, 'data_batch_' + str(i))
+        data_dict = unpickle(data_name)
+        print('{} is processing'.format(data_name))
+        for j in range(10000):
+            img = np.reshape(data_dict[b'data'][j], (3, 32, 32))
+            img = np.transpose(img, (1, 2, 0))
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+            img_name = "%s%s%s.jpg" % (loc_1, str(data_dict[b'labels'][j]), str((i)*10000 + j))
+            cv2.imwrite(img_name, img)
+        print('{} is done'.format(data_name))
+    test_data_name = file_dir + '/test_batch'
+    print('{} is processing'.format(test_data_name))
+    test_dict = unpickle(test_data_name)
+    for m in range(10):
+        img = np.reshape(test_dict[b'data'][m], (3, 32, 32))
+        img = np.transpose(img, (1, 2, 0))
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        img_name = '%s%s%s%s' % (loc_2, str(test_dict[b'labels'][m]), str(10000 + m), '.jpg')
+        img_label = "%s%s.jpg" % (str(test_dict[b'labels'][m]), str(10000 + m))
+        cv2.imwrite(img_name, img)
+        with open("test_label.txt", "a") as f:
+            f.write(img_label + " "*10 + str(test_dict[b'labels'][m]))
+            f.write("\n")
+    print("{} is done".format(test_data_name))
+    print('Finish transforming to image')
+
+if __name__ == '__main__':
+    cifar10_img()
diff --git a/official/cv/alexnet/infer/docker_start_infer.sh b/official/cv/alexnet/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..64cf90a2311bdfb21d68a4e90e08602670fdf632
--- /dev/null
+++ b/official/cv/alexnet/infer/docker_start_infer.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +docker_image=$1 +data_dir=$2 + +function show_help() { + echo "Usage: docker_start.sh docker_image data_dir" +} + +function param_check() { + if [ -z "${docker_image}" ]; then + echo "please input docker_image" + show_help + exit 1 + fi + + if [ -z "${data_dir}" ]; then + echo "please input data_dir" + show_help + exit 1 + fi +} + +param_check + +docker run -it \ + --device=/dev/davinci0 \ + --device=/dev/davinci_manager \ + --device=/dev/devmm_svm \ + --device=/dev/hisi_hdc \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v ${data_dir}:${data_dir} \ + ${docker_image} \ + /bin/bash diff --git a/official/cv/alexnet/infer/mxbase/CMakeLists.txt b/official/cv/alexnet/infer/mxbase/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce562b00b2412dfd402e58b0695f0a9adefa253e --- /dev/null +++ b/official/cv/alexnet/infer/mxbase/CMakeLists.txt @@ -0,0 +1,53 @@ +cmake_minimum_required(VERSION 3.14.0) +project(alexnet) +set(TARGET alexnet) +add_definitions(-DENABLE_DVPP_INTERFACE) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +add_definitions(-Dgoogle=mindxsdk_private) +add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall) +add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie) + + +# Check environment variable +if(NOT DEFINED ENV{ASCEND_HOME}) + message(FATAL_ERROR "please define environment variable:ASCEND_HOME") +endif() +if(NOT DEFINED ENV{ASCEND_VERSION}) + message(WARNING "please define environment variable:ASCEND_VERSION") +endif() +if(NOT DEFINED ENV{ARCH_PATTERN}) + message(WARNING "please define environment variable:ARCH_PATTERN") +endif() +set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include) +set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64) + + +set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME}) + +set(MXBASE_INC ${MXBASE_ROOT_DIR}/include) +set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib) +set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors) +set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include) + +if(DEFINED ENV{MXSDK_OPENSOURCE_DIR}) + set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR}) +else() + set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource/dist) +endif() + +include_directories(${ACL_INC_DIR}) +include_directories(${OPENSOURCE_DIR}/include) +include_directories(${OPENSOURCE_DIR}/include/opencv4) + +include_directories(${MXBASE_INC}) +include_directories(${MXBASE_POST_PROCESS_DIR}) +link_directories(${ACL_LIB_DIR}) +link_directories(${OPENSOURCE_DIR}/lib) +link_directories(${MXBASE_LIB_DIR}) +link_directories(${MXBASE_POST_LIB_DIR}) + +add_executable(${TARGET} src/main.cpp src/Alexnet.cpp) + +target_link_libraries(${TARGET} glog cpprest mxbase resnet50postprocess opencv_world) + +install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/) diff --git a/official/cv/alexnet/infer/mxbase/build.sh b/official/cv/alexnet/infer/mxbase/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..3a3bb9ee66b8b5be0b33c315753a0622a5355882 --- /dev/null +++ b/official/cv/alexnet/infer/mxbase/build.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur=$(dirname $0)
+
+function check_env()
+{
+    # set ASCEND_HOME to /usr/local/Ascend/ when it was not specified by user
+    if [ ! "${ASCEND_HOME}" ]; then
+        export ASCEND_HOME=/usr/local/Ascend/
+        echo "Set ASCEND_HOME to the default value: ${ASCEND_HOME}"
+    else
+        echo "ASCEND_HOME is set to ${ASCEND_HOME} by user"
+    fi
+
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=nnrt/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+
+    fi
+
+
+
+}
+
+function build_alexnet()
+{
+    cd $path_cur
+    rm -rf build
+    mkdir -p build
+    cd build
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build alexnet."
+        exit ${ret}
+    fi
+    make install
+}
+
+check_env
+build_alexnet
diff --git a/official/cv/alexnet/infer/mxbase/src/Alexnet.cpp b/official/cv/alexnet/infer/mxbase/src/Alexnet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abbdb18fb7de1fc47d90254470092d0ba65498b9
--- /dev/null
+++ b/official/cv/alexnet/infer/mxbase/src/Alexnet.cpp
@@ -0,0 +1,223 @@
+/*
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ + */ +#include +#include +#include +#include +#include "Alexnet.h" +#include "MxBase/DeviceManager/DeviceManager.h" +#include "MxBase/Log/Log.h" + +namespace { + const uint32_t YUV_BYTE_NU = 3; + const uint32_t YUV_BYTE_DE = 2; + const uint32_t VPC_H_ALIGN = 2; + const uint32_t MAX_LENGTH = 128; + +} + +APP_ERROR Alexnet::Init(const InitParam &initParam) { + deviceId_ = initParam.deviceId; + APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices(); + if (ret != APP_ERR_OK) { + LogError << "Init devices failed, ret=" << ret << "."; + return ret; + } + ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId); + if (ret != APP_ERR_OK) { + LogError << "Set context failed, ret=" << ret << "."; + return ret; + } + dvppWrapper_ = std::make_shared(); + ret = dvppWrapper_->Init(); + if (ret != APP_ERR_OK) { + LogError << "DvppWrapper init failed, ret=" << ret << "."; + return ret; + } + model_ = std::make_shared(); + ret = model_->Init(initParam.modelPath, modelDesc_); + if (ret != APP_ERR_OK) { + LogError << "ModelInferenceProcessor init failed, ret=" << ret << "."; + return ret; + } + MxBase::ConfigData configData; + const std::string softmax = initParam.softmax ? "true" : "false"; + const std::string checkTensor = initParam.checkTensor ? "true" : "false"; + + configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum)); + configData.SetJsonValue("TOP_K", std::to_string(initParam.topk)); + configData.SetJsonValue("SOFTMAX", softmax); + configData.SetJsonValue("CHECK_MODEL", checkTensor); + + auto jsonStr = configData.GetCfgJson().serialize(); + std::map> config; + config["postProcessConfigContent"] = std::make_shared(jsonStr); + config["labelPath"] = std::make_shared(initParam.labelPath); + + post_ = std::make_shared(); + ret = post_->Init(config); + if (ret != APP_ERR_OK) { + LogError << "AlexnetPostProcess init failed, ret=" << ret << "."; + return ret; + } + + return APP_ERR_OK; +} + +APP_ERROR Alexnet::DeInit() { + dvppWrapper_->DeInit(); + model_->DeInit(); + post_->DeInit(); + MxBase::DeviceManager::GetInstance()->DestroyDevices(); + return APP_ERR_OK; +} + +APP_ERROR Alexnet::ReadImage(const std::string &imgPath, cv::Mat &imageMat) { + imageMat = cv::imread(imgPath, cv::IMREAD_COLOR); + LogInfo << "data" << imageMat.size(); + return APP_ERR_OK; +} + +void Alexnet::ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat) { + static constexpr uint32_t resizeHeight = 227; + static constexpr uint32_t resizeWidth = 227; + cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight)); +} + +APP_ERROR Alexnet::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) { + const uint32_t dataSize = imageMat.cols * imageMat.rows * YUV444_RGB_WIDTH_NU; + LogInfo << "image size after crop" << imageMat.cols << " " << imageMat.rows; + MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_); + MemoryData memoryDataSrc(imageMat.data, dataSize, MemoryData::MEMORY_HOST_MALLOC); + + APP_ERROR ret = MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc); + if (ret != APP_ERR_OK) { + LogError << GetError(ret) << "Memory malloc failed."; + return ret; + } + std::vector shape = {imageMat.rows * YUV444_RGB_WIDTH_NU, static_cast(imageMat.cols)}; + tensorBase = TensorBase(memoryDataDst, false, shape, TENSOR_DTYPE_UINT8); + return APP_ERR_OK; +} + +APP_ERROR Alexnet::Inference(const std::vector &inputs, + std::vector &outputs) { + auto 
dtypes = model_->GetOutputDataType(); + for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) { + std::vector shape = {}; + for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) { + shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]); + } + TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_); + APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor); + if (ret != APP_ERR_OK) { + LogError << "TensorBaseMalloc failed, ret=" << ret << "."; + return ret; + } + outputs.push_back(tensor); + } + DynamicInfo dynamicInfo = {}; + dynamicInfo.dynamicType = DynamicType::STATIC_BATCH; + auto startTime = std::chrono::high_resolution_clock::now(); + APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo); + auto endTime = std::chrono::high_resolution_clock::now(); + double costMs = std::chrono::duration(endTime - startTime).count(); + inferCostTimeMilliSec += costMs; + if (ret != APP_ERR_OK) { + LogError << "ModelInference failed, ret=" << ret << "."; + return ret; + } + return APP_ERR_OK; +} + +APP_ERROR Alexnet::PostProcess(const std::vector &inputs, + std::vector> &clsInfos) { + APP_ERROR ret = post_->Process(inputs, clsInfos); + if (ret != APP_ERR_OK) { + LogError << "Process failed, ret=" << ret << "."; + return ret; + } + return APP_ERR_OK; +} + +APP_ERROR Alexnet::SaveResult(const std::string &imgPath, +const std::vector> &batchClsInfos) { + LogInfo << "image path" << imgPath; + std::string fileName = imgPath.substr(imgPath.find_last_of("/") + 1); + size_t dot = fileName.find_last_of("."); + std::string resFileName = "./result/" + fileName.substr(0, dot) + "_1.txt"; + LogInfo << "file path for saving result" << resFileName; + std::ofstream outfile(resFileName); + if (outfile.fail()) { + LogError << "Failed to open result file: "; + return APP_ERR_COMM_FAILURE; + } + uint32_t batchIndex = 0; + for (auto clsInfos : batchClsInfos) { + std::string resultStr; + for (auto clsInfo : clsInfos) { + LogDebug << " className:" << clsInfo.className << " confidence:" << clsInfo.confidence << + " classIndex:" << clsInfo.classId; + resultStr += std::to_string(clsInfo.classId) + " "; + } + outfile << resultStr << std::endl; + batchIndex++; + } + outfile.close(); + return APP_ERR_OK; +} + +APP_ERROR Alexnet::Process(const std::string &imgPath) { + cv::Mat imageMat; + APP_ERROR ret = ReadImage(imgPath, imageMat); + if (ret != APP_ERR_OK) { + LogError << "ReadImage failed, ret=" << ret << "."; + return ret; + } + ResizeImage(imageMat, imageMat); + std::vector inputs = {}; + std::vector outputs = {}; + TensorBase tensorBase; + ret = CVMatToTensorBase(imageMat, tensorBase); + if (ret != APP_ERR_OK) { + LogError << "CVMatToTensorBase failed, ret=" << ret << "."; + return ret; + } + inputs.push_back(tensorBase); + auto startTime = std::chrono::high_resolution_clock::now(); + ret = Inference(inputs, outputs); + auto endTime = std::chrono::high_resolution_clock::now(); + double costMs = std::chrono::duration(endTime - startTime).count(); + inferCostTimeMilliSec += costMs; + if (ret != APP_ERR_OK) { + LogError << "Inference failed, ret=" << ret << "."; + return ret; + } + std::vector> BatchClsInfos = {}; + LogError << "postprocess output=" << outputs.size(); + ret = PostProcess(outputs, BatchClsInfos); + if (ret != APP_ERR_OK) { + LogError << "PostProcess failed, ret=" << ret << "."; + return ret; + } + ret = SaveResult(imgPath, BatchClsInfos); + if (ret != APP_ERR_OK) { + LogError << "Save infer results into file failed. 
ret = " << ret << "."; + return ret; + } + return APP_ERR_OK; +} diff --git a/official/cv/alexnet/infer/mxbase/src/Alexnet.h b/official/cv/alexnet/infer/mxbase/src/Alexnet.h new file mode 100644 index 0000000000000000000000000000000000000000..3cc49ebad32ebb908fa6bdddce16846525415513 --- /dev/null +++ b/official/cv/alexnet/infer/mxbase/src/Alexnet.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#ifndef MxBase_ALEXNET_H +#define MxBase_ALEXNET_H +#include + +#include "MxBase/DvppWrapper/DvppWrapper.h" +#include "MxBase/ModelInfer/ModelInferenceProcessor.h" +#include "MxBase/postprocess/include/ClassPostProcessors/Resnet50PostProcess.h" +#include "MxBase/Tensor/TensorContext/TensorContext.h" + + +struct InitParam { + uint32_t deviceId; + std::string labelPath; + uint32_t classNum; + uint32_t topk; + bool softmax; + bool checkTensor; + std::string modelPath; +}; + +struct ImageShape { + uint32_t width; + uint32_t height; +}; + +class Alexnet { + public: + APP_ERROR Init(const InitParam &initParam); + APP_ERROR DeInit(); + APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat); + void ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat); + APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase); + APP_ERROR Inference(const std::vector &inputs, std::vector &outputs); + APP_ERROR PostProcess(const std::vector &inputs, + std::vector> &clsInfos); + APP_ERROR Process(const std::string &imgPath); + double GetInferCostMilliSec() const {return inferCostTimeMilliSec;} + private: + APP_ERROR SaveResult(const std::string &imgPath, const std::vector> &batchClsInfos); + private: + std::shared_ptr dvppWrapper_; + std::shared_ptr model_; + std::shared_ptr post_; + MxBase::ModelDesc modelDesc_; + uint32_t deviceId_ = 0; + double inferCostTimeMilliSec = 0.0; +}; +#endif diff --git a/official/cv/alexnet/infer/mxbase/src/main.cpp b/official/cv/alexnet/infer/mxbase/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e02cea34ed3b6f20c7fcedb4dc9d71612a75d98 --- /dev/null +++ b/official/cv/alexnet/infer/mxbase/src/main.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "Alexnet.h" +#include "MxBase/Log/Log.h" + + +namespace { +const uint32_t CLASS_NUM = 10; +} + +APP_ERROR ScanImages(const std::string &path, std::vector &imgFiles) { + DIR *dirPtr = opendir(path.c_str()); + if (dirPtr == nullptr) { + LogError << "opendir failed. dir:" << path << path.c_str(); + return APP_ERR_INTERNAL_ERROR; + } + dirent *direntPtr = nullptr; + while ((direntPtr = readdir(dirPtr)) != nullptr) { + std::string fileName = direntPtr->d_name; + if (fileName == "." || fileName == "..") { + continue; + } + + imgFiles.emplace_back(path + "/" + fileName); + } + LogInfo << "opendir ok. dir:"; + closedir(dirPtr); + return APP_ERR_OK; +} + +int main(int argc, char* argv[]) { + if (argc <= 1) { + LogWarn << "Please input image path, such as './alexnet image_dir'"; + return APP_ERR_OK; + } + + InitParam initParam = {}; + initParam.deviceId = 0; + initParam.classNum = CLASS_NUM; + initParam.labelPath = "../data/config/cifar10.names"; + initParam.topk = 5; + initParam.softmax = false; + initParam.checkTensor = true; + initParam.modelPath = "../data/models/alexnet_bs1.om"; + auto alexnet = std::make_shared(); + APP_ERROR ret = alexnet->Init(initParam); + if (ret != APP_ERR_OK) { + alexnet->DeInit(); + LogError << "AlexnetClassify init failed, ret=" << ret << "."; + return ret; + } + + std::string imgPath = argv[1]; + std::vector imgFilePaths; + ret = ScanImages(imgPath, imgFilePaths); + if (ret != APP_ERR_OK) { + return ret; + } + auto startTime = std::chrono::high_resolution_clock::now(); + for (auto &imgFile : imgFilePaths) { + ret = alexnet->Process(imgFile); + if (ret !=APP_ERR_OK) { + LogError << "AlexnetClassify process failed, ret=" << ret << "."; + alexnet->DeInit(); + return ret; + } + } + auto endTime = std::chrono::high_resolution_clock::now(); + alexnet->DeInit(); + double costMilliSecs = std::chrono::duration(endTime - startTime).count(); + double fps = 1000.0*imgFilePaths.size() / alexnet->GetInferCostMilliSec(); + LogInfo << "[Process Delay] cost:" << costMilliSecs << " ms\tfps: " << fps << "imgs/sec"; + return APP_ERR_OK; +} + diff --git a/official/cv/alexnet/infer/sdk/classfication_task_metric.py b/official/cv/alexnet/infer/sdk/classfication_task_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbb075a824280b6e5fefc8c0bcb966ef6b5d801 --- /dev/null +++ b/official/cv/alexnet/infer/sdk/classfication_task_metric.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import sys
+import json
+import numpy as np
+
+np.set_printoptions(threshold=sys.maxsize)
+LABEL_FILE = "HiAI_label.json"
+
+def gen_file_name(img_name):
+    full_name = img_name.split('/')[-1]
+    return os.path.splitext(full_name)
+
+def cre_groundtruth_dict(gtfile_path):
+    """
+    :param gtfile_path: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    for gt_file in os.listdir(gtfile_path):
+        if gt_file != LABEL_FILE:
+            with open(os.path.join(gtfile_path, gt_file), 'r') as f:
+                gt = json.load(f)
+                ret = gt["image"]["annotations"][0]["category_id"]
+                img_gt_dict[gen_file_name(gt_file)] = ret
+    return img_gt_dict
+
+def cre_groundtruth_dict_fromtxt(gtfile_path):
+    """
+    :param gtfile_path: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    with open(gtfile_path, 'r') as f:
+
+        for line in f.readlines():
+            temp = line.strip().split(" ")
+            img_name = temp[0].split(".")[0]
+            img_lab = temp[-1]
+            img_gt_dict[img_name] = img_lab
+    return img_gt_dict
+
+def load_statistical_predict_result(filepath):
+    """
+    function:
+    the prediction result file data extraction
+    input:
+    result file:filepath
+    output:
+    n_label: number of labels
+    data_vec: the probabilities of prediction
+    :return: probabilities, number of labels, in_type, color
+    """
+    with open(filepath, 'r') as f:
+        data = f.readline()
+        temp = data.strip().split(" ")
+        n_label = len(temp)
+        data_vec = np.zeros((n_label), dtype=np.float32)
+        in_type = ''
+        color = ''
+        if n_label == 0:
+            in_type = f.readline()
+            color = f.readline()
+        else:
+            for ind, cls_ind in enumerate(temp):
+                data_vec[ind] = np.int(cls_ind)
+    return data_vec, n_label, in_type, color
+
+def create_visualization_statistical_result(prediction_file_path,
+                                            result_store_path, json_file_name,
+                                            img_gt_dict, topn=5):
+    writer = open(os.path.join(result_store_path, json_file_name), 'w')
+    table_dict = {}
+    table_dict["title"] = "Overall statistical evaluation"
+    table_dict["value"] = []
+    count = 0
+    res_cnt = 0
+    n_labels = ""
+    count_hit = np.zeros(topn)
+    for tfile_name in os.listdir(prediction_file_path):
+        count += 1
+        temp = tfile_name.split('.')[0]
+        index = temp.rfind('_')
+        img_name = temp[:index]
+        filepath = os.path.join(prediction_file_path, tfile_name)
+
+        ret = load_statistical_predict_result(filepath)
+        prediction = ret[0]
+        n_labels = ret[1]
+        gt = img_gt_dict[img_name]
+        if n_labels == 1000:
+            real_label = int(gt)
+        elif n_labels == 1001:
+            real_label = int(gt) + 1
+        else:
+            real_label = int(gt)
+        res_cnt = min(len(prediction), topn)
+        for i in range(res_cnt):
+            if str(real_label) == str(int(prediction[i])):
+                count_hit[i] += 1
+                break
+    if 'value' not in table_dict.keys():
+        print("the item value does not exist!")
+    else:
+        table_dict["value"].extend(
+            [{"key": "Number of images", "value": str(count)},
+             {"key": "Number of classes", "value": str(n_labels)}])
+        if count == 0:
+            accuracy = 0
+        else:
+            accuracy = np.cumsum(count_hit) / count
+        for i in range(res_cnt):
+            table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy",
+                                        "value": str(round(accuracy[i] * 100, 2)) + '%'})
+        json.dump(table_dict, writer)
+    writer.close()
+
+def run():
+    folder_davinci_target = sys.argv[1]
+    annotation_file_path = sys.argv[2]
+    result_json_path = sys.argv[3]
+    json_file = sys.argv[4]
+    if not os.path.exists(folder_davinci_target):
+        print("Target file folder does not exist.")
+    if not 
os.path.exists(annotation_file_path): + print("Ground truth file does not exist.") + if not os.path.exists(result_json_path): + print("Result folder doesn't exist.") + img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path) + create_visualization_statistical_result(folder_davinci_target, + result_json_path, json_file, + img_label_dict, topn=5) + +if __name__ == '__main__': + run() diff --git a/official/cv/alexnet/infer/sdk/main.py b/official/cv/alexnet/infer/sdk/main.py new file mode 100644 index 0000000000000000000000000000000000000000..9a00b09738cecdb72176b17500ebcc2ccf0d3524 --- /dev/null +++ b/official/cv/alexnet/infer/sdk/main.py @@ -0,0 +1,89 @@ +#coding = utf-8 +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import json +import os +import sys + +from StreamManagerApi import StreamManagerApi +from StreamManagerApi import MxDataInput + + +def run(): + # init stream manager + stream_manager_api = StreamManagerApi() + ret = stream_manager_api.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + + # create streams by pipeline config file + with open("./pipeline/alexnet.pipeline", 'rb') as f: + pipelineStr = f.read() + ret = stream_manager_api.CreateMultipleStreams(pipelineStr) + + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + + # Construct the input of the stream + data_input = MxDataInput() + dir_name = sys.argv[1] + res_dir_name = sys.argv[2] + file_list = os.listdir(dir_name) + if not os.path.exists(res_dir_name): + os.makedirs(res_dir_name) + for file_name in file_list: + file_path = os.path.join(dir_name, file_name) + if not (file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg")): + continue + with open(file_path, 'rb') as f: + data_input.data = f.read() + stream_name = b'im_alexnet' + in_plugin_id = 0 + unique_id = stream_manager_api.SendData(stream_name, in_plugin_id, data_input) + if unique_id < 0: + print("Failed to send data to stream.") + exit() + # Obtain the inference result by specifying streamName and uniqueId. + start_time = datetime.datetime.now() + infer_result = stream_manager_api.GetResult(stream_name, unique_id) + end_time = datetime.datetime.now() + print('sdk run time: {}'.format((end_time - start_time).microseconds)) + if infer_result.errorCode != 0: + print("GetResultWithUniqueId error. 
errorCode=%d, errorMsg=%s" % ( + infer_result.errorCode, infer_result.data.decode())) + exit() + # print the infer result + infer_res = infer_result.data.decode() + print("process img: {}, infer result: {}".format(file_name, infer_res)) + load_dict = json.loads(infer_result.data.decode()) + if load_dict.get('MxpiClass') is None: + with open(res_dir_name + "/" + file_name[:-5] + '.txt', 'w') as f_write: + f_write.write("") + continue + res_vec = load_dict.get('MxpiClass') + res_name = os.path.join(res_dir_name, '{}_1.txt'.format(file_name[:-4])) + with open(res_name, 'w') as f_write: + res_list = [str(item.get("classId"))+ " " for item in res_vec] + f_write.writelines(res_list) + f_write.write('\n') + + # destroy streams + stream_manager_api.DestroyAllStreams() + +if __name__ == '__main__': + run() diff --git a/official/cv/alexnet/infer/sdk/models/alexnet/alexnet.cfg b/official/cv/alexnet/infer/sdk/models/alexnet/alexnet.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b18718a1da5891d2b8a4498b06638a2b4c7619a9 --- /dev/null +++ b/official/cv/alexnet/infer/sdk/models/alexnet/alexnet.cfg @@ -0,0 +1,3 @@ +CLASS_NUM=10 +SOFTMAX=false +TOP_K=5 diff --git a/official/cv/alexnet/infer/sdk/models/alexnet/cifar10.names b/official/cv/alexnet/infer/sdk/models/alexnet/cifar10.names new file mode 100644 index 0000000000000000000000000000000000000000..fa30c22b95d76d0dc0466cce95d10f3eb8ddad53 --- /dev/null +++ b/official/cv/alexnet/infer/sdk/models/alexnet/cifar10.names @@ -0,0 +1,10 @@ +airplane +automobile +bird +cat +deer +dog +frog +horse +ship +truck diff --git a/official/cv/alexnet/infer/sdk/pipeline/alexnet.pipeline b/official/cv/alexnet/infer/sdk/pipeline/alexnet.pipeline new file mode 100644 index 0000000000000000000000000000000000000000..d4ed641b1407e2faf76c12c4d65567da72624874 --- /dev/null +++ b/official/cv/alexnet/infer/sdk/pipeline/alexnet.pipeline @@ -0,0 +1,64 @@ +{ + "im_alexnet": { + "stream_config": { + "deviceId": "0" + }, + "appsrc1": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "props": { + "handleMethod": "opencv" + }, + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "handleMethod": "opencv", + "resizeType": "Resizer_Stretch", + "resizeHeight": "227", + "resizeWidth": "227" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_tensorinfer0" + }, + "mxpi_tensorinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "../data/models/alexnet_bs1.om", + "waitingTime": "2000", + "outputDeviceId": "-1" + }, + "factory": "mxpi_tensorinfer", + "next": "mxpi_classpostprocessor0" + }, + "mxpi_classpostprocessor0": { + "props": { + "dataSource": "mxpi_tensorinfer0", + "postProcessConfigPath": "./models/alexnet/alexnet.cfg", + "labelPath": "./models/alexnet/cifar10.names", + "postProcessLibPath": "/usr/local/sdk_home/mxManufacture/lib/modelpostprocessors/libresnet50postprocess.so" + }, + "factory": "mxpi_classpostprocessor", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_classpostprocessor0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } +} diff --git a/official/cv/alexnet/infer/sdk/run_sdk_infer.sh b/official/cv/alexnet/infer/sdk/run_sdk_infer.sh new file mode 100644 index 
0000000000000000000000000000000000000000..8489286b4fde2bce370d18d20e6b53bf7f07f95d --- /dev/null +++ b/official/cv/alexnet/infer/sdk/run_sdk_infer.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +image_path=$1 +result_dir=$2 + +set -e + + + +# Simple log helper functions +info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } +warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } + +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/driver/lib64/:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner +export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins + +#to set PYTHONPATH, import the StreamManagerApi.py +export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python + +python3.7 main.py $image_path $result_dir +exit 0 diff --git a/official/cv/alexnet/modelarts/train.start.py b/official/cv/alexnet/modelarts/train.start.py new file mode 100644 index 0000000000000000000000000000000000000000..3d56e2c24c8e138f7ad3923f4d1499914ef1003e --- /dev/null +++ b/official/cv/alexnet/modelarts/train.start.py @@ -0,0 +1,206 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +""" +######################## train alexnet example ######################## +train alexnet and get network model files(.ckpt) : +python train.py --data_path /YourDataPath +""" + +import os +import argparse +import glob +import sys +import numpy as np +import moxing as mox + +from src.model_utils.moxing_adapter import moxing_wrapper +from src.model_utils.device_adapter import get_device_id, get_device_num, get_rank_id, get_job_id +from src.dataset import create_dataset_cifar10, create_dataset_imagenet +from src.generator_lr import get_lr_cifar10, get_lr_imagenet +from src.alexnet import AlexNet +from src.get_param_groups import get_param_groups + +import mindspore.nn as nn +from mindspore.communication.management import init, get_rank +from mindspore import context +from mindspore import export +from mindspore import Tensor +from mindspore.train import Model +from mindspore.context import ParallelMode +from mindspore.nn.metrics import Accuracy +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed + +sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../')) + +parser = argparse.ArgumentParser(description='Image classification') +parser.add_argument("--enable_modelarts", type=bool, default=True, help="") +parser.add_argument("--output_path", type=str, default="/cache/train", help="setting dir of training output") +parser.add_argument("--checkpoint_path", type=str, default="./checkpoint/", help="setting dir of checkpoint output") +parser.add_argument('--device_target', type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"), + help="Device target, support Ascend, GPU and CPU.") +parser.add_argument("--dataset_name", type=str, default="cifar10", choices=("cifar10", "imagenet"), + help="Dataset Name, support cifar10 and imagenet") +parser.add_argument("--learning_rate", type=float, default=0.002, help="") +parser.add_argument("--epoch_size", type=int, default=30, help="") +parser.add_argument("--data_path", type=str, default="/cache/data", help="path to dataset") +parser.add_argument("--batch_size", type=int, default=32, help="") +parser.add_argument("--num_classes", type=int, default=10, help="") +parser.add_argument("--sink_size", type=int, default=-1, help="") +parser.add_argument("--momentum", type=float, default=0.9, help="") +parser.add_argument("--save_checkpoint_steps", type=int, default=1562, help="") +parser.add_argument("--keep_checkpoint_max", type=int, default=10, help="") +parser.add_argument("--data_url", type=str, default="", help="") +parser.add_argument("--train_url", type=str, default="", help="") +parser.add_argument("--image_height", type=int, default=227, help="") +parser.add_argument("--image_width", type=int, default=227, help="") +parser.add_argument("--buffer_size", type=int, default=1000, help="") +parser.add_argument("--dataset_sink_mode", type=bool, default=True, help="") +parser.add_argument("--weight_decay", type=float, default=0.0001, help="") +parser.add_argument("--loss_scale", type=int, default=1024, help="") +parser.add_argument("--is_dynamic_loss_scale", type=int, default=0, help="") +config = parser.parse_args() + +set_seed(1) + +def modelarts_pre_process(): + #pass + config.ckpt_path = os.path.join(config.output_path, str(get_rank_id()), config.checkpoint_path) + +def frozen_to_air(net, 
args): + param_dict = load_checkpoint(args.get("ckpt_file")) + load_param_into_net(net, param_dict) + input_arr = Tensor(np.zeros([args.get("batch_size"), \ + 3, args.get("image_height"), args.get("image_width")], np.float32)) + export(net, input_arr, file_name=args.get("file_name"), file_format=args.get("file_format")) + +@moxing_wrapper(pre_process=modelarts_pre_process) +def train_alexnet_model(): + print(config) + print('device id:', get_device_id()) + print('device num:', get_device_num()) + print('rank id:', get_rank_id()) + print('job id:', get_job_id()) + device_target = config.device_target + context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) + context.set_context(save_graphs=False) + if device_target == "GPU": + context.set_context(enable_graph_kernel=True) + context.set_context(graph_kernel_flags="--enable_cluster_ops=MatMul") + + device_num = get_device_num() + if config.dataset_name == "cifar10": + if device_num > 1: + config.learning_rate = config.learning_rate * device_num + config.epoch_size = config.epoch_size * 2 + elif config.dataset_name == "imagenet": + pass + else: + raise ValueError("Unsupported dataset.") + + if device_num > 1: + context.reset_auto_parallel_context() + context.set_auto_parallel_context(device_num=device_num, \ + parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True) + if device_target == "Ascend": + context.set_context(device_id=get_device_id()) + init() + elif device_target == "GPU": + init() + else: + context.set_context(device_id=get_device_id()) + + if config.dataset_name == "cifar10": + ds_train = create_dataset_cifar10(config, config.data_path, config.batch_size, target=config.device_target) + elif config.dataset_name == "imagenet": + ds_train = create_dataset_imagenet(config, config.data_path, config.batch_size) + else: + raise ValueError("Unsupported dataset.") + + if ds_train.get_dataset_size() == 0: + raise ValueError("Please check dataset size > 0 and batch_size <= dataset size") + + network = AlexNet(config.num_classes, phase='train') + + loss_scale_manager = None + metrics = None + step_per_epoch = ds_train.get_dataset_size() if config.sink_size == -1 else config.sink_size + + if config.dataset_name == 'cifar10': + loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + lr = Tensor(get_lr_cifar10(0, config.learning_rate, config.epoch_size, step_per_epoch)) + opt = nn.Momentum(network.trainable_params(), lr, config.momentum) + metrics = {"Accuracy": Accuracy()} + + elif config.dataset_name == 'imagenet': + loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + lr = Tensor(get_lr_imagenet(config.learning_rate, config.epoch_size, step_per_epoch)) + opt = nn.Momentum(params=get_param_groups(network), + learning_rate=lr, + momentum=config.momentum, + weight_decay=config.weight_decay, + loss_scale=config.loss_scale) + + from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager + if config.is_dynamic_loss_scale == 1: + loss_scale_manager = DynamicLossScaleManager(init_loss_scale=65536, scale_factor=2, scale_window=2000) + else: + loss_scale_manager = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False) + else: + raise ValueError("Unsupported dataset.") + + if device_target == "Ascend": + model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, amp_level="O2", keep_batchnorm_fp32=False, + loss_scale_manager=loss_scale_manager) + elif device_target == "GPU": + model = Model(network, loss_fn=loss, optimizer=opt, 
metrics=metrics, amp_level="O2", + loss_scale_manager=loss_scale_manager) + else: + raise ValueError("Unsupported platform.") + + if device_num > 1: + ckpt_save_dir = os.path.join(config.ckpt_path + "_" + str(get_rank())) + else: + ckpt_save_dir = config.ckpt_path + + time_cb = TimeMonitor(data_size=step_per_epoch) + config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps, + keep_checkpoint_max=config.keep_checkpoint_max) + ckpoint_cb = ModelCheckpoint(prefix="checkpoint_alexnet", directory=ckpt_save_dir, config=config_ck) + + print("============== Starting Training ==============") + model.train(config.epoch_size, ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()], + dataset_sink_mode=config.dataset_sink_mode, sink_size=config.sink_size) + ckpt_list = glob.glob(str(ckpt_save_dir) + "/*alexnet*.ckpt") + if not ckpt_list: + print("ckpt file not generated") + ckpt_list.sort(key=os.path.getmtime) + ckpt_model = ckpt_list[-1] + + network = AlexNet(config.num_classes, phase='train') + + frozen_to_air_args = {"ckpt_file": ckpt_model, + "batch_size": 1, + "image_height": 227, + "image_width": 227, + "file_name": "/cache/train/alexnet", + "file_format": "AIR"} + frozen_to_air(network, frozen_to_air_args) + mox.file.copy_parallel(config.output_path, config.train_url) + +if __name__ == "__main__": + train_alexnet_model() diff --git a/official/cv/alexnet/scripts/docker_start.sh b/official/cv/alexnet/scripts/docker_start.sh new file mode 100644 index 0000000000000000000000000000000000000000..e9c72417605ab31045fb933537bff480d55ca53d --- /dev/null +++ b/official/cv/alexnet/scripts/docker_start.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +docker_image=$1 +data_dir=$2 +model_dir=$3 + +docker run -it --ipc=host \ + --device=/dev/davinci0 \ + --device=/dev/davinci1 \ + --device=/dev/davinci2 \ + --device=/dev/davinci3 \ + --device=/dev/davinci4 \ + --device=/dev/davinci5 \ + --device=/dev/davinci6 \ + --device=/dev/davinci7 \ + --device=/dev/davinci_manager \ + --device=/dev/devmm_svm --device=/dev/hisi_hdc \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \ + -v ${model_dir}:${model_dir} \ + -v ${data_dir}:${data_dir} \ + -v ~/ascend/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \ + -v ~/ascend/log/npu/slog/:/var/log/npu/slog -v ~/ascend/log/npu/profiling/:/var/log/npu/profiling \ + -v ~/ascend/log/npu/dump/:/var/log/npu/dump -v ~/ascend/log/npu/:/usr/slog ${docker_image} \ + /bin/bash diff --git a/official/cv/alexnet/scripts/run_standalone_eval_ascend.sh b/official/cv/alexnet/scripts/run_standalone_eval_ascend.sh index 8adffeb29fece6e7ac8922552152cff8e2777a96..00892ae683f8d7dd7fd06957430c9dd6576a2f0b 100644 --- a/official/cv/alexnet/scripts/run_standalone_eval_ascend.sh +++ b/official/cv/alexnet/scripts/run_standalone_eval_ascend.sh @@ -28,9 +28,9 @@ export DEVICE_ID=$4 BASE_PATH=$(cd ./"`dirname $0`" || exit; pwd) if [ $# -ge 1 ]; then - if [ $1 == 'imagenet' ]; then + if [ $1 = 'imagenet' ]; then CONFIG_FILE="${BASE_PATH}/../config_imagenet.yaml" - elif [ $1 == 'cifar10' ]; then + elif [ $1 = 'cifar10' ]; then CONFIG_FILE="${BASE_PATH}/../default_config.yaml" else echo "Unrecognized parameter" @@ -40,7 +40,7 @@ else CONFIG_FILE="${BASE_PATH}/../default_config.yaml" fi -python ../eval.py --config_path=$CONFIG_FILE --dataset_name=$DATASET_NAME \ +python3.7 ../eval.py --config_path=$CONFIG_FILE --dataset_name=$DATASET_NAME \ --data_path=$DATA_PATH --ckpt_path=$CKPT_PATH \ - --device_id=$DEVICE_ID 
--device_target="Ascend" > eval_log 2>&1 & + --device_id=$DEVICE_ID --device_target="Ascend" > eval.log 2>&1 & diff --git a/official/cv/alexnet/scripts/run_standalone_train_ascend.sh b/official/cv/alexnet/scripts/run_standalone_train_ascend.sh index 2593316c8263c0ca46fd93ef600380ccc749fddb..23e4f7db42a376de7104aac9ed6f06e61893060d 100644 --- a/official/cv/alexnet/scripts/run_standalone_train_ascend.sh +++ b/official/cv/alexnet/scripts/run_standalone_train_ascend.sh @@ -28,9 +28,9 @@ export CKPT_PATH=$4 BASE_PATH=$(cd ./"`dirname $0`" || exit; pwd) if [ $# -ge 1 ]; then - if [ $1 == 'imagenet' ]; then + if [ $1 = 'imagenet' ]; then CONFIG_FILE="${BASE_PATH}/../config_imagenet.yaml" - elif [ $1 == 'cifar10' ]; then + elif [ $1 = 'cifar10' ]; then CONFIG_FILE="${BASE_PATH}/../default_config.yaml" else echo "Unrecognized parameter" @@ -40,6 +40,6 @@ else CONFIG_FILE="${BASE_PATH}/../default_config.yaml" fi -python ../train.py --config_path=$CONFIG_FILE --dataset_name=$DATASET_NAME --data_path=$DATA_PATH \ ---ckpt_path=$CKPT_PATH --device_id=$DEVICE_ID --device_target="Ascend" > log 2>&1 & +python3.7 ../train.py --config_path=$CONFIG_FILE --dataset_name=$DATASET_NAME --data_path=$DATA_PATH \ +--ckpt_path=$CKPT_PATH --device_id=$DEVICE_ID --device_target="Ascend" > train.log 2>&1 & diff --git a/official/cv/alexnet/src/config.py b/official/cv/alexnet/src/config.py new file mode 100644 index 0000000000000000000000000000000000000000..3c653b0cd7b732d9fb7cb3395c65bcda006e6334 --- /dev/null +++ b/official/cv/alexnet/src/config.py @@ -0,0 +1,54 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +network config setting, will be used in train.py +""" + +from easydict import EasyDict as edict + +alexnet_cifar10_cfg = edict({ + 'num_classes': 10, + 'learning_rate': 0.002, + 'momentum': 0.9, + 'epoch_size': 30, + 'batch_size': 32, + 'buffer_size': 1000, + 'image_height': 227, + 'image_width': 227, + 'save_checkpoint_steps': 1562, + 'keep_checkpoint_max': 10, + 'air_name': "alexnet.air", +}) + +alexnet_imagenet_cfg = edict({ + 'num_classes': 1000, + 'learning_rate': 0.13, + 'momentum': 0.9, + 'epoch_size': 150, + 'batch_size': 256, + 'buffer_size': None, # invalid parameter + 'image_height': 224, + 'image_width': 224, + 'save_checkpoint_steps': 625, + 'keep_checkpoint_max': 10, + 'air_name': "alexnet.air", + + # opt + 'weight_decay': 0.0001, + 'loss_scale': 1024, + + # lr + 'is_dynamic_loss_scale': 0, +}) diff --git a/official/cv/alexnet/src/dataset.py b/official/cv/alexnet/src/dataset.py index 0b1ff86017ce7e366ae33ab1f1065031275f4221..ee23529bc9ebf0f867f355a4eee641f141b3ad23 100644 --- a/official/cv/alexnet/src/dataset.py +++ b/official/cv/alexnet/src/dataset.py @@ -85,7 +85,7 @@ def create_dataset_imagenet(cfg, dataset_path, batch_size=32, repeat_num=1, trai num_parallel_workers = 16 if device_num == 1: - num_parallel_workers = 48 + num_parallel_workers = 8 ds.config.set_prefetch_size(8) else: ds.config.set_numa_enable(True)