diff --git a/mxVision/AsyncInfer/CMakeLists.txt b/mxVision/AsyncInfer/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..066da4967b3ad58abc2340fb7dca599e9b91f2a6
--- /dev/null
+++ b/mxVision/AsyncInfer/CMakeLists.txt
@@ -0,0 +1,43 @@
+cmake_minimum_required(VERSION 3.10)
+project(C++Sample)
+
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+
+set(TARGET C++Sample)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wl,-z,relro,-z,now,-z,noexecstack -s -pie -Wall)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private)
+
+set(MX_SDK_HOME $ENV{MX_SDK_HOME})
+
+if (NOT DEFINED ENV{MX_SDK_HOME})
+    string(REGEX REPLACE "(.*)/(.*)/(.*)/(.*)" "\\1" MX_SDK_HOME ${CMAKE_CURRENT_SOURCE_DIR})
+    message(STATUS "set default MX_SDK_HOME: ${MX_SDK_HOME}")
+else ()
+    message(STATUS "env MX_SDK_HOME: ${MX_SDK_HOME}")
+endif ()
+
+include_directories(
+    ${MX_SDK_HOME}/include
+    ${MX_SDK_HOME}/include/MxBase/postprocess/include
+    ${MX_SDK_HOME}/opensource/include
+    ${MX_SDK_HOME}/opensource/include/opencv4
+    /usr/local/Ascend/ascend-toolkit/latest/aarch64-linux/include
+)
+
+link_directories(
+    ${MX_SDK_HOME}/lib
+    ${MX_SDK_HOME}/opensource/lib
+    ${MX_SDK_HOME}/lib/modelpostprocessors
+    /usr/local/Ascend/ascend-toolkit/latest/acllib/lib64
+    /usr/local/Ascend/driver/lib64
+)
+
+add_executable(sample main.cpp)
+target_link_libraries(
+    sample
+    glog
+    mxbase
+    yolov3postprocess
+    cpprest
+    opencv_world
+)
\ No newline at end of file
diff --git a/mxVision/AsyncInfer/README.MD b/mxVision/AsyncInfer/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..0d7e063116c51b764c4a71c46a37714dea90e6be
--- /dev/null
+++ b/mxVision/AsyncInfer/README.MD
@@ -0,0 +1,41 @@
+# Asynchronous Detection and Classification
+
+## 1. Introduction
+This asynchronous detection and classification sample is developed on top of the mxVision SDK. It mainly targets the 310P hardware platform and performs asynchronous detection (YOLOv3) followed by classification (ResNet50) on images.
+
+### 1.1 Supported Products
+Ascend 310P (the main hardware platform of this sample).
+
+### 1.2 Supported Versions
+The recommended OS is Ubuntu 18.04 or CentOS 7.6. The software dependencies are listed below; use the versions installed in your environment:
+
+|Software|Version|
+|-------|-------|
+|mxVision SDK|per your installation|
+|Ascend CANN Toolkit|per your installation|
+
+#### Installing Dependencies
+
+Install the mxVision SDK and the Ascend CANN Toolkit by following their respective installation guides.
+
+### 1.3 Code Directory Structure
+
+* `CMakeLists.txt` - build script
+* `main.cpp` - sample source (YOLOv3 detection + ResNet50 classification dispatched on AscendStreams)
+* `model/yolov3/`, `model/resnet50/` - model, config, and label files referenced by `main.cpp` (prepared by the user)
+* `imgs_bak/` - input image directory referenced by `main.cpp` (prepared by the user)
+
+## 2. Build and Run
+
+Before building and running the project, set the required environment variables:
+
+* Environment variables
+```shell
+. ${sdk_path}/set_env.sh
+. ${ascend_toolkit_path}/set_env.sh
+```
+`${sdk_path}` is the mxVision SDK installation path and `${ascend_toolkit_path}` is the Ascend toolkit installation path.
+
+Example steps are as follows.
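+
+The sketch below is a minimal build-and-run flow. It assumes the model files under `model/` and the test images under `imgs_bak/` are already in place; `sample` is the executable name defined in `CMakeLists.txt`, and the binary is emitted into the source directory because `CMAKE_RUNTIME_OUTPUT_DIRECTORY` points there:
+
+```shell
+cd mxVision/AsyncInfer
+cmake .
+make -j
+./sample
+```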
+
+## 3. FAQ
+
+List the problems you may encounter and their solutions here, ordered by importance.
diff --git a/mxVision/AsyncInfer/main.cpp b/mxVision/AsyncInfer/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..41794c777b9668d89951a3e0866d1bd513129cf3
--- /dev/null
+++ b/mxVision/AsyncInfer/main.cpp
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
+ * Description: Yolov3+ResNet
+ * Author:
+ * Create: 2021
+ * History: NA
+ * */
+
+#include "MxBase/E2eInfer/ImageProcessor/ImageProcessor.h"
+#include "MxBase/MxBase.h"
+#include <iostream>
+#include <string>
+#include <vector>
+#include "MxBase/Maths/FastMath.h"
+#include "MxBase/postprocess/include/ObjectPostProcessors/Yolov3PostProcess.h"
+#include <algorithm>
+#include <chrono>
+#include <cstring>
+#include <fstream>
+#include <map>
+#include <dirent.h>
+
+
+using namespace MxBase;
+using namespace std;
+
+APP_ERROR ret = APP_ERR_OK;
+int32_t g_deviceId = 1;
+
+int BATCH_SIZE = 4;
+int DATA_SIZE = 50;
+int STREAM_NUM = 4;
+
+std::vector<Image> ImageVec;
+ImageProcessor imageProcessor(g_deviceId);
+
+std::vector<AscendStream> AscendStreamVec;
+std::vector<AscendStream> StreamVec_;
+
+std::vector<Image> decodeImageVec;
+std::vector<Image> resizeImageVec;
+std::vector<std::vector<Rect>> cropConfigVecVec;
+std::vector<std::vector<Image>> cropResizeImageVecVec;
+std::vector<Tensor> tensorImgVec;
+std::vector<Tensor> resnetTensorVec;
+
+std::string yoloPath = "./model/yolov3/yolov3_tf_bs1_fp16.om";
+Model yoloV3(yoloPath, g_deviceId);
+
+std::string resnetPath = "./model/resnet50/resnet50_aipp_tf.om";
+Model resnet50(resnetPath, g_deviceId);
+
+// Per-batch containers; indexed as [batch][image] (and [batch][image][tensor/rect] for the nested ones).
+std::vector<std::vector<Image>> decodeImageBatch(BATCH_SIZE);
+std::vector<std::vector<Image>> resizeImageBatch(BATCH_SIZE);
+std::vector<std::vector<Tensor>> tensorImageBatch(BATCH_SIZE);
+std::vector<std::vector<Tensor>> resnetTensorBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Tensor>>> resnetInputBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Tensor>>> yoloV3OutputTensorBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Tensor>>> resnetOutputTensorBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Image>>> cropResizeImageBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Rect>>> cropConfigRectBatch(BATCH_SIZE);
+std::vector<std::vector<std::vector<Tensor>>> yoloV3InputTensorBatch(BATCH_SIZE);
+
+int SplitImage(const std::string& imgDir);
+
+struct Params {
+    std::vector<Image>& DecodeImageBatch;
+    std::vector<Image>& ResizeImageBatch;
+    std::vector<Tensor>& TensorImageBatch;
+    std::vector<Tensor>& ResnetTensorBatch;
+    std::vector<std::vector<Tensor>>& ResnetInputBatch;
+    std::vector<std::vector<Image>>& CropResizeImageBatch;
+    std::vector<std::vector<Tensor>>& YoloV3OutputTensorBatch;
+    std::vector<std::vector<Tensor>>& ResnetOutputTensorBatch;
+    std::vector<std::vector<Rect>>& CropConfigRectBatch;
+    std::vector<std::vector<Tensor>>& yoloV3InputTensorBatch;
+};
+
+struct AsyncYoloV3PostProcessParam {
+    vector<Tensor>& yoloV3Outputs;
+    int batchIndex;
+    Params* params;
+    int dataIndex;
+};
+
+struct AsyncResnetYoloV3PostProcessParam {
+    vector<Tensor> resnetOutput;
+};
+
+struct ConvertToTensorParam {
+    bool isYolo;
+    Params* params;
+    int dataIndex;
+};
+
+struct E2eInferParams {
+    int batchIdx;
+    Params* params;
+};
+
+struct MallocYoloTensor {
+    Tensor output1;
+    Tensor output2;
+    Tensor output3;
+};
+
+struct MallocResNetTensor {
+    Tensor output1;
+    Tensor output2;
+};
+
+struct HoldResourceParam {
+    Tensor outTensor1;
+    Tensor outTensor2;
+    Tensor outTensor3;
+    Tensor resnetOutput1;
+    Tensor resnetOutput2;
+    vector<Tensor>* yoloV3Outputs;
+    ConvertToTensorParam* convertToTensorParam1;
+    ConvertToTensorParam* convertToTensorParam2;
+    MallocYoloTensor* mallocYoloTensor;
+    MallocResNetTensor* mallocResNetTensor;
+    vector<Tensor>* resnetoutput;
+    AsyncResnetYoloV3PostProcessParam* asyncResnetYoloV3PostProcessParam;
+    AsyncYoloV3PostProcessParam* asyncYoloV3PostProcessParam;
+};
+
+// Last callback enqueued on each stream: releases the heap objects allocated for one image.
+void HoldResourceCallback(void* args)
+{
+    HoldResourceParam* input = static_cast<HoldResourceParam*>(args);
+    delete input->yoloV3Outputs;
+    delete input->convertToTensorParam1;
+    delete input->convertToTensorParam2;
+    delete input->mallocYoloTensor;
+    delete input->mallocResNetTensor;
+    delete input->resnetoutput;
+    delete input->asyncYoloV3PostProcessParam;
+    delete input->asyncResnetYoloV3PostProcessParam;
+    delete input;
+}
+
+std::vector<E2eInferParams*> E2eInferParamsVec;
+
+void SDKYoloV3PostProcess(std::string& yoloV3ConfigPath, std::string& yoloV3LabelPath, vector<Tensor>& yoloV3Outputs,
+                          std::vector<Rect>& cropConfigVec, std::vector<ImagePreProcessInfo>& imagePreProcessInfos)
+{
+    std::map<std::string, std::string> postConfig;
+    postConfig.insert(pair<std::string, std::string>("postProcessConfigPath", yoloV3ConfigPath));
+    postConfig.insert(pair<std::string, std::string>("labelPath", yoloV3LabelPath));
+
+    Yolov3PostProcess yolov3PostProcess;
+    yolov3PostProcess.Init(postConfig);
+
+    // Wrap the inference outputs as TensorBase objects expected by the SDK post-processor.
+    std::vector<TensorBase> tensors;
+    for (size_t i = 0; i < yoloV3Outputs.size(); i++) {
+        MemoryData memoryData(yoloV3Outputs[i].GetData(), yoloV3Outputs[i].GetByteSize());
+        TensorBase tensorBase(memoryData, true, yoloV3Outputs[i].GetShape(), TENSOR_DTYPE_INT32);
+        tensors.push_back(tensorBase);
+    }
+
+    std::vector<std::vector<ObjectInfo>> objectInfos;
+
+    yolov3PostProcess.Process(tensors, objectInfos, imagePreProcessInfos);
+
+    cout << "size of objectInfos is: " << objectInfos.size() << endl;
+    for (size_t i = 0; i < objectInfos.size(); i++) {
+        cout << "objectInfos-" << i << endl;
+        cout << "size of objectInfo-" << i << " is: " << objectInfos[i].size() << endl;
+        for (size_t j = 0; j < objectInfos[i].size(); j++) {
+            cout << "  objectInfo-" << j << endl;
+            cout << "      x0 is: " << objectInfos[i][j].x0 << endl;
+            cout << "      y0 is: " << objectInfos[i][j].y0 << endl;
+            cout << "      x1 is: " << objectInfos[i][j].x1 << endl;
+            cout << "      y1 is: " << objectInfos[i][j].y1 << endl;
+            cout << "      confidence is: " << objectInfos[i][j].confidence << endl;
+            cout << "      classId is: " << objectInfos[i][j].classId << endl;
+            cout << "      className is: " << objectInfos[i][j].className << endl;
+        }
+    }
+
+    if (objectInfos.empty()) {
+        return;
+    }
+    cropConfigVec.resize(objectInfos[0].size());
+
+    for (size_t i = 0; i < objectInfos[0].size(); i++) {
+        cropConfigVec[i].x0 = objectInfos[0][i].x0;
+        cropConfigVec[i].y0 = objectInfos[0][i].y0;
+        cropConfigVec[i].x1 = objectInfos[0][i].x1;
+        cropConfigVec[i].y1 = objectInfos[0][i].y1;
+    }
+}
+
+void ResnetYoloV3PostProcess(vector<Tensor> resnetOutput_)
+{
+    int classNum_ = 1000; // number of classes
+
+    if (resnetOutput_.empty()) {
+        std::cout << "resnet Infer failed.." << std::endl;
+        return;
+    }
+
+    float *castData = static_cast<float *>(resnetOutput_[0].GetData());
+    std::vector<float> result;
+    for (int j = 0; j < classNum_; ++j) {
+        result.push_back(castData[j]);
+    }
+
+    // Top-1 result
+    std::vector<float>::iterator maxElement = std::max_element(std::begin(result), std::end(result));
+    int argmaxIndex = maxElement - std::begin(result);
+    float confidence = *maxElement;
+
+    std::cout << "============== inference result ===============" << std::endl;
+    cout << "argmaxIndex = " << argmaxIndex << endl;
+    cout << "confidence = " << confidence << endl;
+}
+
+APP_ERROR PrepareData()
+{
+    string imagePath_ = "./imgs_bak";
+    int totalImgs = SplitImage(imagePath_);
+    std::vector<std::vector<std::string>> imgFileVecs(BATCH_SIZE);
+    for (size_t i = 0; i < imgFileVecs.size(); i++) {
+        std::ifstream imgFileStream;
+        std::string imagePath = imagePath_ + "/imgSplitFile" + std::to_string(i);
+        imgFileStream.open(imagePath);
+        std::string imgFile;
+        while (getline(imgFileStream, imgFile)) {
+            imgFileVecs[i].push_back(imgFile);
+        }
+        imgFileStream.close();
+    }
+    for (size_t i = 0; i < decodeImageBatch.size(); i++) {
+        for (size_t j = 0; j < imgFileVecs[i].size(); j++) {
+            Image decodeImage;
+            ret = imageProcessor.Decode(imgFileVecs[i][j], decodeImage);
+            if (ret != APP_ERR_OK) {
+                std::cout << "imageProcessor Decode failed." << std::endl;
+                return ret;
+            }
+            decodeImageBatch[i].push_back(decodeImage);
+        }
+    }
+
+    // Pre-allocate per-image slots in every batch container.
+    for (int i = 0; i < totalImgs; i++) {
+        Image resizedImage;
+        resizeImageBatch[i % BATCH_SIZE].push_back(resizedImage);
+
+        Tensor tensorImg;
+        tensorImageBatch[i % BATCH_SIZE].push_back(tensorImg);
+
+        Tensor resnetTensor;
+        resnetTensorBatch[i % BATCH_SIZE].push_back(resnetTensor);
+
+        vector<Tensor> resnetInput;
+        resnetInputBatch[i % BATCH_SIZE].push_back(resnetInput);
+
+        std::vector<Image> cropResizedImageVec;
+        cropResizeImageBatch[i % BATCH_SIZE].push_back(cropResizedImageVec);
+
+        vector<Tensor> yoloV3Outputs;
+        yoloV3OutputTensorBatch[i % BATCH_SIZE].push_back(yoloV3Outputs);
+
+        vector<Tensor> resnetOutput;
+        resnetOutputTensorBatch[i % BATCH_SIZE].push_back(resnetOutput);
+
+        vector<Rect> cropConfigVec;
+        cropConfigRectBatch[i % BATCH_SIZE].push_back(cropConfigVec);
+
+        vector<Tensor> yoloV3InputVec;
+        yoloV3InputTensorBatch[i % BATCH_SIZE].push_back(yoloV3InputVec);
+    }
+
+    for (int i = 0; i < STREAM_NUM; i++) {
+        AscendStream Stream_ = AscendStream(g_deviceId);
+        Stream_.CreateAscendStream();
+        StreamVec_.push_back(Stream_);
+    }
+
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        AscendStream Stream = AscendStream(g_deviceId);
+        Stream.CreateAscendStream();
+        AscendStreamVec.push_back(Stream);
+    }
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR AsyncYoloV3PostProcessPro(vector<Tensor>& yoloV3Outputs, int batchIndex, Params* params, int dataIndex)
+{
+    for (size_t i = 0; i < yoloV3Outputs.size(); i++) {
+        ret = yoloV3Outputs[i].ToHost();
+    }
+
+    std::cout << "====================== yolov3 postprocess =============================" << endl;
+    string yoloV3Config = "./model/yolov3/yolov3_tf_bs1_fp16.cfg";
+    string yoloV3LabelPath = "./model/yolov3/coco.names";
+
+    ImagePreProcessInfo imagePreProcessInfo(params->ResizeImageBatch[dataIndex].GetOriginalSize().width,
+                                            params->ResizeImageBatch[dataIndex].GetOriginalSize().height,
+                                            params->DecodeImageBatch[dataIndex].GetOriginalSize().width,
+                                            params->DecodeImageBatch[dataIndex].GetOriginalSize().height);
+    vector<ImagePreProcessInfo> imagePreProcessInfos{imagePreProcessInfo};
+
+    SDKYoloV3PostProcess(yoloV3Config, yoloV3LabelPath, yoloV3Outputs, params->CropConfigRectBatch[dataIndex],
+                         imagePreProcessInfos);
+    if (params->CropConfigRectBatch[dataIndex].empty()) {
+        std::cout << "Failed to run yolov3 postProcess." << std::endl;
+        return 0;
+    }
+    params->CropResizeImageBatch[dataIndex].resize(params->CropConfigRectBatch[dataIndex].size());
+    return APP_ERR_OK;
+}
+
+void AsyncYoloV3PostProcessCallbackFunc(void* args)
+{
+    ret = AsyncYoloV3PostProcessPro(static_cast<AsyncYoloV3PostProcessParam*>(args)->yoloV3Outputs,
+                                    static_cast<AsyncYoloV3PostProcessParam*>(args)->batchIndex,
+                                    static_cast<AsyncYoloV3PostProcessParam*>(args)->params,
+                                    static_cast<AsyncYoloV3PostProcessParam*>(args)->dataIndex);
+    if (ret != APP_ERR_OK) {
+        LogError << "Async execute yolov3 postprocess failed.";
+    }
+}
+
+APP_ERROR AsyncResNetYoloV3PostProcessPro(std::vector<Tensor>& resnetOutput_)
+{
+    for (size_t i = 0; i < resnetOutput_.size(); i++) {
+        resnetOutput_[i].ToHost();
+    }
+    ResnetYoloV3PostProcess(resnetOutput_);
+    return APP_ERR_OK;
+}
+
+void AsyncResNetYoloV3PostProcessCallbackFunc(void* args)
+{
+    AsyncResnetYoloV3PostProcessParam* input = static_cast<AsyncResnetYoloV3PostProcessParam*>(args);
+    ret = AsyncResNetYoloV3PostProcessPro(input->resnetOutput);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Async execute resnet yolov3 postprocess failed.";
+    }
+}
+
+// Stream callback: converts the resized (YOLOv3) or cropped (ResNet) image into the model input tensor.
+void ConvertToTensorProcess(void* args)
+{
+    ConvertToTensorParam* convertToTensorParam = static_cast<ConvertToTensorParam*>(args);
+    int dataIndex = convertToTensorParam->dataIndex;
+    if (convertToTensorParam->isYolo) {
+        convertToTensorParam->params->TensorImageBatch[dataIndex] =
+            convertToTensorParam->params->ResizeImageBatch[dataIndex].ConvertToTensor();
+        convertToTensorParam->params->yoloV3InputTensorBatch[dataIndex] =
+            {convertToTensorParam->params->TensorImageBatch[dataIndex]};
+    } else {
+        convertToTensorParam->params->ResnetTensorBatch[dataIndex] =
+            convertToTensorParam->params->CropResizeImageBatch[dataIndex][0].ConvertToTensor();
+        convertToTensorParam->params->ResnetInputBatch[dataIndex] =
+            {convertToTensorParam->params->ResnetTensorBatch[dataIndex]};
+    }
+}
+
+void YoloMalloc(void* args)
+{
+    MallocYoloTensor* input = static_cast<MallocYoloTensor*>(args);
+    MxBase::Tensor::TensorMalloc(input->output1);
+    MxBase::Tensor::TensorMalloc(input->output2);
+    MxBase::Tensor::TensorMalloc(input->output3);
+}
+
+void ResNetMalloc(void* args)
+{
+    MallocResNetTensor* input = static_cast<MallocResNetTensor*>(args);
+    MxBase::Tensor::TensorMalloc(input->output1);
+    MxBase::Tensor::TensorMalloc(input->output2);
+}
+
+APP_ERROR E2eInferAsync(int batchIndex, Params* param)
+{
+    for (int i = 0; i < param->DecodeImageBatch.size(); i++) {
+        ret = imageProcessor.Resize(param->DecodeImageBatch[i], Size(416, 416), param->ResizeImageBatch[i],
+                                    Interpolation::HUAWEI_HIGH_ORDER_FILTER, AscendStreamVec[batchIndex]);
+        if (ret != APP_ERR_OK) {
+            std::cout << "imageProcessor Resize failed. ret is " << ret << std::endl;
ret is " << ret << std::endl; + return ret; + } + + ConvertToTensorParam* convertToTensorParam1 = new ConvertToTensorParam{true, param, i}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(ConvertToTensorProcess, + static_cast(convertToTensorParam1)); + + cout << "====================== 图像前处理结束 =======================" << endl << endl; + cout << "====================== 目标检测模型推理 ======================" << endl; + + MxBase::Tensor outTensor1({1, 13, 13, 255}, MxBase::TensorDType::FLOAT32, g_deviceId); + MxBase::Tensor outTensor2({1, 26, 26, 255}, MxBase::TensorDType::FLOAT32, g_deviceId); + MxBase::Tensor outTensor3({1, 52, 52, 255}, MxBase::TensorDType::FLOAT32, g_deviceId); + + MallocYoloTensor* mallocYoloTensor = new MallocYoloTensor{outTensor1, outTensor2, outTensor3}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(YoloMalloc, static_cast(mallocYoloTensor)); + + vector* yoloV3outputs = new vector{outTensor1, outTensor2, outTensor3}; + ret = yoloV3.Infer(param->yoloV3InputTensorBatch[i], *yoloV3outputs, AscendStreamVec[batchIndex]); + + AsyncYoloV3PostProcessParam* asyncYoloV3PostProcessParam = new AsyncYoloV3PostProcessParam{*yoloV3outputs, + batchIndex, + param, + i}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(AsyncYoloV3PostProcessCallbackFunc, + static_cast(asyncYoloV3PostProcessParam)); + ret = imageProcessor.CropResize(param->DecodeImageBatch[i], param->CropConfigRectBatch[i], Size(224, 224), + param->CropResizeImageBatch[i], AscendStreamVec[batchIndex]); + + ConvertToTensorParam *convertToTensorParam2 = new ConvertToTensorParam{false, param, i}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(ConvertToTensorProcess, static_cast(convertToTensorParam2)); + + std::cout << "===================== resnet推理开始 ====================" << std::endl; + + MxBase::Tensor resnetOutput1({1, 1001}, MxBase::TensorDType::FLOAT32, g_deviceId); + MxBase::Tensor resnetOutput2({1}, MxBase::TensorDType::FLOAT32, g_deviceId); + + MallocResNetTensor* mallocResNetTensor = new MallocResNetTensor{resnetOutput1, resnetOutput2}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(ResNetMalloc, static_cast(mallocResNetTensor)); + vector* resnetoutput = new vector{resnetOutput1, resnetOutput2}; + + resnet50.Infer(param->ResnetInputBatch[i], *resnetoutput, AscendStreamVec[batchIndex]); + + AsyncResnetYoloV3PostProcessParam* asyncResnetYoloV3PostProcessParam = new AsyncResnetYoloV3PostProcessParam{ + *resnetoutput + }; + + ret = AscendStreamVec[batchIndex].LaunchCallBack(AsyncResNetYoloV3PostProcessCallbackFunc, + static_cast(asyncResnetYoloV3PostProcessParam)); + + HoldResourceParam* holdResourceParam = new HoldResourceParam{outTensor1, outTensor2, outTensor3, resnetOutput1, + resnetOutput2, yoloV3outputs}; + ret = AscendStreamVec[batchIndex].LaunchCallBack(HoldResourceCallback, static_cast(holdResourceParam)); + + } + + return ret; +} + +void AsyncE2eInferProcess(void* args) +{ + E2eInferParams* e2EInferParams = static_cast(args); + E2eInferAsync(e2EInferParams->batchIdx, e2EInferParams->params); +} + +APP_ERROR AsyncE2eInfer(AscendStream& stream, E2eInferParams* e2EInferParams) +{ + ret = stream.LaunchCallBack(AsyncE2eInferProcess, static_cast(e2EInferParams)); + return ret; +} + +int SplitImage(const std::string& imgDir) +{ + int totalImg = 0; + DIR* dir = nullptr; + struct dirent *ptr = nullptr; + if ((dir = opendir(imgDir.c_str())) == nullptr) { + LogError << "Open image dir failed, please check the input image dir existed."; + exit(1); + } + std::vector imgVec; + while ((ptr = readdir(dir)) != 
+        if (strcmp(ptr->d_name, ".") == 0 || strcmp(ptr->d_name, "..") == 0) {
+            continue;
+        } else if (ptr->d_type == DT_LNK || ptr->d_type == DT_DIR) {
+            // Skip symbolic links and sub-directories.
+            continue;
+        } else {
+            std::string filePath = imgDir + "/" + ptr->d_name;
+            imgVec.push_back(filePath);
+            totalImg++;
+        }
+    }
+    closedir(dir);
+    sort(imgVec.begin(), imgVec.end());
+
+    // Distribute the image paths round-robin over BATCH_SIZE list files.
+    std::vector<std::vector<std::string>> fileNum(BATCH_SIZE);
+    for (size_t i = 0; i < imgVec.size(); i++) {
+        fileNum[i % BATCH_SIZE].push_back(imgVec[i]);
+    }
+
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        std::ofstream imgFile;
+        std::string fileName = imgDir + "/imgSplitFile" + std::to_string(i);
+        imgFile.open(fileName, ios::out | ios::trunc);
+        for (const auto& img : fileNum[i]) {
+            imgFile << img << std::endl;
+        }
+        imgFile.close();
+    }
+    LogInfo << "Split Image success.";
+    return totalImg;
+}
+
+int main(int argc, char *argv[])
+{
+    MxInit();
+
+    PrepareData();
+
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        Params* params = new Params{decodeImageBatch[i], resizeImageBatch[i], tensorImageBatch[i],
+                                    resnetTensorBatch[i], resnetInputBatch[i], cropResizeImageBatch[i],
+                                    yoloV3OutputTensorBatch[i], resnetOutputTensorBatch[i], cropConfigRectBatch[i],
+                                    yoloV3InputTensorBatch[i]};
+        E2eInferParams* e2EInferParams = new E2eInferParams{i, params};
+        E2eInferParamsVec.push_back(e2EInferParams);
+    }
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        ret = AsyncE2eInfer(StreamVec_[i % BATCH_SIZE], E2eInferParamsVec[i]);
+    }
+    for (int i = 0; i < STREAM_NUM; i++) {
+        StreamVec_[i].Synchronize();
+    }
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        AscendStreamVec[i].Synchronize();
+    }
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costTime = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    cout << "all time is " << costTime << "ms" << endl << endl;
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to run E2eInfer.";
+    }
+
+    for (int i = 0; i < STREAM_NUM; i++) {
+        StreamVec_[i].DestroyAscendStream();
+    }
+    for (int i = 0; i < BATCH_SIZE; i++) {
+        AscendStreamVec[i].DestroyAscendStream();
+    }
+    return 0;
+}