diff --git a/tutorials/ModleSample/C++/CMakeLists.txt b/tutorials/ModleSample/C++/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..49a02d9c5ed51b804230ffaa90e979dbec23af38
--- /dev/null
+++ b/tutorials/ModleSample/C++/CMakeLists.txt
@@ -0,0 +1,32 @@
+# CMake lowest version requirement
+cmake_minimum_required(VERSION 3.5.2)
+# project information
+project(MindX_SDK_Sample)
+set(MX_SDK_HOME $ENV{MX_SDK_HOME})
+if (NOT DEFINED ENV{MX_SDK_HOME})
+    string(REGEX REPLACE "(.*)/(.*)/(.*)/(.*)" "\\1" MX_SDK_HOME ${CMAKE_CURRENT_SOURCE_DIR})
+    message(STATUS "set default MX_SDK_HOME: ${MX_SDK_HOME}")
+else ()
+    message(STATUS "env MX_SDK_HOME: ${MX_SDK_HOME}")
+endif()
+
+# Compile options
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++14 -fPIC -fstack-protector-all -Wall)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+set(CMAKE_CXX_FLAGS_DEBUG "-g")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,relro,-z,now,-z,noexecstack -s -pie")
+set(CMAKE_SKIP_RPATH TRUE)
+
+include_directories(
+    ${MX_SDK_HOME}/include/
+)
+
+link_directories(
+    ${MX_SDK_HOME}/lib/
+)
+
+add_executable(main main.cpp)
+target_link_libraries(main mxbase glog pthread)
+install(TARGETS main DESTINATION ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
\ No newline at end of file
diff --git a/tutorials/ModleSample/C++/main.cpp b/tutorials/ModleSample/C++/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..49eedf0b81665e3f3dc3c9a0e9c7388a802887f1
--- /dev/null
+++ b/tutorials/ModleSample/C++/main.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <iostream>
+#include <string>
+#include <vector>
+#include "MxBase/E2eInfer/Model/Model.h"
+#include "MxBase/E2eInfer/Tensor/Tensor.h"
+#include "MxBase/E2eInfer/GlobalInit/GlobalInit.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Asynchron/AscendStream.h"
+
+using namespace MxBase;
+
+std::string g_omModelPath = "../model/IAT_lol-sim.om"; // path of the converted .om model
+
+void ModelInfer()
+{
+    int32_t deviceId = 0; // device on which the model is deployed
+    Model model(g_omModelPath, deviceId);
+
+    MxBase::VisionDataFormat inputFormat = model.GetInputFormat(); // get the data layout of the model input (NHWC or NCHW)
+    switch (inputFormat) {
+        case MxBase::VisionDataFormat::NCHW:
+            std::cout << "Input format: NCHW" << std::endl;
+            break;
+        case MxBase::VisionDataFormat::NHWC:
+            std::cout << "Input format: NHWC" << std::endl;
+            break;
+        default:
+            std::cout << "Unknown input format" << std::endl;
+            break;
+    }
+
+    std::cout << "model input tensor num: " << model.GetInputTensorNum() << std::endl;
+    std::cout << "model output tensor num: " << model.GetOutputTensorNum() << std::endl;
+
+    std::vector<int64_t> inShape64 = model.GetInputTensorShape(); // get the shape of the model input tensor
+    std::vector<uint32_t> inShape;
+    std::cout << "inputShape:";
+    for (auto s : inShape64) {
+        std::cout << " " << s;
+        // to pass the queried shape directly into the Tensor constructor, the int64_t values must be converted to uint32_t
+        inShape.push_back(static_cast<uint32_t>(s));
+    }
+    std::cout << std::endl;
+    TensorDType dtype = model.GetInputTensorDataType(0); // get the data type of the model input tensor
+    std::vector<Tensor> input;  // model inputs
+    std::vector<Tensor> output; // model outputs
+    for (size_t i = 0; i < model.GetOutputTensorNum(); i++) {
+        // get the shape of the model output tensor; the result can be passed directly into the Tensor constructor
+        std::vector<uint32_t> outputShape = model.GetOutputTensorShape(i);
+        std::cout << "outputShape:";
+        for (size_t j = 0; j < outputShape.size(); ++j) {
+            std::cout << " " << outputShape[j];
+        }
+        std::cout << std::endl;
+        MxBase::TensorDType outputDType = model.GetOutputTensorDataType(i); // get the data type of the model output tensor
+        Tensor dst(outputShape, outputDType);
+        dst.Malloc();
+        dst.ToDevice(0);
+        output.push_back(dst);
+    }
+    Tensor src(inShape, dtype);
+    src.Malloc();
+    src.ToDevice(0);
+    input.push_back(src);
+    AscendStream stream(0);
+    stream.CreateAscendStream();
+    auto ret = model.Infer(input, output, stream); // Model inference interface
+    stream.Synchronize();
+    stream.DestroyAscendStream();
+    if (ret != APP_ERR_OK) {
+        std::cout << "model infer failed, ret = " << ret << std::endl;
+    }
+}
+
+int main()
+{
+    MxBase::MxInit();
+    ModelInfer();
+    MxBase::MxDeInit();
+}
\ No newline at end of file
diff --git a/tutorials/ModleSample/C++/run.sh b/tutorials/ModleSample/C++/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2bb033d1d7651c1ca8e3d127bc936440cdc031bd
--- /dev/null
+++ b/tutorials/ModleSample/C++/run.sh
@@ -0,0 +1,10 @@
+rm -rf build
+
+# compile
+cmake -S . -Bbuild
+make -C ./build -j
+
+# run
+./main
+
+exit 0
\ No newline at end of file
diff --git a/tutorials/ModleSample/README.md b/tutorials/ModleSample/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c8c310206415b84aee0bc8ad0d600657a7bcb474
--- /dev/null
+++ b/tutorials/ModleSample/README.md
@@ -0,0 +1,130 @@
+## MxVision Quick Start: Basic Usage of the Model Interface
+
+## 1. Introduction
+
+### 1.1 Overview
+The Model class is the abstraction of a model: it holds the resources required for model inference and mainly exposes inference interfaces.
+
+This tutorial constructs a Model from a given model file through both the C++ and Python interfaces, then calls the related interfaces to print model information. The main interfaces used are:
+- C++:
+```
+Model::GetInputFormat();      // Get the data layout of the model input (NHWC or NCHW).
+Model::GetInputTensorNum();   // Get the number of model inputs.
+Model::GetOutputTensorNum();  // Get the number of model outputs.
+Model::GetInputTensorShape(uint32_t index = 0);   // Get the shape of the model input tensor at the given index.
+Model::GetOutputTensorShape(uint32_t index = 0);  // Get the shape of the model output tensor at the given index.
+Model::Infer(std::vector<Tensor>& inputTensors, std::vector<Tensor>& outputTensors, AscendStream &stream = AscendStream::DefaultStream()); // Inference interface.
+```
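+
+As a minimal sketch, condensed from `C++/main.cpp` in this sample (the model path and device id are the values used throughout this tutorial), a Model can be constructed and queried like this:
+```cpp
+#include <iostream>
+#include "MxBase/E2eInfer/Model/Model.h"
+#include "MxBase/E2eInfer/GlobalInit/GlobalInit.h"
+
+int main()
+{
+    MxBase::MxInit();                                   // initialize mxVision resources
+    MxBase::Model model("../model/IAT_lol-sim.om", 0);  // model path, device id
+    std::cout << "inputs: " << model.GetInputTensorNum()
+              << ", outputs: " << model.GetOutputTensorNum() << std::endl;
+    MxBase::MxDeInit();
+    return 0;
+}
+```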
+
+- Python:
+```
+input_shape(index: int)   # Get the shape of the model input tensor at the given index.
+output_shape(index: int)  # Get the shape of the model output tensor at the given index.
+input_dtype(index: int)   # Get the data type of the model input tensor at the given index.
+infer(tensorList: List)   # Run model inference on a list of input Tensors.
+```
+- The Python sample also uses numpy to generate the input data and to convert between Tensor objects and numpy arrays, as sketched below.
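+
+A minimal sketch of these numpy conversions, condensed from `python/main.py` in this sample (the 1x3x400x600 float32 shape is this model's input shape):
+```python
+import numpy as np
+from mindx.sdk import base, Tensor
+
+base.mx_init()  # initialize mxVision resources, as in python/main.py
+# numpy -> Tensor: generate random float32 data in the model input shape and wrap it.
+arr = np.random.randint(0, 255, size=(1, 3, 400, 600), dtype=np.uint8).astype(np.float32)
+tensor = Tensor(arr)
+# Tensor -> numpy (after inference, call to_host() first if the data lives on the device).
+back = np.array(tensor)
+print(back.shape)
+base.mx_deinit()
+```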
+
+### 1.2 Supported Products
+
+This project supports the Ascend Atlas 300I Pro and Atlas 300V Pro.
+
+### 1.3 Supported Versions
+
+The MxVision, CANN, and Driver/Firmware versions matching this sample are:
+
+| MxVision version | CANN version | Driver/Firmware version |
+| --------- | ------------------ | -------------- |
+| 6.0.RC3 | 8.0.RC3 | 24.1.RC3 |
+
+### 1.4 Third-Party Dependencies
+
+| Software | Version |
+| ------------- | ---------------- |
+| numpy | 2.0.2 |
+
+### 1.5 Directory Structure
+
+The project directory is organized as follows:
+```
+|-------- C++
+|           |---- main.cpp
+|           |---- CMakeLists.txt
+|           |---- run.sh
+|-------- python
+|           |---- main.py
+|-------- model
+|-------- README.md
+```
+
+## 2. Setting Environment Variables
+
+Before building and running the project, set the environment variables:
+
+```bash
+. /usr/local/Ascend/ascend-toolkit/set_env.sh  # default toolkit install path; adjust to the actual install path
+. ${SDK_INSTALL_PATH}/mxVision/set_env.sh      # SDK install path; adjust to the actual install path
+```
+
+## 3. Preparing the Model
+
+**Step 1**: Download the model file
+
+This project introduces the C++ and Python usage of the Model interfaces in the mxVision SDK. It uses the model from the [RGB image nighttime enhancement reference design](https://gitee.com/ascend/mindxsdk-referenceapps/tree/master/contrib/IAT).
+The original PyTorch model source is available [here](https://github.com/cuiziteng/illumination-adaptive-transformer).
+A ready-to-use ONNX model converted from the PyTorch checkpoint is provided: [IAT_lol-sim.onnx](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/IAT/IAT_lol-sim.onnx)
+After downloading, place it in the model folder in the project root.
+
+**Step 2**: Convert the model
+
+Convert the ONNX model to an om model. In the model folder, run the following command:
+```
+atc --framework=5 --model=./IAT_lol-sim.onnx --input_shape="input_1:1,3,400,600" --output=IAT_lol-sim --soc_version=Ascend310P3
+```
+After the conversion script finishes, the IAT_lol-sim.om model file is generated. On success the terminal prints the following (warn logs during conversion can be ignored):
+
+```bash
+ATC start working now, please wait for a moment.
+ATC run success, welcome to the next use.
+```
+
+## 4. Building and Running
+
+### 4.1 Running the C++ Sample
+
+**Step 1**: Enter the `C++` folder and run:
+```
+bash run.sh
+```
+
+**Step 2**: Check the result
+After the command succeeds, the model information is printed on screen:
+```
+Input format: NCHW
+model input tensor num: 1
+model output tensor num: 1
+inputShape: 1 3 400 600
+outputShape: 1 3 400 600
+```
+
+### 4.2 Running the Python Sample
+
+**Step 1**: Enter the `python` folder and run:
+```
+python3 main.py
+```
+
+**Step 2**: Check the result
+After the command succeeds, the model information is printed on screen:
+```
+input num: 1
+output num: 1
+input Tensor shape list: [1, 3, 400, 600]
+output Tensor shape list: [1, 3, 400, 600]
+Input dtype: dtype.float32
+output numpy array shape (1, 3, 400, 600)
+```
diff --git a/tutorials/ModleSample/model/.keepme b/tutorials/ModleSample/model/.keepme
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tutorials/ModleSample/python/main.py b/tutorials/ModleSample/python/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d3b1739c7e2a20531ba9a3cdd3cdd285f087a0a
--- /dev/null
+++ b/tutorials/ModleSample/python/main.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+from mindx.sdk import Tensor
+from mindx.sdk import base  # mxVision inference interfaces
+from mindx.sdk.base import Model  # mxVision inference interfaces
+
+
+def process():
+    device_id = 0  # device id
+    model_path = '../model/IAT_lol-sim.om'  # model path
+
+    # Two ways to load the model
+    # Option 1
+    model = base.model(modelPath=model_path, deviceId=device_id)  # returns a Model object
+
+    # Option 2: construct the Model object directly (comment out Option 1 above when using this)
+    # model = Model(model_path, device_id)
+
+    print("input num:", model.input_num)    # number of model inputs
+    print("output num:", model.output_num)  # number of model outputs
+
+    input_shape_vector = model.input_shape(0)  # shape of the model input tensor
+    print("input Tensor shape list:", input_shape_vector)
+
+    output_shape_vector = model.output_shape(0)  # shape of the model output tensor
+    print("output Tensor shape list:", output_shape_vector)
+
+    input_dtype = model.input_dtype(0)  # data type of the model input tensor
+    print("Input dtype:", input_dtype)
+
+    # Generate input data with numpy; in a real scenario, an image read from disk
+    # can likewise be converted to a Tensor through numpy.
+    img = np.random.randint(
+        0, 255,
+        size=(input_shape_vector[0], input_shape_vector[1], input_shape_vector[2], input_shape_vector[3]),
+        dtype=np.uint8
+    ).astype(np.float32)  # the size must match the model's input requirements
+    img = Tensor(img)  # convert the numpy array to a Tensor
+    output = model.infer([img])  # run inference; input: List[base.Tensor], returns List[base.Tensor]
+
+    output[0].to_host()  # move the Tensor data to host memory
+    output = np.array(output[0])  # convert the data to a numpy array
+    print("output numpy array shape", output.shape)
+
+
+if __name__ == "__main__":
+    base.mx_init()  # initialize mxVision resources
+    process()
+    base.mx_deinit()
\ No newline at end of file