From 7c0f52aaa6ea1dab8a2cbd359ebaafc44837bc66 Mon Sep 17 00:00:00 2001 From: huan <3174348550@qq.com> Date: Wed, 30 Jul 2025 11:25:56 +0800 Subject: [PATCH] delete files and modify contents --- .../api_python/dynamic_shape_primitive.md | 2 +- .../source_en/api_python/env_var_list.rst | 6 +- docs/mindspore/source_en/api_python/index.rst | 2 +- .../api_python/operator_list_parallel.md | 6 +- .../source_en/design/all_scenarios.md | 6 +- .../api_python/dynamic_shape_primitive.md | 2 +- .../ascend310_single_op_sample/CMakeLists.txt | 10 - .../ascend310_single_op_sample/README.md | 33 - .../ascend310_single_op_sample/main.cc | 80 --- .../tensor_add.mindir | 10 - docs/sample_code/comm_subgraph/run.sh | 18 - docs/sample_code/comm_subgraph/train.py | 92 --- .../convert_tf2ms_code/convert_tf2ms.py | 106 --- .../convert_tf2ms_code/ms_lenet.py | 76 -- .../convert_tf2ms_code/requirements.txt | 1 - .../convert_tf2ms_code/tf_lenet.py | 78 --- docs/sample_code/disaster_recover/recover.sh | 18 - docs/sample_code/disaster_recover/run.sh | 37 - docs/sample_code/disaster_recover/train.py | 68 -- .../distributed_graph_partition/lenet.py | 109 --- .../distributed_graph_partition/run.sh | 36 - .../distributed_graph_partition/train.py | 47 -- docs/sample_code/fault_recover/recover.sh | 18 - docs/sample_code/fault_recover/run.sh | 18 - docs/sample_code/fault_recover/train.py | 97 --- docs/sample_code/manual_parallel/run.sh | 18 - docs/sample_code/manual_parallel/train.py | 87 --- docs/sample_code/memory_offload/run.sh | 32 - docs/sample_code/memory_offload/train.py | 142 ---- .../profiler/profiling_feed_step.py | 76 -- .../mindinsight/profiler/profiling_step.py | 107 --- docs/sample_code/nnie_proposal/CMakeLists.txt | 21 - .../sample_code/nnie_proposal/src/proposal.cc | 650 ------------------ docs/sample_code/nnie_proposal/src/proposal.h | 93 --- .../nnie_proposal/src/proposal_fp32.cc | 195 ------ .../nnie_proposal/src/proposal_fp32.h | 52 -- .../nnie_proposal/src/proposal_infer.cc | 74 -- .../nnie_proposal/src/proposal_infer.h | 36 - .../nnie_proposal/third_patry/CMakeLists.txt | 22 - .../parallel_support_dynamic_shape/main.py | 86 --- .../parallel_support_dynamic_shape/run.sh | 18 - docs/sample_code/parameter_server/run.sh | 49 -- docs/sample_code/parameter_server/train.py | 87 --- docs/sample_code/sapp/run.sh | 18 - docs/sample_code/sapp/train.py | 91 --- docs/sample_code/sharding_propagation/run.sh | 18 - .../sample_code/sharding_propagation/train.py | 105 --- install/mindspore_ascend_install_source_en.md | 2 +- install/mindspore_cpu_install_source_en.md | 2 +- install/mindspore_gpu_install_source_en.md | 2 +- .../source_en/debug/error_analysis/mindir.md | 2 +- 51 files changed, 16 insertions(+), 3045 deletions(-) delete mode 100644 docs/sample_code/ascend310_single_op_sample/CMakeLists.txt delete mode 100644 docs/sample_code/ascend310_single_op_sample/README.md delete mode 100644 docs/sample_code/ascend310_single_op_sample/main.cc delete mode 100644 docs/sample_code/ascend310_single_op_sample/tensor_add.mindir delete mode 100644 docs/sample_code/comm_subgraph/run.sh delete mode 100644 docs/sample_code/comm_subgraph/train.py delete mode 100644 docs/sample_code/convert_tf2ms_code/convert_tf2ms.py delete mode 100644 docs/sample_code/convert_tf2ms_code/ms_lenet.py delete mode 100644 docs/sample_code/convert_tf2ms_code/requirements.txt delete mode 100644 docs/sample_code/convert_tf2ms_code/tf_lenet.py delete mode 100644 docs/sample_code/disaster_recover/recover.sh delete mode 100644 
docs/sample_code/disaster_recover/run.sh delete mode 100644 docs/sample_code/disaster_recover/train.py delete mode 100644 docs/sample_code/distributed_graph_partition/lenet.py delete mode 100644 docs/sample_code/distributed_graph_partition/run.sh delete mode 100644 docs/sample_code/distributed_graph_partition/train.py delete mode 100644 docs/sample_code/fault_recover/recover.sh delete mode 100644 docs/sample_code/fault_recover/run.sh delete mode 100644 docs/sample_code/fault_recover/train.py delete mode 100644 docs/sample_code/manual_parallel/run.sh delete mode 100644 docs/sample_code/manual_parallel/train.py delete mode 100644 docs/sample_code/memory_offload/run.sh delete mode 100644 docs/sample_code/memory_offload/train.py delete mode 100644 docs/sample_code/mindinsight/profiler/profiling_feed_step.py delete mode 100644 docs/sample_code/mindinsight/profiler/profiling_step.py delete mode 100644 docs/sample_code/nnie_proposal/CMakeLists.txt delete mode 100644 docs/sample_code/nnie_proposal/src/proposal.cc delete mode 100644 docs/sample_code/nnie_proposal/src/proposal.h delete mode 100644 docs/sample_code/nnie_proposal/src/proposal_fp32.cc delete mode 100644 docs/sample_code/nnie_proposal/src/proposal_fp32.h delete mode 100644 docs/sample_code/nnie_proposal/src/proposal_infer.cc delete mode 100644 docs/sample_code/nnie_proposal/src/proposal_infer.h delete mode 100644 docs/sample_code/nnie_proposal/third_patry/CMakeLists.txt delete mode 100644 docs/sample_code/parallel_support_dynamic_shape/main.py delete mode 100644 docs/sample_code/parallel_support_dynamic_shape/run.sh delete mode 100644 docs/sample_code/parameter_server/run.sh delete mode 100644 docs/sample_code/parameter_server/train.py delete mode 100644 docs/sample_code/sapp/run.sh delete mode 100644 docs/sample_code/sapp/train.py delete mode 100644 docs/sample_code/sharding_propagation/run.sh delete mode 100644 docs/sample_code/sharding_propagation/train.py diff --git a/docs/mindspore/source_en/api_python/dynamic_shape_primitive.md b/docs/mindspore/source_en/api_python/dynamic_shape_primitive.md index 2bc71ec98a..09bfe8331a 100644 --- a/docs/mindspore/source_en/api_python/dynamic_shape_primitive.md +++ b/docs/mindspore/source_en/api_python/dynamic_shape_primitive.md @@ -2,7 +2,7 @@ [![View Source On Gitee](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source_en.svg)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/source_en/api_python/dynamic_shape_primitive.md) -> The following list provides primitive interfaces that support dynamic shape functionality in PYNATIVE mode. However, some primitive interfaces may have incomplete data type support. If you encounter such issues, you can resolve them by manually incorporating the [Cast](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Cast.html) operator. +> The following list provides primitive interfaces that support dynamic shape functionality in PyNative mode. However, some primitive interfaces may have incomplete data type support. If you encounter such issues, you can resolve them by manually incorporating the [Cast](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Cast.html) operator. > > Primitive interfaces outside of this list have limited support for dynamic shape functionality and may fail to execute. Additionally, in graph mode, dynamic shape functionality is also limited and may result in execution failures. 
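
A minimal sketch of the Cast workaround mentioned in the note above, assuming a primitive whose float16 dynamic-shape path is incomplete in PyNative mode (the operator and dtypes here are placeholders, not taken from the patch):

```python
# Hedged illustration: cast inputs up to a supported dtype, run the primitive,
# then cast the result back, as the note suggests for incomplete dtype support.
import numpy as np
import mindspore as ms
from mindspore import ops, Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)

cast = ops.Cast()
add = ops.Add()  # stand-in for a primitive with a missing float16 kernel

x = Tensor(np.ones((2, 3)), ms.float16)
y = Tensor(np.ones((2, 3)), ms.float16)

# Manually incorporate Cast around the primitive call.
out = cast(add(cast(x, ms.float32), cast(y, ms.float32)), ms.float16)
print(out.dtype, out.shape)
```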
> diff --git a/docs/mindspore/source_en/api_python/env_var_list.rst b/docs/mindspore/source_en/api_python/env_var_list.rst index 3ddc2731f5..46144ab9ec 100644 --- a/docs/mindspore/source_en/api_python/env_var_list.rst +++ b/docs/mindspore/source_en/api_python/env_var_list.rst @@ -1056,12 +1056,12 @@ Build from source - Value Range - Description * - MSLIBS_CACHE_PATH - - Path where third-pary software built alongside MindSpore will be installed to, when building MindSpore from source code. + - Path where third-party software built alongside MindSpore will be installed to, when building MindSpore from source code. - String - `~/.mslib`: Your expected path to install third-party software. Default value: None. - - When this environment variable is set, MindSpore will install third-party software built from source code to this path, enabling these software to be shared throughout multiple compilations and save time spent builing them. + - When this environment variable is set, MindSpore will install third-party software built from source code to this path, enabling these software to be shared throughout multiple compilations and save time spent building them. * - MSLIBS_SERVER - - Website where third-pary software' source code is downloaded from when building MindSpore from source code. + - Website where third-party software' source code is downloaded from when building MindSpore from source code. - String - `tools.mindspore.cn`: Official MindSpore image for downloading third-party source code. Default value: None. - When this environment variable is set, MindSpore will download third-party source code from given address, avoiding issues due to unstable access to github.com, improving speed of downloading source code. This variable is inactive when `-S on` is set in your compile options. diff --git a/docs/mindspore/source_en/api_python/index.rst b/docs/mindspore/source_en/api_python/index.rst index d7c5cb84b4..011ad93e38 100644 --- a/docs/mindspore/source_en/api_python/index.rst +++ b/docs/mindspore/source_en/api_python/index.rst @@ -52,7 +52,7 @@ MindSpore provides rich interfaces for model building, training, and inference. * - `mindspore.amp <./mindspore.amp.html>`_ - Mixed-precision interface. * - `mindspore.train <./mindspore.train.html>`_ - - Traning interface. + - Training interface. * - `mindspore.parallel <./mindspore.parallel.html>`_ - Auto Parallel interface. * - `mindspore.runtime <./mindspore.runtime.html>`_ diff --git a/docs/mindspore/source_en/api_python/operator_list_parallel.md b/docs/mindspore/source_en/api_python/operator_list_parallel.md index 6e12d68b11..030afe9512 100644 --- a/docs/mindspore/source_en/api_python/operator_list_parallel.md +++ b/docs/mindspore/source_en/api_python/operator_list_parallel.md @@ -10,8 +10,8 @@ | [mindspore.ops.Add](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Add.html) | None | Layout configuration is supported. The input layout should be the same or broadcastable. The output layout cannot be configured. 
| | [mindspore.ops.AddN](https://mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.AddN.html) | None | Not support config layout | | [mindspore.ops.ApproximateEqual](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.ApproximateEqual.html) | None | Not support config layout | -| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.ArgMaxWithValue.html) | When the input_x is splited on the axis dimension, the distributed result may be inconsistent with that on the single machine. | Not support config layout | -| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.ArgMinWithValue.html) | When the input_x is splited on the axis dimension, the distributed result may be inconsistent with that on the single machine. | Not support config layout | +| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.ArgMaxWithValue.html) | When the input_x is split on the axis dimension, the distributed result may be inconsistent with that on the single machine. | Not support config layout | +| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.ArgMinWithValue.html) | When the input_x is split on the axis dimension, the distributed result may be inconsistent with that on the single machine. | Not support config layout | | [mindspore.ops.Asin](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Asin.html) | None | Not support config layout | | [mindspore.ops.Asinh](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Asinh.html) | None | Not support config layout | | [mindspore.ops.Assign](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.Assign.html) | None | Not support config layout | @@ -26,7 +26,7 @@ | [mindspore.ops.BatchNorm](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BatchNorm.html) | It does not support GPU. | Not support config layout | | [mindspore.ops.BesselI0e](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BesselI0e.html) | None | Not support config layout | | [mindspore.ops.BesselI1e](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BesselI1e.html) | None | Not support config layout | -| [mindspore.ops.BiasAdd](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BiasAdd.html) | None | Support config layout. The second input, bias, should have ths same tensor layout as the last dimension of input_x. Output Layout is not open for configuration. | +| [mindspore.ops.BiasAdd](https://www.mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BiasAdd.html) | None | Support config layout. The second input, bias, should have the same tensor layout as the last dimension of input_x. Output Layout is not open for configuration. 
| | [mindspore.ops.BitwiseAnd](https://mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BitwiseAnd.html) | None | Not support config layout | | [mindspore.ops.BitwiseOr](https://mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BitwiseOr.html) | None | Not support config layout | | [mindspore.ops.BitwiseXor](https://mindspore.cn/docs/en/master/api_python/ops/mindspore.ops.BitwiseXor.html) | None | Not support config layout | diff --git a/docs/mindspore/source_en/design/all_scenarios.md b/docs/mindspore/source_en/design/all_scenarios.md index 4e95576649..e9a961a494 100644 --- a/docs/mindspore/source_en/design/all_scenarios.md +++ b/docs/mindspore/source_en/design/all_scenarios.md @@ -2,7 +2,7 @@ [![View Source On Gitee](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source_en.svg)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/source_en/design/all_scenarios.md) -MindSpore is designed to provide an device-edge-cloud full-scenarios AI framework that can be deployed in different hardware environments on the device-edge-cloud to meet the differentiated needs of different environments, such as supporting lightweight deployment on the device-side and rich training features such as automatic differentiation, hybrid precision and easy programming of models on the cloud side. +MindSpore is designed to provide a device-edge-cloud full-scenarios AI framework that can be deployed in different hardware environments on the device-edge-cloud to meet the differentiated needs of different environments, such as supporting lightweight deployment on the device-side and rich training features such as automatic differentiation, hybrid precision and easy programming of models on the cloud side. > The cloud side includes NVIDIA GPU, Huawei Ascend, Intel x86, etc., and the device side includes Arm, Qualcomm, Kirin, etc. @@ -36,11 +36,11 @@ An intermediate representation (IR) is a representation of a program between the MindSpore IR (MindIR) is a function-style IR based on graph representation. Its core purpose is to serve automatic differential transformation. Automatic differentiation uses the transformation method based on the function-style programming framework. Therefore, IR uses the semantics close to that of the ANF function. In addition, a manner of representation based on an explicit dependency graph is used by referring to excellent designs of Sea of Nodes[1] and Thorin[2]. For the specific introduction of ANF-IR, please refer to [MindSpore IR Syntax](https://www.mindspore.cn/docs/en/master/design/all_scenarios.html#syntax). -When a model compiled using MindSpore runs in the graph mode `set_context(mode=GRAPH_MODE)` and setting the environment variable `MS_DEV_SAVE_GRAPHS` to 1, some intermediate files will be generated during graph compliation. These intermediate files are called IR files. When more information about backend procedure should be analyzed, we can set the environment variable `MS_DEV_SAVE_GRAPHS` to 2. When more advanced information such as visualizing computing graphs or ir graphs of frontend with more details is required, we can set the environment variable `MS_DEV_SAVE_GRAPHS` to 3 to get more details. Currently, there are two IR files: +When a model compiled using MindSpore runs in the graph mode `set_context(mode=GRAPH_MODE)` and setting the environment variable `MS_DEV_SAVE_GRAPHS` to 1, some intermediate files will be generated during graph compilation. These intermediate files are called IR files. 
When more information about backend procedure should be analyzed, we can set the environment variable `MS_DEV_SAVE_GRAPHS` to 2. When more advanced information such as visualizing computing graphs or ir graphs of frontend with more details is required, we can set the environment variable `MS_DEV_SAVE_GRAPHS` to 3 to get more details. Currently, there are two IR files: - .ir file: An IR file that describes the model structure in text format and can be directly viewed using any text editors. -- .dot file: An IR file that describes the topology relationships between different nodes. You can use this file by [graphviz](http://graphviz.org) as thes input to generate images for users to view the model structure. +- .dot file: An IR file that describes the topology relationships between different nodes. You can use this file by [graphviz](http://graphviz.org) as the input to generate images for users to view the model structure. ### Syntax diff --git a/docs/mindspore/source_zh_cn/api_python/dynamic_shape_primitive.md b/docs/mindspore/source_zh_cn/api_python/dynamic_shape_primitive.md index a9996380c7..8127782c83 100644 --- a/docs/mindspore/source_zh_cn/api_python/dynamic_shape_primitive.md +++ b/docs/mindspore/source_zh_cn/api_python/dynamic_shape_primitive.md @@ -2,7 +2,7 @@ [![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/source_zh_cn/api_python/dynamic_shape_primitive.md) -> 以下列表列举了PYNATIVE模式下支持动态shape功能的算子。其中部分算子可能会存在数据类型支持不全的问题,如遇到此类问题,可以通过主动插入[Cast](https://www.mindspore.cn/docs/zh-CN/master/api_python/ops/mindspore.ops.Cast.html)算子解决。 +> 以下列表列举了PyNative模式下支持动态shape功能的算子。其中部分算子可能会存在数据类型支持不全的问题,如遇到此类问题,可以通过主动插入[Cast](https://www.mindspore.cn/docs/zh-CN/master/api_python/ops/mindspore.ops.Cast.html)算子解决。 > > 列表以外的算子对动态shape功能支持尚不完善,可能会执行失败。另外,图模式下,动态shape功能支持也不完善,可能会执行失败。 > diff --git a/docs/sample_code/ascend310_single_op_sample/CMakeLists.txt b/docs/sample_code/ascend310_single_op_sample/CMakeLists.txt deleted file mode 100644 index 13b50fa32e..0000000000 --- a/docs/sample_code/ascend310_single_op_sample/CMakeLists.txt +++ /dev/null @@ -1,10 +0,0 @@ -cmake_minimum_required(VERSION 3.14.1) -project (TensorAddSample[CXX]) -add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -fPIE -Wl,--allow-shlib-undefined") -option(MINDSPORE_PATH "mindspore install path" "") -include_directories(${MINDSPORE_PATH}) -find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib) - -add_executable(tensor_add_sample main.cc) -target_link_libraries(tensor_add_sample ${MS_LIB}) diff --git a/docs/sample_code/ascend310_single_op_sample/README.md b/docs/sample_code/ascend310_single_op_sample/README.md deleted file mode 100644 index bbfdf3bd07..0000000000 --- a/docs/sample_code/ascend310_single_op_sample/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# README - -Usage: - -Cd into the directory: - -```bash -cd ascend310_resnet50_preprocess_sample -``` - -Configure the cmake project, if MindSpore is installed by pip: - -```bash -cmake . -DMINDSPORE_PATH=`pip3 show mindspore | grep Location | awk '{print $2"/mindspore"}' | xargs realpath` -``` - -or installed by binary: - -```bash -cmake . 
-DMINDSPORE_PATH=path-to-your-custom-dir -``` - -Then compile: - -```bash -make -``` - -Run the sample: - -```bash -./tensor_add_sample -``` diff --git a/docs/sample_code/ascend310_single_op_sample/main.cc b/docs/sample_code/ascend310_single_op_sample/main.cc deleted file mode 100644 index 1a6926f6f2..0000000000 --- a/docs/sample_code/ascend310_single_op_sample/main.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include "include/api/context.h" -#include "include/api/model.h" -#include "include/api/serialization.h" - -namespace ms = mindspore; -constexpr auto tensor_add_file = "./tensor_add.mindir"; -static const std::vector input_data_1 = {1, 2, 3, 4}; -static const std::vector input_data_2 = {2, 3, 4, 5}; - -int main() { - // set context - auto context = std::make_shared(); - auto ascend310_info = std::make_shared(); - ascend310_info->SetDeviceID(0); - context->MutableDeviceInfo().push_back(ascend310_info); - - // define model - ms::Graph graph; - ms::Status ret = ms::Serialization::Load(tensor_add_file, ms::ModelType::kMindIR, &graph); - if (ret != ms::kSuccess) { - std::cout << "Load model failed." << std::endl; - return 1; - } - ms::Model tensor_add; - - // build model - ret = tensor_add.Build(ms::GraphCell(graph), context); - if (ret != ms::kSuccess) { - std::cout << "Build model failed." << std::endl; - return 1; - } - - // get model inputs - std::vector origin_inputs = tensor_add.GetInputs(); - if (origin_inputs.size() != 2) { - std::cout << "Invalid model inputs size " << origin_inputs.size() << std::endl; - return 1; - } - - // prepare input - std::vector outputs; - std::vector inputs; - inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(), - input_data_1.data(), sizeof(float) * input_data_1.size()); - inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(), - input_data_2.data(), sizeof(float) * input_data_2.size()); - - // infer - ret = tensor_add.Predict(inputs, &outputs); - if (ret != ms::kSuccess) { - std::cout << "Predict model failed." 
<< std::endl; - return 1; - } - - // print - for (auto &buffer : outputs) { - const float *p = reinterpret_cast(buffer.MutableData()); - for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) { - std::cout << p[i] << std::endl; - } - } - - return 0; -} diff --git a/docs/sample_code/ascend310_single_op_sample/tensor_add.mindir b/docs/sample_code/ascend310_single_op_sample/tensor_add.mindir deleted file mode 100644 index 80a58db0e4..0000000000 --- a/docs/sample_code/ascend310_single_op_sample/tensor_add.mindir +++ /dev/null @@ -1,10 +0,0 @@ - -0.1.0 MindSpore*1.1.0: - -8_7_5_construct_wrapper:x_ -8_7_5_construct_wrapper:y_8_7_5_construct_wrapper:1:2"8_7_5_construct_wrapper:[CNode]0:1"Add*b:shape1z shape:shape1,*0 - output_namesZoutputzscalar:List[value1,],*4 - input_namesZxZyzscalar:List[value1,value2,],:Default/Add-op38_7_5_construct_wrapper*$ -8_7_5_construct_wrapper:x_*$ -8_7_5_construct_wrapper:y_2% -8_7_5_construct_wrapper:1:2 \ No newline at end of file diff --git a/docs/sample_code/comm_subgraph/run.sh b/docs/sample_code/comm_subgraph/run.sh deleted file mode 100644 index f01c4c21fd..0000000000 --- a/docs/sample_code/comm_subgraph/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ -export MS_COMM_COMPILER_OPT=3 -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py diff --git a/docs/sample_code/comm_subgraph/train.py b/docs/sample_code/comm_subgraph/train.py deleted file mode 100644 index d41344c476..0000000000 --- a/docs/sample_code/comm_subgraph/train.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Subgraph Reuse Example""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn -from mindspore.communication import init - -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL, enable_parallel_optimizer=True) -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.layer1 = nn.Dense(28*28, 512) - self.layer2 = nn.Dense(512, 512) - self.layer3 = nn.Dense(512, 10) - self.relu = nn.ReLU() - - def construct(self, x): - x = self.flatten(x) - x = self.layer1(x) - x = self.relu(x) - x = self.layer2(x) - x = self.relu(x) - logits = self.layer3(x) - return logits - -net = Network() -for item in net.trainable_params(): - print(f"The parameter {item.name}'s fusion id is {item.comm_fusion}") - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = nn.CrossEntropyLoss() - -def forward_fn(data, target): - """forward propagation""" - logits = net(data) - loss = loss_fn(logits, target) - return loss, logits - -grad_fn = ms.value_and_grad(forward_fn, None, net.trainable_params(), has_aux=True) - -@ms.jit -def train_step(inputs, targets): - """train_step""" - (loss_value, _), grads = grad_fn(inputs, targets) - optimizer(grads) - return loss_value - -for epoch in range(2): - i = 0 - for image, label in data_set: - loss_output = train_step(image, label) - if i % 10 == 0: - print("epoch: %s, step: %s, loss is %s" % (epoch, i, loss_output)) - i += 1 diff --git a/docs/sample_code/convert_tf2ms_code/convert_tf2ms.py b/docs/sample_code/convert_tf2ms_code/convert_tf2ms.py deleted file mode 100644 index 9903deb2f6..0000000000 --- a/docs/sample_code/convert_tf2ms_code/convert_tf2ms.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""convert tensorflow checkpoint to mindspore checkpoint""" -import os -import numpy as np -from mindspore import Tensor -from mindspore import save_checkpoint -import tensorflow.compat.v1 as tf -from ms_lenet import LeNet5, mindspore_running -from tf_lenet import tf_running -os.environ["CUDA_VISIBLE_DEVICES"] = "4" - -params_mapping = { - "conv1/weight": "conv1.weight", - "conv2/weight": "conv2.weight", - "fc1/dense/kernel": "fc1.weight", - "fc1/dense/bias": "fc1.bias", - "fc2/dense/kernel": "fc2.weight", - "fc2/dense/bias": "fc2.bias", - "fc3/dense/kernel": "fc3.weight", - "fc3/dense/bias": "fc3.bias", -} - - -def tensorflow_param(ckpt_path): - """Get TensorFlow parameter and shape""" - tf_param = {} - reader = tf.train.load_checkpoint(ckpt_path) - for name in reader.get_variable_to_shape_map(): - try: - print(name, reader.get_tensor(name).shape) - tf_param[name] = reader.get_tensor(name) - except AttributeError as e: - print(e) - return tf_param - - -def mindspore_params(net): - """Get MindSpore parameter and shape""" - ms_param = {} - for param in net.get_parameters(): - name = param.name - value = param.data.asnumpy() - print(name, value.shape) - ms_param[name] = value - return ms_param - - -def tensorflow2mindspore(tf_ckpt_dir, param_mapping_dict, ms_ckpt_path): - """convert tensorflow ckpt to mindspore ckpt""" - reader = tf.train.load_checkpoint(tf_ckpt_dir) - new_params_list = [] - for name in param_mapping_dict: - param_dict = {} - parameter = reader.get_tensor(name) - if 'conv' in name and 'weight' in name: - # 对卷积权重进行转置 - parameter = np.transpose(parameter, axes=[3, 2, 0, 1]) - if 'fc' in name and 'kernel' in name: - parameter = np.transpose(parameter, axes=[1, 0]) - param_dict['name'] = param_mapping_dict[name] - param_dict['data'] = Tensor(parameter) - new_params_list.append(param_dict) - save_checkpoint(new_params_list, os.path.join(ms_ckpt_path, 'tf2mindspore.ckpt')) - - -def mean_relative_error(y_expect, y_pred): - """mean relative error""" - if y_expect.dtype == np.bool: - y_expect = y_expect.astype(np.int32) - y_pred = y_pred.astype(np.int32) - - rerror = np.abs(y_expect - y_pred)/np.maximum(np.abs(y_expect), np.abs(y_pred)) - rerror = rerror[~np.isnan(rerror)] - rerror = rerror[~np.isinf(rerror)] - relative_error_out = np.mean(rerror) - return relative_error_out - - -if __name__ == '__main__': - tf_model_path = './model' - tf_outputs = tf_running(tf_model_path) - tf_params = tensorflow_param(tf_model_path) - print("*" * 30) - network = LeNet5() - ms_params = mindspore_params(network) - tensorflow2mindspore(tf_model_path, params_mapping, './') - ms_outputs = mindspore_running('./tf2mindspore.ckpt') - print("************tensorflow outputs**************") - print(tf_outputs) - print("************mindspore outputs**************") - print(ms_outputs) - relative_error = mean_relative_error(tf_outputs, ms_outputs) - print("Diff: ", relative_error) diff --git a/docs/sample_code/convert_tf2ms_code/ms_lenet.py b/docs/sample_code/convert_tf2ms_code/ms_lenet.py deleted file mode 100644 index da5bea72e8..0000000000 --- a/docs/sample_code/convert_tf2ms_code/ms_lenet.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""LeNet.""" -import numpy as np -import mindspore as ms -import mindspore.nn as nn -from mindspore import Tensor, load_checkpoint, load_param_into_net -from mindspore.common.initializer import One - - -class LeNet5(nn.Cell): - """ - Lenet network - - Args: - num_class (int): Number of classes. Default: 10. - num_channel (int): Number of channels. Default: 1. - - Returns: - Tensor, output tensor - Examples: - >>> LeNet(num_class=10) - - """ - def __init__(self, num_class=1, num_channel=1): - super(LeNet5, self).__init__() - self.conv1 = nn.Conv2d(num_channel, 6, 5, weight_init='ones', pad_mode='valid') - self.conv2 = nn.Conv2d(6, 16, 5, weight_init='ones', pad_mode='valid') - self.relu = nn.ReLU() - self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) - self.flatten = nn.Flatten() - self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=One()) - self.fc2 = nn.Dense(120, 84, weight_init=One()) - self.fc3 = nn.Dense(84, num_class, weight_init=One()) - - def construct(self, x): - """construct""" - x = self.conv1(x) - x = self.relu(x) - x = self.max_pool2d(x) - x = self.conv2(x) - x = self.relu(x) - x = self.max_pool2d(x) - x = self.flatten(x) - x = self.relu(self.fc1(x)) - x = self.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -def mindspore_running(ckpt_path): - ms.set_context(mode=ms.GRAPH_MODE) - ms.set_device(device_target="GPU") - np_in = Tensor(np.ones([8, 1, 32, 32]).astype(np.float32)) - network = LeNet5() - params_dict = load_checkpoint(ckpt_path) - load_param_into_net(network, params_dict) - outs = network(np_in) - - return outs.asnumpy() - -if __name__ == '__main__': - ckpt_dir = './tf2mindspore.ckpt' - mindspore_running(ckpt_dir) diff --git a/docs/sample_code/convert_tf2ms_code/requirements.txt b/docs/sample_code/convert_tf2ms_code/requirements.txt deleted file mode 100644 index acc30b9ba7..0000000000 --- a/docs/sample_code/convert_tf2ms_code/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -tensorflow-gpu==1.15 diff --git a/docs/sample_code/convert_tf2ms_code/tf_lenet.py b/docs/sample_code/convert_tf2ms_code/tf_lenet.py deleted file mode 100644 index 549828b665..0000000000 --- a/docs/sample_code/convert_tf2ms_code/tf_lenet.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""tensorflow network""" -import os -import numpy as np -import tensorflow.compat.v1 as tf - - -def get_variable(name, shape=None, dtpye=tf.float32, initializer=tf.ones_initializer()): - """initializer""" - return tf.get_variable(name, shape, dtpye, initializer) - - -def lenet(inputs): - """lenet model definition""" - with tf.variable_scope('conv1'): - net = tf.nn.conv2d(input=inputs, filter=get_variable('weight', [5, 5, 1, 6]), - strides=[1, 1, 1, 1], padding='VALID') - net = tf.nn.relu(net) - - with tf.variable_scope('pool1'): - net = tf.nn.max_pool2d(input=net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') - - with tf.variable_scope('conv2'): - net = tf.nn.conv2d(input=net, filter=get_variable('weight', [5, 5, 6, 16]), - strides=[1, 1, 1, 1], padding='VALID') - net = tf.nn.relu(net) - - with tf.variable_scope('pool2'): - net = tf.nn.max_pool2d(input=net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') - - with tf.variable_scope('fc1'): - size = 5 * 5 * 16 - net = tf.reshape(net, shape=[-1, size]) - net = tf.layers.dense(inputs=net, units=120) - net = tf.nn.relu(net) - - with tf.variable_scope('fc2'): - net = tf.layers.dense(inputs=net, units=84) - net = tf.nn.relu(net) - - with tf.variable_scope('fc3'): - net = tf.layers.dense(inputs=net, units=1) - - return net - - -def tf_running(model_savel_path): - """tensorflow running""" - np_in = np.ones([8, 32, 32, 1]).astype(np.float32) - inputs = tf.convert_to_tensor(np_in) - - tf_network = lenet(inputs) - - init = tf.global_variables_initializer() - saver = tf.train.Saver() - with tf.Session() as sess: - sess.run(init) - res = sess.run(tf_network) - saver.save(sess, os.path.join(model_savel_path, 'lenet')) - tf_outputs = res - return tf_outputs - -if __name__ == '__main__': - model_dir = './' - outs = tf_running(model_dir) diff --git a/docs/sample_code/disaster_recover/recover.sh b/docs/sample_code/disaster_recover/recover.sh deleted file mode 100644 index 1b74c90aa1..0000000000 --- a/docs/sample_code/disaster_recover/recover.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash recover.sh" -echo "==============================================================================================================" - -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ -export MS_ENABLE_RECOVERY=1 # 开启容灾功能 -export MS_RECOVERY_PATH=/path/to/recovery/ # 设置容灾文件保存路径 - -# 启动1个Scheduler进程 -export MS_WORKER_NUM=8 # 设置集群中Worker进程数量为8 -export MS_SCHED_HOST=127.0.0.1 # 设置Scheduler IP地址为本地环路地址 -export MS_SCHED_PORT=8118 # 设置Scheduler端口 -export MS_ROLE=MS_SCHED # 设置启动的进程为MS_SCHED角色 -export MS_NODE_ID=sched # 设置本节点Node ID为'sched' -python ./train.py > device/scheduler.log 2>&1 & # 启动训练脚本 diff --git a/docs/sample_code/disaster_recover/run.sh b/docs/sample_code/disaster_recover/run.sh deleted file mode 100644 index 804cb9ee9a..0000000000 --- a/docs/sample_code/disaster_recover/run.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! 
-f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -rm -rf device -mkdir device -echo "start training" - -# 循环启动8个Worker训练进程 -for((i=0;i<8;i++)); -do - export MS_WORKER_NUM=8 # 设置集群中Worker进程数量为8 - export MS_SCHED_HOST=127.0.0.1 # 设置Scheduler IP地址为本地环路地址 - export MS_SCHED_PORT=8118 # 设置Scheduler端口 - export MS_ROLE=MS_WORKER # 设置启动的进程为MS_WORKER角色 - export MS_NODE_ID=$i # 设置进程id,可选 - python ./train.py > device/worker_$i.log 2>&1 & # 启动训练脚本 -done - -# 启动1个Scheduler进程 -export MS_WORKER_NUM=8 # 设置集群中Worker进程数量为8 -export MS_SCHED_HOST=127.0.0.1 # 设置Scheduler IP地址为本地环路地址 -export MS_SCHED_PORT=8118 # 设置Scheduler端口 -export MS_ROLE=MS_SCHED # 设置启动的进程为MS_SCHED角色 -python ./train.py > device/scheduler.log 2>&1 & # 启动训练脚本 diff --git a/docs/sample_code/disaster_recover/train.py b/docs/sample_code/disaster_recover/train.py deleted file mode 100644 index f50baabb25..0000000000 --- a/docs/sample_code/disaster_recover/train.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Disaster Recover Example""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn, train -from mindspore.communication import init, get_rank, get_group_size - -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True) -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.fc = nn.Dense(28*28, 10, weight_init="normal", bias_init="zeros") - self.relu = nn.ReLU() - - def construct(self, x): - x = self.flatten(x) - logits = self.relu(self.fc(x)) - return logits - -net = Network() - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - rank_id = get_rank() - rank_size = get_group_size() - dataset = ds.MnistDataset(dataset_path, num_shards=rank_size, shard_id=rank_id) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = nn.CrossEntropyLoss() -loss_cb = train.LossMonitor(20) -ckpt_config = train.CheckpointConfig(save_checkpoint_steps=100, keep_checkpoint_max=5) -ckpoint_cb = train.ModelCheckpoint(prefix='train', directory="./ckpt_of_rank/"+str(get_rank()), config=ckpt_config) -model = ms.Model(net, loss_fn=loss_fn, optimizer=optimizer) -model.train(10, data_set, callbacks=[loss_cb, ckpoint_cb]) 
diff --git a/docs/sample_code/distributed_graph_partition/lenet.py b/docs/sample_code/distributed_graph_partition/lenet.py deleted file mode 100644 index 2516891e53..0000000000 --- a/docs/sample_code/distributed_graph_partition/lenet.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -'''LeNet''' -import mindspore.dataset as ds -import mindspore.dataset.transforms as transforms -import mindspore.dataset.vision as vision -from mindspore import dtype as mstype -from mindspore.dataset.vision import Inter -import mindspore.nn as nn -from mindspore.common.initializer import TruncatedNormal - -def create_dataset(data_path, batch_size=32, repeat_size=1, - num_parallel_workers=1): - """ - create dataset for train or test - """ - # define dataset - mnist_ds = ds.MnistDataset(data_path) - - resize_height, resize_width = 32, 32 - rescale = 1.0 / 255.0 - shift = 0.0 - rescale_nml = 1 / 0.3081 - shift_nml = -1 * 0.1307 / 0.3081 - - # define map operations - resize_op = vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode - rescale_nml_op = vision.Rescale(rescale_nml, shift_nml) - rescale_op = vision.Rescale(rescale, shift) - hwc2chw_op = vision.HWC2CHW() - type_cast_op = transforms.TypeCast(mstype.int32) - - # apply map operations on images - mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers) - - # apply DatasetOps - buffer_size = 10000 - mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) - mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) - mnist_ds = mnist_ds.repeat(repeat_size) - return mnist_ds - -def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): - """weight initial for conv layer""" - weight = weight_variable() - return nn.Conv2d(in_channels, out_channels, - kernel_size=kernel_size, stride=stride, padding=padding, - weight_init=weight, has_bias=False, pad_mode="valid") -def fc_with_initialize(input_channels, out_channels): - """weight initial for fc layer""" - weight = weight_variable() - bias = weight_variable() - return nn.Dense(input_channels, out_channels, weight, bias) -def weight_variable(): - """weight initial""" - return TruncatedNormal(0.02) - -class LeNet(nn.Cell): - '''LeNet.''' - def __init__(self, num_class=10, channel=1): - super(LeNet, self).__init__() - self.num_class = num_class - self.conv1 = conv(channel, 6, 5) - self.conv2 = conv(6, 16, 5) - 
self.fc1 = fc_with_initialize(16 * 5 * 5, 120) - self.fc2 = fc_with_initialize(120, 84) - self.fc3 = fc_with_initialize(84, self.num_class) - self.relu = nn.ReLU() - self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) - self.flatten = nn.Flatten() - - def construct(self, x): - '''Backbone.''' - x = self.conv1(x) - x = self.relu(x) - x = self.max_pool2d(x) - x = self.conv2(x) - x = self.relu(x) - x = self.max_pool2d(x) - x = self.flatten(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.relu(x) - x = self.fc3(x) - return x - -def get_optimizer(net, lr=0.01): - momentum = 0.9 - mom_optimizer = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum) - return mom_optimizer -def get_loss(): - return nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') diff --git a/docs/sample_code/distributed_graph_partition/run.sh b/docs/sample_code/distributed_graph_partition/run.sh deleted file mode 100644 index 4233688c3e..0000000000 --- a/docs/sample_code/distributed_graph_partition/run.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# Launch scheduler and worker for distributed graph partition. -execute_path=$(pwd) - -if [ ! -d "${execute_path}/MNIST_Data" ]; then - if [ ! -f "${execute_path}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi - -self_path=$(dirname $0) - -# Set public environment. -export MS_WORKER_NUM=8 -export MS_SCHED_HOST=127.0.0.1 -export MS_SCHED_PORT=8118 -export DATA_PATH=${execute_path}/MNIST_Data/ - -# Launch scheduler. -export MS_ROLE=MS_SCHED -rm -rf ${execute_path}/sched/ -mkdir ${execute_path}/sched/ -cd ${execute_path}/sched/ || exit -python ${self_path}/../train.py > sched.log 2>&1 & - -# Launch workers. -export MS_ROLE=MS_WORKER -for((i=0;i<$MS_WORKER_NUM;i++)); -do - rm -rf ${execute_path}/worker_$i/ - mkdir ${execute_path}/worker_$i/ - cd ${execute_path}/worker_$i/ || exit - python ${self_path}/../train.py > worker_$i.log 2>&1 & -done -echo "Start training. The output is saved in sched and worker_* folder." \ No newline at end of file diff --git a/docs/sample_code/distributed_graph_partition/train.py b/docs/sample_code/distributed_graph_partition/train.py deleted file mode 100644 index 12824850b0..0000000000 --- a/docs/sample_code/distributed_graph_partition/train.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -'''Distributed graph partition''' -import os -import mindspore as ms -from mindspore.train import Accuracy -from mindspore.train import Model -from mindspore.train import LossMonitor, TimeMonitor -from mindspore.communication import init, get_rank -from lenet import LeNet, get_optimizer, get_loss, create_dataset - - -ms.set_context(mode=ms.GRAPH_MODE) -init() -net = LeNet() -net.fc1.place("MS_WORKER", 0) -net.fc2.place("MS_WORKER", 1) -net.fc3.place("MS_WORKER", 2) -net.conv1.place("MS_WORKER", 3) -net.conv2.place("MS_WORKER", 4) - -opt = get_optimizer(net) -criterion = get_loss() -model = Model(net, criterion, opt, metrics={"Accuracy": Accuracy()}) - -print("================= Start training =================", flush=True) -ds_train = create_dataset(os.path.join(os.getenv("DATA_PATH"), 'train')) -model.train(10, ds_train, callbacks=[LossMonitor(), TimeMonitor()], dataset_sink_mode=False) - -print("================= Start testing =================", flush=True) -ds_eval = create_dataset(os.path.join(os.getenv("DATA_PATH"), 'test')) -acc = model.eval(ds_eval, dataset_sink_mode=False) - -if get_rank() == 0: - print("Accuracy is:", acc) diff --git a/docs/sample_code/fault_recover/recover.sh b/docs/sample_code/fault_recover/recover.sh deleted file mode 100644 index 93ab0c1593..0000000000 --- a/docs/sample_code/fault_recover/recover.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash recover.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py --is_recover=1 diff --git a/docs/sample_code/fault_recover/run.sh b/docs/sample_code/fault_recover/run.sh deleted file mode 100644 index c8ad13d637..0000000000 --- a/docs/sample_code/fault_recover/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py diff --git a/docs/sample_code/fault_recover/train.py b/docs/sample_code/fault_recover/train.py deleted file mode 100644 index da0af1634d..0000000000 --- a/docs/sample_code/fault_recover/train.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Fault Recover Example""" - -import os -import argparse -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn, ops, train -from mindspore.communication import init, get_rank -from mindspore.common.initializer import initializer - -parser = argparse.ArgumentParser(description="Transform checkpoint dir") -parser.add_argument("--is_recover", - type=int, - default=0, - choices=[1, 0], - help="Only compile and convert the net, default is disable.") -args_opt = parser.parse_args() - -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL) -ms.set_auto_parallel_context(enable_parallel_optimizer=True) -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = ops.Flatten() - self.fc1_weight = ms.Parameter(initializer("normal", [28*28, 512], ms.float32)) - self.fc2_weight = ms.Parameter(initializer("normal", [512, 512], ms.float32)) - self.fc3_weight = ms.Parameter(initializer("normal", [512, 10], ms.float32)) - self.matmul1 = ops.MatMul() - self.relu1 = ops.ReLU() - self.matmul2 = ops.MatMul() - self.relu2 = ops.ReLU() - self.matmul3 = ops.MatMul() - - def construct(self, x): - x = self.flatten(x) - x = self.matmul1(x, self.fc1_weight) - x = self.relu1(x) - x = self.matmul2(x, self.fc2_weight) - x = self.relu2(x) - logits = self.matmul3(x, self.fc3_weight) - return logits - -net = Network() -net.matmul1.shard(((2, 4), (4, 1))) -net.relu1.shard(((4, 1),)) - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = nn.CrossEntropyLoss() -loss_cb = train.LossMonitor() -ckpt_config = train.CheckpointConfig(save_checkpoint_steps=1000, keep_checkpoint_max=4, integrated_save=False) -ckpoint_cb = train.ModelCheckpoint(prefix="checkpoint", - directory="./checkpoints/rank_{}".format(get_rank()), - config=ckpt_config) -model = ms.Model(net, loss_fn=loss_fn, optimizer=optimizer) - -if bool(args_opt.is_recover): - param_dict = ms.load_checkpoint("./checkpoints/rank_{}/checkpoint-2_1875.ckpt".format(get_rank())) - model.infer_train_layout(data_set) - ms.load_param_into_net(net, param_dict) - -model.train(2, data_set, callbacks=[loss_cb, ckpoint_cb], dataset_sink_mode=True) diff --git a/docs/sample_code/manual_parallel/run.sh b/docs/sample_code/manual_parallel/run.sh deleted file mode 100644 index c8ad13d637..0000000000 --- a/docs/sample_code/manual_parallel/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo 
"==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py diff --git a/docs/sample_code/manual_parallel/train.py b/docs/sample_code/manual_parallel/train.py deleted file mode 100644 index 57186197eb..0000000000 --- a/docs/sample_code/manual_parallel/train.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Manual Parallel Programming Guide""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn, ops, train -from mindspore.communication import init, get_rank, get_group_size - -ms.set_context(mode=ms.GRAPH_MODE) -init() -cur_rank = get_rank() -batch_size = 32 -device_num = get_group_size() -shard_size = batch_size // device_num - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.layer1 = nn.Dense(28*28, 512) - self.relu1 = nn.ReLU() - self.layer2 = nn.Dense(512, 512) - self.relu2 = nn.ReLU() - self.layer3 = nn.Dense(512, 10) - - def construct(self, x): - x = x[cur_rank*shard_size:cur_rank*shard_size + shard_size] - x = self.flatten(x) - x = self.layer1(x) - x = self.relu1(x) - x = self.layer2(x) - x = self.relu2(x) - logits = self.layer3(x) - return logits - -net = Network() - -def create_dataset(): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset() - -class ReduceLoss(nn.Cell): - """create loss""" - def __init__(self): - super().__init__() - self.loss = nn.CrossEntropyLoss() - self.all_reduce = ops.AllReduce() - - def construct(self, data, label): - label = label[cur_rank*shard_size:cur_rank*shard_size + shard_size] - loss_value = self.loss(data, label) - loss_value = self.all_reduce(loss_value) / device_num - return loss_value - -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = ReduceLoss() -loss_cb = train.LossMonitor(20) -model = ms.Model(net, loss_fn=loss_fn, optimizer=optimizer) 
-model.train(10, data_set, callbacks=[loss_cb]) diff --git a/docs/sample_code/memory_offload/run.sh b/docs/sample_code/memory_offload/run.sh deleted file mode 100644 index 3700a19ea7..0000000000 --- a/docs/sample_code/memory_offload/run.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# applicable to Ascend or GPU - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh BATCH_SIZE MEMORY_OFFLOAD" -echo "For example: bash run.sh 96 ON" -echo "==============================================================================================================" -set -e -EXEC_PATH=$(pwd) -BATCH_SIZE=$1 -MEMORY_OFFLOAD=$2 -OFFLOAD_PARAM="cpu" -AUTO_OFFLOAD=true -OFFLOAD_CPU_SIZE="512GB" -OFFLOAD_DISK_SIZE="1024GB" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/cifar-10-binary" ]; then - if [ ! -f "${EXEC_PATH}/cifar-10-binary.tar.gz" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/cifar-10-binary.tar.gz - fi - tar -zxvf cifar-10-binary.tar.gz -fi -export DATA_PATH=${EXEC_PATH}/cifar-10-batches-bin - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py \ - --batch_size=$BATCH_SIZE --memory_offload=$MEMORY_OFFLOAD \ - --offload_param=$OFFLOAD_PARAM --auto_offload=$AUTO_OFFLOAD \ - --offload_cpu_size=$OFFLOAD_CPU_SIZE --offload_disk_size=$OFFLOAD_DISK_SIZE \ - --host_mem_block_size="1GB" --enable_pinned_mem=true --enable_aio=true diff --git a/docs/sample_code/memory_offload/train.py b/docs/sample_code/memory_offload/train.py deleted file mode 100644 index 512f78be97..0000000000 --- a/docs/sample_code/memory_offload/train.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Memory Offload Example""" - -import os -import argparse -import mindspore as ms -import mindspore.dataset as ds -import mindspore.runtime as rt -from mindspore import nn -from mindspore.communication import init, get_rank, get_group_size - -parser = argparse.ArgumentParser(description='Memory offload') -parser.add_argument('--batch_size', type=int, default=32, help='Batch size.') -parser.add_argument('--memory_offload', type=str, - default="OFF", help='Memory offload.') -parser.add_argument('--offload_path', type=str, - default="./offload/", help='Offload path.') -parser.add_argument('--auto_offload', type=bool, - default=True, help='auto offload.') -parser.add_argument('--offload_param', type=str, - default="cpu", help='Offload param.') -parser.add_argument('--offload_cpu_size', type=str, - default="512GB", help='offload cpu size.') -parser.add_argument('--offload_disk_size', type=str, - default="1024GB", help='offload disk size.') -parser.add_argument('--host_mem_block_size', type=str, - default="1GB", help='host memory block size.') -parser.add_argument('--enable_pinned_mem', type=bool, - default=False, help='enable pinned mem.') -parser.add_argument('--enable_aio', type=bool, - default=False, help='enable aio.') -args_opt = parser.parse_args() - -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True) -rt.set_memory(max_size="1GB") -if args_opt.memory_offload == "ON": - ms.set_context(memory_offload="ON") - offload_config = {"offload_path": args_opt.offload_path, "auto_offload": args_opt.auto_offload, - "offload_param": args_opt.offload_param, "offload_cpu_size": args_opt.offload_cpu_size, - "offload_disk_size": args_opt.offload_disk_size, - "host_mem_block_size": args_opt.host_mem_block_size, - "enable_aio": args_opt.enable_aio, "enable_pinned_mem": args_opt.enable_pinned_mem} - print("=====offload_config====\n", offload_config, flush=True) - ms.set_offload_context(offload_config=offload_config) -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(3, 16, 3) - self.relu = nn.ReLU() - self.conv2 = nn.Conv2d(16, 16, 3) - self.avgpool = nn.AdaptiveAvgPool2d(1) - self.dense = nn.Dense(16, 10) - - def construct(self, x): - x = self.conv1(x) - x = self.relu(x) - x = self.conv2(x) - x = self.avgpool(x).squeeze() - logits = self.dense(x) - return logits - -net = Network() - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - rank_id = get_rank() - rank_size = get_group_size() - cifar_ds = ds.Cifar10Dataset(dataset_path, num_shards=rank_size, shard_id=rank_id) - - resize_height = 224 - resize_width = 224 - rescale = 1.0 / 255.0 - shift = 0.0 - - # define map operations - random_crop_op = ds.vision.RandomCrop( - (32, 32), (4, 4, 4, 4)) # padding_mode default CONSTANT - random_horizontal_op = ds.vision.RandomHorizontalFlip() - # interpolation default BILINEAR - resize_op = ds.vision.Resize((resize_height, resize_width)) - rescale_op = ds.vision.Rescale(rescale, shift) - normalize_op = ds.vision.Normalize( - (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) - changeswap_op = ds.vision.HWC2CHW() - type_cast_op = ds.transforms.TypeCast(ms.int32) - - c_trans = [random_crop_op, random_horizontal_op] - c_trans += [resize_op, rescale_op, normalize_op, - changeswap_op] - # apply map operations on images - cifar_ds = 
cifar_ds.map(operations=type_cast_op, input_columns="label") - cifar_ds = cifar_ds.map(operations=c_trans, input_columns="image") - # apply shuffle operations - cifar_ds = cifar_ds.shuffle(buffer_size=10) - # apply batch operations - cifar_ds = cifar_ds.batch( - batch_size=batch_size, drop_remainder=True) - return cifar_ds - -data_set = create_dataset(args_opt.batch_size) -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = nn.CrossEntropyLoss() - -def forward_fn(data, target): - """forward propagation""" - logits = net(data) - loss = loss_fn(logits, target) - return loss, logits - -grad_fn = ms.value_and_grad(forward_fn, None, net.trainable_params(), has_aux=True) -grad_reducer = nn.DistributedGradReducer(optimizer.parameters) - -i = 0 -step = 10 -for image, label in data_set: - (loss_value, _), grads = grad_fn(image, label) - grads = grad_reducer(grads) - optimizer(grads) - print("step: %s, loss is %s" % (i, loss_value)) - if i >= step: - break - i += 1 diff --git a/docs/sample_code/mindinsight/profiler/profiling_feed_step.py b/docs/sample_code/mindinsight/profiler/profiling_feed_step.py deleted file mode 100644 index 3325c1aa5c..0000000000 --- a/docs/sample_code/mindinsight/profiler/profiling_feed_step.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Start profiling on step in custom training.""" -import numpy as np -from mindspore import nn -import mindspore as ms -import mindspore.dataset as ds - - -class StopAtStep(ms.Callback): - """ - Start profiling base on step. - - Args: - start_step (int): The start step number. - stop_step (int): The stop step number. 
- """ - def __init__(self, start_step, stop_step): - super(StopAtStep, self).__init__() - self.start_step = start_step - self.stop_step = stop_step - self.profiler = ms.Profiler(start_profile=False, output_path='./data_step') - - def on_train_step_begin(self, run_context): - cb_params = run_context.original_args() - step_num = cb_params.cur_step_num - if step_num == self.start_step: - self.profiler.start() - - def on_train_step_end(self, run_context): - cb_params = run_context.original_args() - step_num = cb_params.cur_step_num - if step_num == self.stop_step: - self.profiler.stop() - self.profiler.analyse() - - -class Net(nn.Cell): - """The test net""" - def __init__(self): - super(Net, self).__init__() - self.fc = nn.Dense(2, 2) - - def construct(self, x): - return self.fc(x) - - -def generator(): - for _ in range(10): - yield (np.ones([2, 2]).astype(np.float32), np.ones([2]).astype(np.int32)) - - -if __name__ == '__main__': - ms.set_context(mode=ms.GRAPH_MODE) - ms.set_device("Ascend") - - profile_call_back = StopAtStep(5, 8) - - net = Net() - optimizer = nn.Momentum(net.trainable_params(), 1, 0.9) - loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) - data = ds.GeneratorDataset(generator, ["data", "label"]) - model = ms.Model(net, loss, optimizer) - model.train(3, data, callbacks=[profile_call_back], dataset_sink_mode=False) diff --git a/docs/sample_code/mindinsight/profiler/profiling_step.py b/docs/sample_code/mindinsight/profiler/profiling_step.py deleted file mode 100644 index 207e63afdc..0000000000 --- a/docs/sample_code/mindinsight/profiler/profiling_step.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""Start profiling on step in callback mode.""" -import mindspore -from mindspore import nn -from mindspore import ops -from mindspore.dataset import vision, transforms -from mindspore.dataset import MnistDataset -from mindspore import Profiler - - -class Network(nn.Cell): - """The test net""" - - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.dense_relu_sequential = nn.SequentialCell( - nn.Dense(28 * 28, 512), - nn.ReLU(), - nn.Dense(512, 512), - nn.ReLU(), - nn.Dense(512, 10) - ) - - def construct(self, x): - x = self.flatten(x) - logits = self.dense_relu_sequential(x) - return logits - - -def datapipe(dataset, batch_size): - """Get the dataset.""" - image_transforms = [ - vision.Rescale(1.0 / 255.0, 0), - vision.Normalize(mean=(0.1307,), std=(0.3081,)), - vision.HWC2CHW() - ] - label_transform = transforms.TypeCast(mindspore.int32) - - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - - -def train(epochs, model, dataset, loss_fn, optimizer): - """Train the net.""" - - def forward_fn(data, label): - logits = model(data) - loss = loss_fn(logits, label) - return loss, logits - - grad_fn = mindspore.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True) - - @mindspore.jit - def train_step(data, label): - (loss, _), grads = grad_fn(data, label) - loss = ops.depend(loss, optimizer(grads)) - return loss - - size = dataset.get_dataset_size() - model.set_train() - step = 0 - profiler = Profiler(start_profile=False, output_path='./data_step') - - iterator = dataset.create_tuple_iterator(num_epochs=epochs) - for e in range(epochs): - print(f"Epoch {e + 1}\n-------------------------------") - for batch, (data, label) in enumerate(iterator): - step = step + 1 - step_loss = train_step(data, label) - - if batch % 100 == 0: - step_loss, current = step_loss.asnumpy(), batch - print(f"loss: {step_loss:>7f} [{current:>3d}/{size:>3d}]") - - if step == 10: - profiler.start() - if step == 20: - profiler.stop() - profiler.analyse() - - -if __name__ == '__main__': - net = Network() - cross_loss = nn.CrossEntropyLoss() - opt = nn.SGD(net.trainable_params(), 1e-2) - train_dataset = MnistDataset('/dataset/MNIST_Data/train') - train_dataset = datapipe(train_dataset, 64) - - train(3, net, train_dataset, cross_loss, opt) - - print("Done!") diff --git a/docs/sample_code/nnie_proposal/CMakeLists.txt b/docs/sample_code/nnie_proposal/CMakeLists.txt deleted file mode 100644 index 1e1b050503..0000000000 --- a/docs/sample_code/nnie_proposal/CMakeLists.txt +++ /dev/null @@ -1,21 +0,0 @@ -cmake_minimum_required(VERSION 3.14) -project(nnie_proposal) - -set(MSLIB_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_patry/") -execute_process( - COMMAND ${CMAKE_COMMAND} -G ${CMAKE_GENERATOR} . - OUTPUT_VARIABLE result - WORKING_DIRECTORY ${MSLIB_DIR}) -execute_process( - COMMAND ${CMAKE_COMMAND} --build . 
- WORKING_DIRECTORY ${MSLIB_DIR}) - -#execute_process(COMMAND tar xzf "ms-prefix/src/mindspore-lite-1.0.1-runtime-arm64-cpu.tar.gz" WORKING_DIRECTORY ${MSLIB_DIR}) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_patry/ms-prefix/src/ms/include) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_patry/ms-prefix/src/third_patry/flatbuffers/include) - -aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/src SRC) - -add_library(nnie_proposal SHARED - ${SRC}) diff --git a/docs/sample_code/nnie_proposal/src/proposal.cc b/docs/sample_code/nnie_proposal/src/proposal.cc deleted file mode 100644 index 1461b302ab..0000000000 --- a/docs/sample_code/nnie_proposal/src/proposal.cc +++ /dev/null @@ -1,650 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/proposal.h" -#include -#include -#include -#include "include/errorcode.h" - -using mindspore::lite::RET_ERROR; -using mindspore::lite::RET_OK; - -namespace mindspore { -namespace proposal { - -uint32_t RpnTmpBufSize(uint32_t num_ratio_anchors, uint32_t num_scale_anchors, uint32_t input_height, - uint32_t input_width) { - uint32_t anchors_num = num_ratio_anchors * num_scale_anchors * input_height * input_width; - uint32_t anchors_size = sizeof(uint32_t) * COORDI_NUM * anchors_num; - uint32_t bbox_delta_size = anchors_size; - uint32_t proposal_size = sizeof(uint32_t) * PROPOSAL_WIDTH * anchors_num; - uint32_t ratio_anchors_size = sizeof(float) * num_ratio_anchors * COORDI_NUM; - uint32_t scale_anchors_size = sizeof(float) * num_ratio_anchors * num_scale_anchors * COORDI_NUM; - uint32_t score_size = sizeof(float) * anchors_num * 2; - uint32_t stack_size = sizeof(Stack) * anchors_num; - uint32_t total_size = - anchors_size + bbox_delta_size + proposal_size + ratio_anchors_size + scale_anchors_size + score_size + stack_size; - return total_size; -} - -static float exp_coef[10][16] = { - {1.0f, 1.00024f, 1.00049f, 1.00073f, 1.00098f, 1.00122f, 1.00147f, 1.00171f, 1.00196f, 1.0022f, 1.00244f, 1.00269f, - 1.00293f, 1.00318f, 1.00342f, 1.00367f}, - {1.0f, 1.00391f, 1.00784f, 1.01179f, 1.01575f, 1.01972f, 1.02371f, 1.02772f, 1.03174f, 1.03578f, 1.03984f, 1.04391f, - 1.04799f, 1.05209f, 1.05621f, 1.06034f}, - {1.0f, 1.06449f, 1.13315f, 1.20623f, 1.28403f, 1.36684f, 1.45499f, 1.54883f, 1.64872f, 1.75505f, 1.86825f, 1.98874f, - 2.117f, 2.25353f, 2.39888f, 2.55359f}, - {1.0f, 2.71828f, 7.38906f, 20.0855f, 54.5981f, 148.413f, 403.429f, 1096.63f, 2980.96f, 8103.08f, 22026.5f, 59874.1f, - 162755.0f, 442413.0f, 1.2026e+006f, 3.26902e+006f}, - {1.0f, 8.88611e+006f, 7.8963e+013f, 7.01674e+020f, 6.23515e+027f, 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, - 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, 5.54062e+034f, - 5.54062e+034f}, - {1.0f, 0.999756f, 0.999512f, 0.999268f, 0.999024f, 0.99878f, 0.998536f, 0.998292f, 0.998049f, 0.997805f, 0.997562f, - 0.997318f, 0.997075f, 0.996831f, 0.996588f, 0.996345f}, - {1.0f, 0.996101f, 0.992218f, 
0.98835f, 0.984496f, 0.980658f, 0.976835f, 0.973027f, 0.969233f, 0.965455f, 0.961691f, - 0.957941f, 0.954207f, 0.950487f, 0.946781f, 0.94309f}, - {1.0f, 0.939413f, 0.882497f, 0.829029f, 0.778801f, 0.731616f, 0.687289f, 0.645649f, 0.606531f, 0.569783f, 0.535261f, - 0.502832f, 0.472367f, 0.443747f, 0.416862f, 0.391606f}, - {1.0f, 0.367879f, 0.135335f, 0.0497871f, 0.0183156f, 0.00673795f, 0.00247875f, 0.000911882f, 0.000335463f, - 0.00012341f, 4.53999e-005f, 1.67017e-005f, 6.14421e-006f, 2.26033e-006f, 8.31529e-007f, 3.05902e-007f}, - {1.0f, 1.12535e-007f, 1.26642e-014f, 1.42516e-021f, 1.60381e-028f, 1.80485e-035f, 2.03048e-042f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}; -static float QuickExp(int32_t value) { - if (value & 0x80000000) { - value = ~value + 0x00000001; - return exp_coef[5][value & 0x0000000F] * exp_coef[6][(value >> 4) & 0x0000000F] * - exp_coef[7][(value >> 8) & 0x0000000F] * exp_coef[8][(value >> 12) & 0x0000000F] * - exp_coef[9][(value >> 16) & 0x0000000F]; - } else { - return exp_coef[0][value & 0x0000000F] * exp_coef[1][(value >> 4) & 0x0000000F] * - exp_coef[2][(value >> 8) & 0x0000000F] * exp_coef[3][(value >> 12) & 0x0000000F] * - exp_coef[4][(value >> 16) & 0x0000000F]; - } -} - -static int32_t SoftMax(float *src, uint32_t num) { - float max = 0; - float sum = 0; - uint32_t i = 0; - - for (i = 0; i < num; ++i) { - if (max < src[i]) { - max = src[i]; - } - } - - for (i = 0; i < num; ++i) { - src[i] = QuickExp((int32_t)((src[i] - max) * QUANT_BASE)); - sum += src[i]; - } - - for (i = 0; i < num; ++i) { - src[i] /= sum; - } - return RET_OK; -} -static void Argswap(int32_t *src1, int32_t *src2) { - for (uint32_t i = 0; i < PROPOSAL_WIDTH; i++) { - int32_t tmp = src1[i]; - src1[i] = src2[i]; - src2[i] = tmp; - } -} - -static int32_t NonRecursiveArgQuickSort(int32_t *array, int32_t low, int32_t high, Stack *stack, int32_t max_num) { - int32_t top = 0; - stack[top].min_ = low; - stack[top].max_ = high; - - while (top > -1) { - low = stack[top].min_; - high = stack[top].max_; - int32_t i = low; - int32_t j = high; - - int32_t key_confidence = array[PROPOSAL_WIDTH * low + 4]; - top--; - while (i < j) { - while ((i < j) && (key_confidence > array[j * PROPOSAL_WIDTH + 4])) { - j--; - } - if (i < j) { - Argswap(&array[i * PROPOSAL_WIDTH], &array[j * PROPOSAL_WIDTH]); - i++; - } - - while ((i < j) && (key_confidence < array[i * PROPOSAL_WIDTH + 4])) { - i++; - } - if (i < j) { - Argswap(&array[i * PROPOSAL_WIDTH], &array[j * PROPOSAL_WIDTH]); - j--; - } - } - - if (low <= max_num) { - if (low < i - 1) { - top++; - stack[top].min_ = low; - stack[top].max_ = i - 1; - } - - if (high > i + 1) { - top++; - stack[top].min_ = i + 1; - stack[top].max_ = high; - } - } - } - return RET_OK; -} - -static int32_t FilterLowScoreBbox(int32_t *proposals, uint32_t anchors_num, uint32_t filter_thresh, - uint32_t *num_after_filter) { - uint32_t proposal_cnt = anchors_num; - - if (filter_thresh > 0) { - uint32_t i; - for (i = 0; i < anchors_num; i++) { - if (proposals[PROPOSAL_WIDTH * i + 4] < (int32_t)filter_thresh) { - proposals[PROPOSAL_WIDTH * i + 5] = 1; - } - } - - proposal_cnt = 0; - for (i = 0; i < anchors_num; i++) { - if (0 == proposals[PROPOSAL_WIDTH * i + 5]) { - proposals[PROPOSAL_WIDTH * proposal_cnt] = proposals[PROPOSAL_WIDTH * i]; - proposals[PROPOSAL_WIDTH * proposal_cnt + 1] = proposals[PROPOSAL_WIDTH * i + 1]; - proposals[PROPOSAL_WIDTH * proposal_cnt + 2] = proposals[PROPOSAL_WIDTH * i + 2]; - proposals[PROPOSAL_WIDTH * proposal_cnt + 3] = proposals[PROPOSAL_WIDTH 
* i + 3]; - proposals[PROPOSAL_WIDTH * proposal_cnt + 4] = proposals[PROPOSAL_WIDTH * i + 4]; - proposals[PROPOSAL_WIDTH * proposal_cnt + 5] = proposals[PROPOSAL_WIDTH * i + 5]; - proposal_cnt++; - } - } - } - *num_after_filter = proposal_cnt; - return RET_OK; -} - -static int32_t SVP_NNIE_Overlap(int32_t x_min1, int32_t y_min1, int32_t x_max1, int32_t y_max1, int32_t x_min2, - int32_t y_min2, int32_t x_max2, int32_t y_max2, int32_t *area_sum, - int32_t *area_inter) { - /*** Check the input, and change the Return value ***/ - int32_t inter = 0; - int32_t total = 0; - int32_t x_min = 0; - int32_t y_min = 0; - int32_t x_max = 0; - int32_t y_max = 0; - int32_t area1 = 0; - int32_t area2 = 0; - int32_t inter_width = 0; - int32_t inter_height = 0; - - x_min = MAX(x_min1, x_min2); - y_min = MAX(y_min1, y_min2); - x_max = MIN(x_max1, x_max2); - y_max = MIN(y_max1, y_max2); - - inter_width = x_max - x_min + 1; - inter_height = y_max - y_min + 1; - - inter_width = (inter_width >= 0) ? inter_width : 0; - inter_height = (inter_height >= 0) ? inter_height : 0; - - inter = inter_width * inter_height; - area1 = (x_max1 - x_min1 + 1) * (y_max1 - y_min1 + 1); - area2 = (x_max2 - x_min2 + 1) * (y_max2 - y_min2 + 1); - - total = area1 + area2 - inter; - - *area_sum = total; - *area_inter = inter; - return RET_OK; -} - -static int32_t SVP_NNIE_NonMaxSuppression(int32_t *proposals, uint32_t anchors_num, uint32_t nms_thresh, - uint32_t max_roi_num) { - /****** define variables *******/ - int32_t x_min1; - int32_t y_min1; - int32_t x_max1; - int32_t y_max1; - int32_t x_min2; - int32_t y_min2; - int32_t x_max2; - int32_t y_max2; - int32_t s32AreaTotal = 0; - int32_t area_inter = 0; - uint32_t i; - uint32_t j; - uint32_t num = 0; - bool bNoOverlap; - for (i = 0; i < anchors_num && num < max_roi_num; i++) { - if (proposals[PROPOSAL_WIDTH * i + 5] == 0) { - num++; - x_min1 = proposals[PROPOSAL_WIDTH * i]; - y_min1 = proposals[PROPOSAL_WIDTH * i + 1]; - x_max1 = proposals[PROPOSAL_WIDTH * i + 2]; - y_max1 = proposals[PROPOSAL_WIDTH * i + 3]; - for (j = i + 1; j < anchors_num; j++) { - if (proposals[PROPOSAL_WIDTH * j + 5] == 0) { - x_min2 = proposals[PROPOSAL_WIDTH * j]; - y_min2 = proposals[PROPOSAL_WIDTH * j + 1]; - x_max2 = proposals[PROPOSAL_WIDTH * j + 2]; - y_max2 = proposals[PROPOSAL_WIDTH * j + 3]; - bNoOverlap = (x_min2 > x_max1) || (x_max2 < x_min1) || (y_min2 > y_max1) || (y_max2 < y_min1); - if (bNoOverlap) { - continue; - } - (void)SVP_NNIE_Overlap(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, x_max2, y_max2, &s32AreaTotal, - &area_inter); - if (area_inter * QUANT_BASE > ((int32_t)nms_thresh * s32AreaTotal)) { - if (proposals[PROPOSAL_WIDTH * i + 4] >= proposals[PROPOSAL_WIDTH * j + 4]) { - proposals[PROPOSAL_WIDTH * j + 5] = 1; - } else { - proposals[PROPOSAL_WIDTH * i + 5] = 1; - } - } - } - } - } - } - return RET_OK; -} - -static void Rpn(float **inputs, uint32_t num_ratio_anchors, uint32_t num_scale_anchors, uint32_t *scales, - uint32_t *ratios, uint32_t ori_image_height, uint32_t ori_image_width, uint32_t *inputs_height, - uint32_t *inputs_width, uint32_t *inputs_channel, uint32_t inputs_stride, uint32_t max_rois, - uint32_t min_size, uint32_t spatial_scale, uint32_t nms_thresh, uint32_t filter_thresh, - uint32_t num_before_nms, char *pu32MemPool, float *proposal_result, uint32_t dst_stride, - uint32_t *num_rois) { -#if 1 - /******************** define parameters ****************/ - uint32_t size; - int32_t *anchors = nullptr; - int32_t *bbox_delta = nullptr; - int32_t *proposals = nullptr; - 
int32_t *ptr1 = nullptr; - int32_t *ptr2 = nullptr; - int32_t *ptr3 = nullptr; - uint32_t num_after_filter = 0; - uint32_t num_anchors; - float base_w; - float base_h; - float base_x_ctr; - float base_y_ctr; - float *ratio_anchors = nullptr; - float *f32_ptr = nullptr; - float *f32_ptr2 = nullptr; - float *scale_anchors = nullptr; - float *scores = nullptr; - float f32_size; - uint32_t pixel_interval; - uint32_t src_bbox_index; - uint32_t src_fg_prob_index; - uint32_t src_bg_prob_index; - uint32_t src_bbox_bias; - uint32_t src_prob_bias; - uint32_t des_box; - uint32_t bg_blob_size; - uint32_t anchors_per_pixel; - uint32_t map_size; - uint32_t line_size; - int32_t proposal_width; - int32_t proposal_height; - uint32_t roi_count; - Stack *stack = nullptr; - uint32_t c; - uint32_t h; - uint32_t w; - uint32_t i; - uint32_t j; - uint32_t p; - uint32_t q; - uint32_t z; - uint32_t base_anchor[4] = {0, 0, (min_size - 1), (min_size - 1)}; - - /*********************************** Faster RCNN *********************************************/ - /********* calculate the start pointer of each part in MemPool *********/ - anchors = reinterpret_cast(pu32MemPool); - num_anchors = num_ratio_anchors * num_scale_anchors * (inputs_height[0] * inputs_width[0]); - size = COORDI_NUM * num_anchors; - pu32MemPool += size * sizeof(int32_t); - - bbox_delta = reinterpret_cast(pu32MemPool); - pu32MemPool += size * sizeof(int32_t); - - proposals = reinterpret_cast(pu32MemPool); - size = PROPOSAL_WIDTH * num_anchors; - pu32MemPool += size * sizeof(int32_t); - - ratio_anchors = reinterpret_cast(static_cast(pu32MemPool)); - f32_ptr = reinterpret_cast(static_cast(pu32MemPool)); - size = num_ratio_anchors * COORDI_NUM; - f32_ptr = f32_ptr + size; - - scale_anchors = f32_ptr; - size = num_scale_anchors * num_ratio_anchors * COORDI_NUM; - f32_ptr = f32_ptr + size; - - scores = f32_ptr; - size = num_anchors * SCORE_NUM; - f32_ptr = f32_ptr + size; - - stack = reinterpret_cast(f32_ptr); - - /********************* Generate the base anchor ***********************/ - base_w = static_cast(base_anchor[2] - base_anchor[0] + 1); - base_h = static_cast(base_anchor[3] - base_anchor[1] + 1); - base_x_ctr = static_cast(base_anchor[0] + ((base_w - 1) * 0.5)); - base_y_ctr = static_cast(base_anchor[1] + ((base_h - 1) * 0.5)); - - /*************** Generate Ratio Anchors for the base anchor ***********/ - f32_ptr = ratio_anchors; - f32_size = base_w * base_h; - for (i = 0; i < num_ratio_anchors; i++) { - float f32_ratios = static_cast(ratios[i]) / QUANT_BASE; - base_w = sqrt(f32_size / f32_ratios); - base_w = static_cast(1.0 * ((base_w) >= 0 ? (int32_t)(base_w + HALF_VAL) : (int32_t)(base_w - HALF_VAL))); - base_h = base_w * f32_ratios; - base_h = static_cast(1.0 * ((base_h) >= 0 ? 
(int32_t)(base_h + HALF_VAL) : (int32_t)(base_h - HALF_VAL))); - - *f32_ptr++ = static_cast(base_x_ctr - ((base_w - 1) * HALF_VAL)); - *(f32_ptr++) = static_cast(base_y_ctr - ((base_h - 1) * HALF_VAL)); - *(f32_ptr++) = static_cast(base_x_ctr + ((base_w - 1) * HALF_VAL)); - *(f32_ptr++) = static_cast(base_y_ctr + ((base_h - 1) * HALF_VAL)); - } - - /********* Generate Scale Anchors for each Ratio Anchor **********/ - f32_ptr = ratio_anchors; - f32_ptr2 = scale_anchors; - /* Generate Scale Anchors for one pixel */ - for (i = 0; i < num_ratio_anchors; i++) { - for (j = 0; j < num_scale_anchors; j++) { - base_w = *(f32_ptr + 2) - *(f32_ptr) + 1; - base_h = *(f32_ptr + 3) - *(f32_ptr + 1) + 1; - base_x_ctr = static_cast(*(f32_ptr) + ((base_w - 1) * HALF_VAL)); - base_y_ctr = static_cast(*(f32_ptr + 1) + ((base_h - 1) * HALF_VAL)); - - *(f32_ptr2++) = - static_cast(base_x_ctr - ((base_w * (static_cast(scales[j]) / QUANT_BASE) - 1) * HALF_VAL)); - *(f32_ptr2++) = - static_cast(base_y_ctr - ((base_h * (static_cast(scales[j]) / QUANT_BASE) - 1) * HALF_VAL)); - *(f32_ptr2++) = - static_cast(base_x_ctr + ((base_w * (static_cast(scales[j]) / QUANT_BASE) - 1) * HALF_VAL)); - *(f32_ptr2++) = - static_cast(base_y_ctr + ((base_h * (static_cast(scales[j]) / QUANT_BASE) - 1) * HALF_VAL)); - } - f32_ptr += COORDI_NUM; - } - - /******************* Copy the anchors to every pixel in the feature map ******************/ - ptr1 = anchors; - pixel_interval = QUANT_BASE / spatial_scale; - - for (p = 0; p < inputs_height[0]; p++) { - for (q = 0; q < inputs_width[0]; q++) { - f32_ptr2 = scale_anchors; - for (z = 0; z < num_scale_anchors * num_ratio_anchors; z++) { - *(ptr1++) = (int32_t)(q * pixel_interval + *(f32_ptr2++)); - *(ptr1++) = (int32_t)(p * pixel_interval + *(f32_ptr2++)); - *(ptr1++) = (int32_t)(q * pixel_interval + *(f32_ptr2++)); - *(ptr1++) = (int32_t)(p * pixel_interval + *(f32_ptr2++)); - } - } - } - - /********** do transpose, convert the blob from (M,C,H,W) to (M,H,W,C) **********/ - map_size = inputs_height[1] * inputs_stride / sizeof(uint32_t); - anchors_per_pixel = num_ratio_anchors * num_scale_anchors; - bg_blob_size = anchors_per_pixel * map_size; - line_size = inputs_stride / sizeof(uint32_t); - src_prob_bias = 0; - src_bbox_bias = 0; - - for (c = 0; c < inputs_channel[1]; c++) { - for (h = 0; h < inputs_height[1]; h++) { - for (w = 0; w < inputs_width[1]; w++) { - src_bbox_index = src_bbox_bias + c * map_size + h * line_size + w; - src_bg_prob_index = src_prob_bias + (c / COORDI_NUM) * map_size + h * line_size + w; - src_fg_prob_index = bg_blob_size + src_bg_prob_index; - - des_box = (anchors_per_pixel) * (h * inputs_width[1] + w) + c / COORDI_NUM; - - uint32_t des_bbox_delta_index = COORDI_NUM * des_box + c % COORDI_NUM; - bbox_delta[des_bbox_delta_index] = (int32_t)(inputs[1][src_bbox_index] * QUANT_BASE); - - uint32_t des_score_index = (SCORE_NUM)*des_box; - scores[des_score_index] = inputs[0][src_bg_prob_index]; - scores[des_score_index + 1] = inputs[0][src_fg_prob_index]; - } - } - } - - /************************* do softmax ****************************/ - f32_ptr = scores; - for (i = 0; i < num_anchors; i++) { - SoftMax(f32_ptr, SCORE_NUM); - f32_ptr += SCORE_NUM; - } - - /************************* BBox Transform *****************************/ - /* use parameters from Conv3 to adjust the coordinates of anchors */ - for (i = 0; i < num_anchors; i++) { - ptr1 = anchors; - ptr1 = ptr1 + COORDI_NUM * i; - ptr2 = proposals; - ptr2 = ptr2 + PROPOSAL_WIDTH * i; - ptr3 = bbox_delta; - ptr3 = 
ptr3 + COORDI_NUM * i; - f32_ptr = scores; - f32_ptr = f32_ptr + i * (SCORE_NUM); - - proposal_width = *(ptr1 + 2) - *(ptr1) + 1; - proposal_height = *(ptr1 + 3) - *(ptr1 + 1) + 1; - int32_t proposal_center_x = *(ptr1) + (int32_t)(proposal_width * HALF_VAL); - int32_t proposal_center_y = *(ptr1 + 1) + (int32_t)(proposal_height * HALF_VAL); - int32_t pred_center_x = (int32_t)((static_cast(*(ptr3)) / QUANT_BASE) * proposal_width + proposal_center_x); - int32_t pred_center_y = - (int32_t)((static_cast(*(ptr3 + 1)) / QUANT_BASE) * proposal_height + proposal_center_y); - - int32_t pred_w = (int32_t)(proposal_width * QuickExp((int32_t)(*(ptr3 + 2)))); - int32_t pred_h = (int32_t)(proposal_height * QuickExp((int32_t)(*(ptr3 + 3)))); - *(ptr2) = (int32_t)(pred_center_x - HALF_VAL * pred_w); - *(ptr2 + 1) = (int32_t)(pred_center_y - HALF_VAL * pred_h); - *(ptr2 + 2) = (int32_t)(pred_center_x + HALF_VAL * pred_w); - *(ptr2 + 3) = (int32_t)(pred_center_y + HALF_VAL * pred_h); - *(ptr2 + 4) = (int32_t)(*(f32_ptr + 1) * QUANT_BASE); - *(ptr2 + 5) = 0; - } - - /************************ clip bbox *****************************/ - for (i = 0; i < num_anchors; i++) { - ptr1 = proposals; - ptr1 = ptr1 + PROPOSAL_WIDTH * i; - *ptr1 = MAX(MIN(*ptr1, (int32_t)ori_image_width - 1), 0); - *(ptr1 + 1) = MAX(MIN(*(ptr1 + 1), (int32_t)ori_image_height - 1), 0); - *(ptr1 + 2) = MAX(MIN(*(ptr1 + 2), (int32_t)ori_image_width - 1), 0); - *(ptr1 + 3) = MAX(MIN(*(ptr1 + 3), (int32_t)ori_image_height - 1), 0); - } - - /************ remove the bboxes which are too small *************/ - for (i = 0; i < num_anchors; i++) { - ptr1 = proposals; - ptr1 = ptr1 + PROPOSAL_WIDTH * i; - proposal_width = *(ptr1 + 2) - *(ptr1) + 1; - proposal_height = *(ptr1 + 3) - *(ptr1 + 1) + 1; - if (proposal_width < (int32_t)min_size || proposal_height < (int32_t)min_size) { - *(ptr1 + 5) = 1; - } - } - - /********** remove low score bboxes ************/ - (void)FilterLowScoreBbox(proposals, num_anchors, filter_thresh, &num_after_filter); - - /********** sort ***********/ - (void)NonRecursiveArgQuickSort(proposals, 0, num_after_filter - 1, stack, static_cast(num_before_nms)); - num_after_filter = (num_after_filter < num_before_nms) ? 
num_after_filter : num_before_nms;
-
-  /* do nms to remove highly overlapped bbox */
-  (void)SVP_NNIE_NonMaxSuppression(proposals, num_after_filter, nms_thresh, max_rois); /* function NMS */
-
-  /************** write the final result to output ***************/
-  roi_count = 0;
-  for (i = 0; i < num_after_filter; i++) {
-    ptr1 = proposals;
-    ptr1 = ptr1 + PROPOSAL_WIDTH * i;
-    if (*(ptr1 + 5) == 0) {
-      /* In this sample, the output ROI coordinates will be used as hardware input,
-         so the coordinates are converted to the HI_S20Q12 type */
-      proposal_result[dst_stride / sizeof(uint32_t) * roi_count] = *ptr1;
-      proposal_result[dst_stride / sizeof(uint32_t) * roi_count + 1] = *(ptr1 + 1);
-      proposal_result[dst_stride / sizeof(uint32_t) * roi_count + 2] = *(ptr1 + 2);
-      proposal_result[dst_stride / sizeof(uint32_t) * roi_count + 3] = *(ptr1 + 3);
-      roi_count++;
-    }
-    if (roi_count >= max_rois) {
-      break;
-    }
-  }
-
-  *num_rois = roi_count;
-#endif
-}
-
-int32_t ProposalInit(ProposalParam *param, std::vector *inputs, uint32_t max_roi_num,
-                     uint32_t ori_image_height, uint32_t ori_image_width) {
-  uint32_t tmp_buf_size = 0;
-  uint32_t bbox_buf_size = 0;
-  uint32_t total_size = 0;
-  param->max_roi_num_ = max_roi_num;
-
-  param->num_ratio_anchors_ = 1;
-  param->num_scale_anchors_ = 9;
-  param->scales_[0] = 1.5 * QUANT_BASE;
-  param->scales_[1] = 2.1 * QUANT_BASE;
-  param->scales_[2] = 2.9 * QUANT_BASE;
-  param->scales_[3] = 4.1 * QUANT_BASE;
-  param->scales_[4] = 5.8 * QUANT_BASE;
-  param->scales_[5] = 8.0 * QUANT_BASE;
-  param->scales_[6] = 11.3 * QUANT_BASE;
-  param->scales_[7] = 15.8 * QUANT_BASE;
-  param->scales_[8] = 22.1 * QUANT_BASE;
-  param->ratios_[0] = 2.44 * QUANT_BASE;
-
-  param->ori_image_height_ = ori_image_height;
-  param->ori_image_width_ = ori_image_width;
-  param->min_size_ = 16;
-  param->spatial_scale_ = (uint32_t)(0.0625 * QUANT_BASE);
-  param->nms_thresh_ = (uint32_t)(0.7 * QUANT_BASE);
-  param->filter_thresh_ = 0;
-  param->num_before_nms_ = 6000;
-
-  param->rpn_bounding_box_.chn_ = 1;
-  param->rpn_bounding_box_.height_ = max_roi_num;
-  param->rpn_bounding_box_.width_ = COORDI_NUM;
-  param->rpn_bounding_box_.stride_ = COORDI_NUM * sizeof(float);
-  param->rpn_bounding_box_.num_ = 1;
-  if (inputs->size() < 2) {
-    LOGE("inputs tensor size error.");
-    return RET_ERROR;
-  }
-
-  for (int i = 0; i < 2; i++) {
-    auto input_data_type = inputs->at(i)->data_type();
-    if (input_data_type == mindspore::kNumberTypeFloat32) {
-      auto ptr_shape = (*inputs)[i]->shape();
-      // (*inputs)[i]->Format(): check the format here
-      if ((ptr_shape.size() == 4)) {
-        param->inputs_height_[i] = ptr_shape[2];
-        param->inputs_width_[i] = ptr_shape[3];
-        param->inputs_channel_[i] = ptr_shape[1];
-        if (0 == i) {
-          param->inputs_stride_ = ptr_shape[3] * sizeof(float);
-        }
-      }
-    }
-  }
-
-  tmp_buf_size = RpnTmpBufSize(param->num_ratio_anchors_, param->num_scale_anchors_, param->inputs_height_[0],
-                               param->inputs_width_[0]);
-
-  bbox_buf_size = param->rpn_bounding_box_.num_ * param->rpn_bounding_box_.height_ * param->rpn_bounding_box_.stride_;
-  total_size = tmp_buf_size + bbox_buf_size;
-
-  if (param->rpn_tmp_buf_ != nullptr) {
-    free(param->rpn_tmp_buf_);
-    param->rpn_tmp_buf_ = nullptr;
-  }
-  param->rpn_tmp_buf_ = malloc(total_size);
-  if (param->rpn_tmp_buf_ == nullptr) {
-    LOGE("malloc buf fail.");
-    return RET_ERROR;
-  }
-  param->rpn_bounding_box_.data_ = reinterpret_cast(param->rpn_tmp_buf_) + tmp_buf_size;
-
-  return RET_OK;
-}
-
-int32_t ProposalRun(std::vector *inputs,
-                    std::vector *outputs, ProposalParam *param) {
-  if
(inputs->size() < 2) { - LOGE("inputs tensor size error."); - return RET_ERROR; - } - if (outputs->size() != 1) { - LOGE("outputs tensor size error."); - return RET_ERROR; - } - for (int i = 0; i < 2; i++) { - auto input_data_type = inputs->at(i)->data_type(); - if (input_data_type == mindspore::kNumberTypeFloat32) { - param->inputs_[i] = reinterpret_cast((*inputs)[i]->MutableData()); - } - } - auto output_data_type = (*outputs)[0]->data_type(); - if (output_data_type != mindspore::kNumberTypeFloat32) { - LOGE("outputs tensor data type error."); - return RET_ERROR; - } - - Rpn(param->inputs_, param->num_ratio_anchors_, param->num_scale_anchors_, param->scales_, param->ratios_, - param->ori_image_height_, param->ori_image_width_, param->inputs_height_, param->inputs_width_, - param->inputs_channel_, param->inputs_stride_, param->max_roi_num_, param->min_size_, param->spatial_scale_, - param->nms_thresh_, param->filter_thresh_, param->num_before_nms_, reinterpret_cast(param->rpn_tmp_buf_), - reinterpret_cast(param->rpn_bounding_box_.data_), param->rpn_bounding_box_.stride_, - ¶m->rpn_bounding_box_.height_); - - std::vector shape{static_cast(param->rpn_bounding_box_.height_), COORDI_NUM}; - (*outputs)[0]->set_shape(shape); - auto output_data = (*outputs)[0]->MutableData(); - memcpy(output_data, param->rpn_bounding_box_.data_, param->rpn_bounding_box_.height_ * COORDI_NUM * sizeof(float)); - - return RET_OK; -} - -void ProposalDeInit(ProposalParam *param) { - if (param->rpn_tmp_buf_ != 0) { - free(param->rpn_tmp_buf_); - param->rpn_tmp_buf_ = 0; - } -} -} // namespace proposal -} // namespace mindspore diff --git a/docs/sample_code/nnie_proposal/src/proposal.h b/docs/sample_code/nnie_proposal/src/proposal.h deleted file mode 100644 index 19ec43e25c..0000000000 --- a/docs/sample_code/nnie_proposal/src/proposal.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_H_ -#define MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_H_ -#include -#include "include/ms_tensor.h" - -#define LOG_TAG1 "Proposal" -#define LOGE(format, ...) \ - do { \ - if (1) { \ - fprintf(stderr, "\n[ERROR] " LOG_TAG1 " [" __FILE__ ":%d] %s] ", __LINE__, __FUNCTION__); \ - fprintf(stderr, format, ##__VA_ARGS__); \ - } \ - } while (0) - -#define LOGW(format, ...) \ - do { \ - if (1) { \ - fprintf(stderr, "\n[Warning] " LOG_TAG1 " [" __FILE__ ":%d] %s] ", __LINE__, __FUNCTION__); \ - fprintf(stderr, format, ##__VA_ARGS__); \ - } \ - } while (0) - -namespace mindspore { -namespace proposal { - -typedef struct { - uint32_t stride_; - void *data_; - uint32_t num_; - uint32_t width_; - uint32_t height_; - uint32_t chn_; -} RpnBoundingBox; - -#define MAX(a, b) (((a) > (b)) ? (a) : (b)) -#define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) -#define HALF_VAL 0.5f /*the half value*/ -#define COORDI_NUM 4 /*coordinate numbers*/ -#define PROPOSAL_WIDTH 6 /*the number of proposal values*/ -#define QUANT_BASE 4096 /*the base value*/ -#define SCORE_NUM 2 /*the num of RPN scores*/ - -typedef struct { - uint32_t scales_[9]; - uint32_t ratios_[9]; - uint32_t inputs_height_[2]; - uint32_t inputs_width_[2]; - uint32_t inputs_channel_[2]; - uint32_t inputs_stride_; - uint32_t num_ratio_anchors_; - uint32_t num_scale_anchors_; - uint32_t ori_image_height_; - uint32_t ori_image_width_; - uint32_t min_size_; - uint32_t spatial_scale_; - uint32_t nms_thresh_; - uint32_t filter_thresh_; - uint32_t max_roi_num_; - uint32_t num_before_nms_; - float *inputs_[2]; - void *rpn_tmp_buf_; - RpnBoundingBox rpn_bounding_box_; -} ProposalParam; - -typedef struct { - int32_t min_; - int32_t max_; -} Stack; - -int32_t ProposalInit(ProposalParam *param, std::vector *inputs, uint32_t max_roi_num, - uint32_t ori_image_height, uint32_t ori_image_width); -int32_t ProposalRun(std::vector *inputs, - std::vector *outputs, ProposalParam *param); -void ProposalDeInit(ProposalParam *param); -} // namespace proposal -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_H_ diff --git a/docs/sample_code/nnie_proposal/src/proposal_fp32.cc b/docs/sample_code/nnie_proposal/src/proposal_fp32.cc deleted file mode 100644 index a90b61870d..0000000000 --- a/docs/sample_code/nnie_proposal/src/proposal_fp32.cc +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/proposal_fp32.h" -#include -#include -#include "include/schema/model_generated.h" -#include "include/registry/register_kernel.h" -#include "include/errorcode.h" - -using mindspore::lite::RET_ERROR; -using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Custom; -#define MAX_SIZE 1024 - -namespace mindspore { -namespace proposal { -int ProposalCPUKernel::Prepare() { - if (inputs_.size() < 2) { - LOGE("inputs tensor num error."); - return RET_ERROR; - } - if (outputs_.size() != 1) { - LOGE("outputs tensor num error."); - return RET_ERROR; - } - std::vector inputs_name = {"rpn_cls_score", "rpn_bbox_pred"}; - std::vector inputs; - for (size_t i = 0; i < inputs_name.size(); i++) { - bool find_flag = false; - for (auto &input : inputs_) { - if (input->tensor_name() == inputs_name[i]) { - inputs.push_back(input); - find_flag = true; - break; - } - } - if (!find_flag) { - for (auto &input : inputs_) { - if (std::find(inputs.begin(), inputs.end(), input) != inputs.end()) { - continue; - } - inputs.push_back(input); - LOGW("input tensor name diff '%s' vs '%s'.", inputs_name[i].c_str(), input->tensor_name().c_str()); - break; - } - } - } - if (inputs.size() != inputs_name.size()) { - LOGE("inputs size error."); - return RET_ERROR; - } - - this->set_inputs(inputs); - if (inputs[0]->shape()[0] != 1) { - LOGE("proposal only support input num == 1."); - return RET_ERROR; - } - - outputs_[0]->set_tensor_name("proposal"); - - int max_roi_num_int = 300; - auto *max_roi_num = std::getenv("MAX_ROI_NUM"); - if (max_roi_num != nullptr) { - auto iter = - std::find_if(max_roi_num, max_roi_num + strlen(max_roi_num), [](char val) { return val < '0' || val > '9'; }); - if (iter != max_roi_num) { - *iter = '\0'; - max_roi_num_int = atoi(max_roi_num); - } else { - LOGW("MAX_ROI_NUM ENV is invalid, now set to default value %d", max_roi_num_int); - } - } else { - LOGW("MAX_ROI_NUM ENV is not set, now set to default value %d", max_roi_num_int); - } - - return ProposalInit(&proposal_param_, &inputs_, max_roi_num_int, image_height_, image_weight_); -} - -int ProposalCPUKernel::ReSize() { - if (inputs_[0]->shape()[0] != 1) { - LOGE("proposal only support input num == 1."); - return RET_ERROR; - } - return RET_OK; -} - -int ProposalCPUKernel::Execute() { return ProposalRun(&inputs_, &outputs_, &proposal_param_); } - -ProposalCPUKernel::~ProposalCPUKernel() { ProposalDeInit(&proposal_param_); } - -bool GetCustomAttr(char *buf, int buf_size, const mindspore::schema::Custom *op, const std::string &attr) { - int attr_size; - for (size_t i = 0; i < op->attr()->size(); i++) { - if (op->attr()->Get(i)->name()->str() == attr) { - auto output_info = op->attr()->Get(i)->data(); - attr_size = static_cast(output_info->size()); - if (attr_size >= buf_size) { - LOGE("attr size too big"); - return false; - } - for (int j = 0; j < attr_size; j++) { - buf[j] = static_cast(output_info->Get(j)); - } - buf[attr_size] = 0; - return true; - } - } - return false; -} - -std::shared_ptr ProposalCreateKernel( - const std::vector &inputs, const std::vector &outputs, - const mindspore::schema::Primitive *primitive, const mindspore::lite::Context *ctx) { - if (primitive->value_type() != mindspore::schema::PrimitiveType_Custom) { - LOGE("Primitive type is not PrimitiveType_Custom"); - return nullptr; - } - - auto op = primitive->value_as_Custom(); - if (op->attr()->size() < 1) { - LOGE("There are at least 1 attribute of Custom"); - return nullptr; - } - int64_t ndims; - int64_t image_height; - int64_t image_width; - 
- char *res = nullptr; - char buf[MAX_SIZE]; - if (GetCustomAttr(buf, MAX_SIZE, op, "proposal_id")) { - res = nullptr; - ndims = strtol(buf, &res, 10); - if ((*res) != 0) { - LOGE("Get attr id data fail"); - return nullptr; - } - } else { - LOGE("Proposal Custom op should have id"); - return nullptr; - } - - if (GetCustomAttr(buf, MAX_SIZE, op, "image_height")) { - res = nullptr; - image_height = strtol(buf, &res, 10); - if ((*res) != 0) { - LOGE("Get attr id data fail"); - return nullptr; - } - } else { - LOGE("Proposal Custom op should have image_height"); - return nullptr; - } - if (GetCustomAttr(buf, MAX_SIZE, op, "image_width")) { - res = nullptr; - image_width = strtol(buf, &res, 10); - if ((*res) != 0) { - LOGE("Get attr id data fail"); - return nullptr; - } - } else { - LOGE("Proposal Custom op should have image_width"); - return nullptr; - } - - auto kernel = std::make_shared(inputs, outputs, primitive, ctx, ndims, image_height, image_width); - // auto kernel = new (std::nothrow) ProposalCPUKernel(inputs, outputs, primitive, ctx, ndims, image_height, - // image_width); - if (kernel == nullptr) { - LOGE("new custom kernel is nullptr"); - return nullptr; - } - return kernel; -} -} // namespace proposal -} // namespace mindspore - -namespace mindspore { -namespace kernel { -REGISTER_CUSTOM_KERNEL(CPU, NNIE, kNumberTypeFloat32, Proposal, proposal::ProposalCreateKernel) -} // namespace kernel -} // namespace mindspore diff --git a/docs/sample_code/nnie_proposal/src/proposal_fp32.h b/docs/sample_code/nnie_proposal/src/proposal_fp32.h deleted file mode 100644 index ed2d47aa78..0000000000 --- a/docs/sample_code/nnie_proposal/src/proposal_fp32.h +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_FP32_H_ -#define MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_FP32_H_ - -#include -#include "include/schema/model_generated.h" -#include "include/context.h" -#include "include/kernel.h" -#include "src/proposal.h" - -using mindspore::kernel::Kernel; -namespace mindspore { -namespace proposal { -class ProposalCPUKernel : public Kernel { - public: - ProposalCPUKernel(const std::vector &inputs, - const std::vector &outputs, - const mindspore::schema::Primitive *primitive, const mindspore::lite::Context *ctx, int id, - int image_height, int image_width) - : Kernel(inputs, outputs, primitive, ctx), id_(id), image_height_(image_height), image_weight_(image_width) {} - - ~ProposalCPUKernel() override; - - int Prepare() override; - int ReSize() override; - int Execute() override; - - private: - proposal::ProposalParam proposal_param_ = {0}; - int64_t id_; - int64_t image_height_; - int64_t image_weight_; -}; -} // namespace proposal -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_FP32_H_ diff --git a/docs/sample_code/nnie_proposal/src/proposal_infer.cc b/docs/sample_code/nnie_proposal/src/proposal_infer.cc deleted file mode 100644 index b689b3df0e..0000000000 --- a/docs/sample_code/nnie_proposal/src/proposal_infer.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "src/proposal_infer.h"
-#include
-#include
-#include "include/errorcode.h"
-#include "src/proposal.h"
-
-using mindspore::kernel::KernelInterface;
-using mindspore::lite::RET_ERROR;
-using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Custom;
-
-namespace mindspore {
-namespace proposal {
-std::shared_ptr ProposalInferCreater() {
-  auto infer = std::make_shared();
-  if (infer == nullptr) {
-    LOGE("new custom infer is nullptr");
-    return nullptr;
-  }
-
-  return infer;
-}
-int ProposalInterface::Infer(const std::vector &inputs,
-                             const std::vector &outputs,
-                             const mindspore::schema::Primitive *primitive) {
-  if (inputs.size() != 2) {
-    LOGE("Inputs size less 2");
-    return RET_ERROR;
-  }
-  if (outputs.size() == 0) {
-    LOGE("Outputs size 0");
-    return RET_ERROR;
-  }
-  if (primitive->value_type() != mindspore::schema::PrimitiveType_Custom) {
-    LOGE("Primitive type is not PrimitiveType_Custom");
-    return RET_ERROR;
-  }
-
-  size_t id = 0;
-  while (id < outputs.size()) {
-    // To be completed
-    // outputs[id]->format_ = input->format_;
-    // outputs[id]->data_type_ = kNumberTypeFloat32;
-    // set the type to int
-    std::vector shape{-1, COORDI_NUM};
-    outputs[id]->set_shape(shape);
-    id++;
-  }
-  return RET_OK;
-}
-}  // namespace proposal
-}  // namespace mindspore
-namespace mindspore {
-namespace kernel {
-// static KernelInterfaceReg a(aa, schema::PrimitiveType_Custom, CustomInferCreater);
-REGISTER_CUSTOM_KERNEL_INTERFACE(NNIE, Proposal, proposal::ProposalInferCreater);
-}  // namespace kernel
-}  // namespace mindspore
diff --git a/docs/sample_code/nnie_proposal/src/proposal_infer.h b/docs/sample_code/nnie_proposal/src/proposal_infer.h
deleted file mode 100644
index b384817af3..0000000000
--- a/docs/sample_code/nnie_proposal/src/proposal_infer.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_INFER_H_ -#define MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_INFER_H_ -#include -#include "include/registry/kernel_interface.h" - -namespace mindspore { -namespace proposal { -class ProposalInterface : public mindspore::kernel::KernelInterface { - public: - ProposalInterface() {} - - ~ProposalInterface() = default; - - int Infer(const std::vector &inputs, - const std::vector &outputs, - const mindspore::schema::Primitive *primitive) override; -}; -} // namespace proposal -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_BENCHMARK_NNIE_PROPOSAL_PROPOSAL_INFER_H_ diff --git a/docs/sample_code/nnie_proposal/third_patry/CMakeLists.txt b/docs/sample_code/nnie_proposal/third_patry/CMakeLists.txt deleted file mode 100644 index 5aa873e18c..0000000000 --- a/docs/sample_code/nnie_proposal/third_patry/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -cmake_minimum_required(VERSION 3.14) -project(ms_download) - -include(ExternalProject) -set(MS_VESION "1.3.0") -message(STATUS "download mindspore-lite-${MS_VESION}-runtime-arm64-cpu") - -SET(MS_URL https://ms-release.obs.cn-north-4.myhuaweicloud.com/${MS_VESION}/lite/android_aarch64/mindspore-lite-${MS_VESION}-runtime-arm64-cpu.tar.gz) - -ExternalProject_Add( - ms - URL ${MS_URL} - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - UPDATE_COMMAND "" # Skip annoying updates for every build - # Disable install step - INSTALL_COMMAND "" -) - -add_custom_target(ms_download ALL - DEPENDS ms - ) diff --git a/docs/sample_code/parallel_support_dynamic_shape/main.py b/docs/sample_code/parallel_support_dynamic_shape/main.py deleted file mode 100644 index b3df2aab1f..0000000000 --- a/docs/sample_code/parallel_support_dynamic_shape/main.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""Distributed parallel support dynamic shape example.""" -import os -import mindspore as ms -import mindspore.dataset as ds -import mindspore.runtime as rt -from mindspore import nn, ops, Model -from mindspore import Symbol, Tensor, Parameter -from mindspore.communication import init -from mindspore.common.initializer import initializer -from mindspore.train import LossMonitor - -ms.set_context(mode=ms.GRAPH_MODE) -rt.set_memory(max_size="28GB") -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL) -init() -ms.set_seed(1) - - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - - -class Network(nn.Cell): - """Network""" - - def __init__(self): - super().__init__() - self.flatten = ops.Flatten() - self.fc1_weight = Parameter(initializer("normal", [28 * 28, 512], ms.float32)) - self.fc2_weight = Parameter(initializer("normal", [512, 512], ms.float32)) - self.fc3_weight = Parameter(initializer("normal", [512, 10], ms.float32)) - self.matmul1 = ops.MatMul().shard(((2, 4), (4, 1))) - self.relu1 = ops.ReLU().shard(((4, 1),)) - self.matmul2 = ops.MatMul().shard(((1, 8), (8, 1))) - self.relu2 = ops.ReLU().shard(((8, 1),)) - self.matmul3 = ops.MatMul() - - def construct(self, x): - x = ops.reshape(x, (-1, 784)) - x = self.matmul1(x, self.fc1_weight) - x = self.relu1(x) - x = self.matmul2(x, self.fc2_weight) - x = self.relu2(x) - return self.matmul3(x, self.fc3_weight) - - -net = Network() - -data_set = create_dataset(32) # (32, 1, 28, 28) (32,) -optimizer = nn.SGD(net.trainable_params(), 1e-3) -loss_fn = nn.SoftmaxCrossEntropyWithLogits(True) - -s0 = Symbol(divisor=8) -input_dyn = Tensor(shape=[s0, 1, 28, 28], dtype=ms.float32) -label_dyn = Tensor(shape=[s0], dtype=ms.int32) -net.set_inputs(input_dyn) -loss_fn.set_inputs(input_dyn, label_dyn) - -model = Model(net, loss_fn, optimizer) -model.train(5, data_set, callbacks=[LossMonitor()], dataset_sink_mode=False) diff --git a/docs/sample_code/parallel_support_dynamic_shape/run.sh b/docs/sample_code/parallel_support_dynamic_shape/run.sh deleted file mode 100644 index acad9ed637..0000000000 --- a/docs/sample_code/parallel_support_dynamic_shape/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! 
-f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python main.py diff --git a/docs/sample_code/parameter_server/run.sh b/docs/sample_code/parameter_server/run.sh deleted file mode 100644 index b13654e8cb..0000000000 --- a/docs/sample_code/parameter_server/run.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -rm -rf output -mkdir output - -# run Scheduler process -export MS_SERVER_NUM=8 -export MS_WORKER_NUM=8 -export MS_SCHED_HOST=127.0.0.1 -export MS_SCHED_PORT=8118 -export MS_ROLE=MS_SCHED -python train.py > output/scheduler.log 2>&1 & - -# run Server processes -export MS_SERVER_NUM=8 -export MS_WORKER_NUM=8 -export MS_SCHED_HOST=127.0.0.1 -export MS_SCHED_PORT=8118 -export MS_ROLE=MS_PSERVER -for((server_id=0;server_id<${MS_SERVER_NUM};server_id++)) -do - python train.py > output/server_${server_id}.log 2>&1 & -done - -# run Wroker processes -export MS_SERVER_NUM=8 -export MS_WORKER_NUM=8 -export MS_SCHED_HOST=127.0.0.1 -export MS_SCHED_PORT=8118 -export MS_ROLE=MS_WORKER -for((worker_id=0;worker_id<${MS_WORKER_NUM};worker_id++)) -do - python train.py > output/worker_${worker_id}.log 2>&1 & -done diff --git a/docs/sample_code/parameter_server/train.py b/docs/sample_code/parameter_server/train.py deleted file mode 100644 index 819540afbe..0000000000 --- a/docs/sample_code/parameter_server/train.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Parameter Server Example""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn -from mindspore.communication import init - -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(full_batch=True, parallel_mode=ms.ParallelMode.DATA_PARALLEL) -ms.set_ps_context(enable_ps=True) -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.fc1 = nn.Dense(28*28, 10, weight_init="normal", bias_init="zeros") - self.relu = nn.ReLU() - self.fc2 = nn.Dense(10, 1, weight_init="normal", bias_init="zeros") - - def construct(self, x): - x = self.flatten(x) - logits = self.fc2(self.relu(self.fc1(x))) - return logits - -net = Network() -net.set_param_ps() - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.SGD(net.trainable_params(), 1e-2) -loss_fn = nn.MSELoss() - -def forward_fn(data, target): - """forward propagation""" - logits = net(data) - loss = loss_fn(logits, target) - return loss, logits - -grad_fn = ms.value_and_grad(forward_fn, None, net.trainable_params(), has_aux=True) - -@ms.jit -def train_step(inputs, targets): - """train_step""" - (loss_value, _), grads = grad_fn(inputs, targets) - optimizer(grads) - return loss_value - -for epoch in range(10): - i = 0 - for image, label in data_set: - loss_output = train_step(image, label) - if i % 10 == 0: - print("epoch: %s, step: %s, loss is %s" % (epoch, i, loss_output)) - i += 1 diff --git a/docs/sample_code/sapp/run.sh b/docs/sample_code/sapp/run.sh deleted file mode 100644 index c8ad13d637..0000000000 --- a/docs/sample_code/sapp/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! -f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py diff --git a/docs/sample_code/sapp/train.py b/docs/sample_code/sapp/train.py deleted file mode 100644 index 72151dacb5..0000000000 --- a/docs/sample_code/sapp/train.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Recursive Programming Guide""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn -from mindspore.communication import init - -os.environ['MS_DEV_SAVE_GRAPHS'] = '2' -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.AUTO_PARALLEL, search_mode="recursive_programming") -init() -ms.set_seed(1) - -class Network(nn.Cell): - """Network""" - def __init__(self): - super().__init__() - self.flatten = nn.Flatten() - self.layer1 = nn.Dense(28*28, 512) - self.layer2 = nn.Dense(512, 512) - self.layer3 = nn.Dense(512, 1) - self.relu = nn.ReLU() - - def construct(self, x): - x = self.flatten(x) - x = self.layer1(x) - x = self.relu(x) - x = self.layer2(x) - x = self.relu(x) - logits = self.layer3(x) - return logits - -net = Network() -net.set_train() - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.Momentum(net.trainable_params(), 1e-3, 0.1) -loss_fn = nn.MAELoss() - -def forward_fn(data, target): - """forward propagation""" - logits = net(data) - loss = loss_fn(logits, target) - return loss, logits - -grad_fn = ms.value_and_grad(forward_fn, None, net.trainable_params(), has_aux=True) - -@ms.jit -def train_step(inputs, targets): - """train_step""" - (loss_value, _), grads = grad_fn(inputs, targets) - optimizer(grads) - return loss_value - -for epoch in range(10): - i = 0 - for image, label in data_set: - loss_output = train_step(image, label) - if i % 100 == 0: - print("epoch: %s, step: %s, loss is %s" % (epoch, i, loss_output)) - i += 1 diff --git a/docs/sample_code/sharding_propagation/run.sh b/docs/sample_code/sharding_propagation/run.sh deleted file mode 100644 index c8ad13d637..0000000000 --- a/docs/sample_code/sharding_propagation/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "==============================================================================================================" -echo "Please run the script as: " -echo "bash run.sh" -echo "==============================================================================================================" - -EXEC_PATH=$(pwd) - -if [ ! -d "${EXEC_PATH}/MNIST_Data" ]; then - if [ ! 
-f "${EXEC_PATH}/MNIST_Data.zip" ]; then - wget http://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip - fi - unzip MNIST_Data.zip -fi -export DATA_PATH=${EXEC_PATH}/MNIST_Data/train/ - -mpirun -n 8 --output-filename log_output --merge-stderr-to-stdout python train.py diff --git a/docs/sample_code/sharding_propagation/train.py b/docs/sample_code/sharding_propagation/train.py deleted file mode 100644 index f9dc6d39e3..0000000000 --- a/docs/sample_code/sharding_propagation/train.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Sharding Propagation Programming Guide""" - -import os -import mindspore as ms -import mindspore.dataset as ds -from mindspore import nn, ops -from mindspore.communication import init -from mindspore.common.initializer import initializer - -os.environ['MS_DEV_SAVE_GRAPHS'] = '2' -ms.set_context(mode=ms.GRAPH_MODE) -ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.AUTO_PARALLEL, search_mode="sharding_propagation") -init() -ms.set_seed(1) - -class Dense(nn.Cell): - """Dense layer""" - def __init__(self, in_channels, out_channels): - super().__init__() - self.weight = ms.Parameter(initializer("normal", [in_channels, out_channels], ms.float32)) - self.bias = ms.Parameter(initializer("normal", [out_channels], ms.float32)) - # matmul即为被切分的算子 - self.matmul = ops.MatMul() - self.add = ops.Add() - - def construct(self, x): - x = self.matmul(x, self.weight) - x = self.add(x, self.bias) - return x - -class FFN(nn.Cell): - """FeedForward Network""" - def __init__(self): - super().__init__() - self.flatten = ops.Flatten() - self.dense1 = Dense(28*28, 64) - self.relu = ops.ReLU() - self.dense2 = Dense(64, 10) - - def construct(self, x): - x = self.flatten(x) - x = self.dense1(x) - x = self.relu(x) - x = self.dense2(x) - return x - -net = FFN() -# 配置dense1中的matmul算子切分策略为((2, 1), (1, 4)) -net.dense1.matmul.shard(((2, 1), (1, 4))) - -def create_dataset(batch_size): - """create dataset""" - dataset_path = os.getenv("DATA_PATH") - dataset = ds.MnistDataset(dataset_path) - image_transforms = [ - ds.vision.Rescale(1.0 / 255.0, 0), - ds.vision.Normalize(mean=(0.1307,), std=(0.3081,)), - ds.vision.HWC2CHW() - ] - label_transform = ds.transforms.TypeCast(ms.int32) - dataset = dataset.map(image_transforms, 'image') - dataset = dataset.map(label_transform, 'label') - dataset = dataset.batch(batch_size) - return dataset - -data_set = create_dataset(32) -optimizer = nn.Momentum(net.trainable_params(), 1e-3, 0.1) -loss_fn = nn.CrossEntropyLoss() - -def forward_fn(data, target): - """forward propagation""" - logits = net(data) - loss = loss_fn(logits, target) - return loss, logits - -grad_fn = ms.value_and_grad(forward_fn, None, net.trainable_params(), has_aux=True) - -@ms.jit -def train_step(inputs, targets): - """train_step""" - (loss_value, _), grads = grad_fn(inputs, targets) - 
optimizer(grads) - return loss_value - -for epoch in range(10): - i = 0 - for image, label in data_set: - loss_output = train_step(image, label) - if i % 100 == 0: - print("epoch: %s, step: %s, loss is %s" % (epoch, i, loss_output)) - i += 1 diff --git a/install/mindspore_ascend_install_source_en.md b/install/mindspore_ascend_install_source_en.md index 5b8a5b9027..faa2b3f775 100644 --- a/install/mindspore_ascend_install_source_en.md +++ b/install/mindspore_ascend_install_source_en.md @@ -35,7 +35,7 @@ The following table lists the system environment and third-party dependencies re |[Ascend AI processor software package](#installing-ascend-ai-processor-software-package)|-|Ascend platform AI computing library used by MindSpore| |[wheel](#installing-wheel-setuptools-pyyaml-and-numpy)|0.32.0 or later|Python packaging tool used by MindSpore| |[setuptools](#installing-wheel-setuptools-pyyaml-and-numpy)|44.0 or later|Python package management tool used by MindSpore| -|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator compliation in MindSpore depends on| +|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator compilation in MindSpore depends on| |[Numpy](#installing-wheel-setuptools-pyyaml-and-numpy)|1.19.3-1.26.4|Numpy module that Numpy-related functions in MindSpore depends on| |[GCC](#installing-gcc)|7.3.0|C++ compiler for compiling MindSpore| |[git](#installing-git-tclsh-patch-numa-and-flex)|-|Source code management tool used by MindSpore| diff --git a/install/mindspore_cpu_install_source_en.md b/install/mindspore_cpu_install_source_en.md index 9838ef5904..6aad29a8b5 100644 --- a/install/mindspore_cpu_install_source_en.md +++ b/install/mindspore_cpu_install_source_en.md @@ -29,7 +29,7 @@ This document describes how to install MindSpore by compiling source code on Lin |[Python](#installing-python)|3.9-3.11|Python environment that MindSpore depends| |[wheel](#installing-wheel-setuptools-pyyaml-and-numpy)|0.32.0 or later|Python packaging tool used by MindSpore| |[setuptools](#installing-wheel-setuptools-pyyaml-and-numpy)|44.0 or later|Python package management tool used by MindSpore| -|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator compliation in MindSpore depends on| +|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator compilation in MindSpore depends on| |[Numpy](#installing-wheel-setuptools-pyyaml-and-numpy)|1.19.3-1.26.4|Numpy module that Numpy-related functions in MindSpore depends on| |[GCC](#installing-gcc-git-tclsh-patch-and-numa)|7.3.0-9.4.0|C++ compiler for compiling MindSpore| |[git](#installing-gcc-git-tclsh-patch-and-numa)|-|Source code management tools used by MindSpore| diff --git a/install/mindspore_gpu_install_source_en.md b/install/mindspore_gpu_install_source_en.md index 05dfc98c93..363f546606 100644 --- a/install/mindspore_gpu_install_source_en.md +++ b/install/mindspore_gpu_install_source_en.md @@ -36,7 +36,7 @@ The following table lists the system environment and third-party dependencies re |[Python](#installing-python)|3.9-3.11|Python environment that MindSpore depends on| |[wheel](#installing-wheel-setuptools-pyyaml-and-numpy)|0.32.0 or later|Python packaging tool used by MindSpore| |[setuptools](#installing-wheel-setuptools-pyyaml-and-numpy)|44.0 or later|Python package management tool used by MindSpore| -|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator 
compliation in MindSpore depends on| +|[PyYAML](#installing-wheel-setuptools-pyyaml-and-numpy)|6.0-6.0.2|PyYAML module that operator compilation in MindSpore depends on| |[Numpy](#installing-wheel-setuptools-pyyaml-and-numpy)|1.19.3-1.26.4|Numpy module that Numpy-related functions in MindSpore depends on| |[GCC](#installing-gcc-git-and-other-dependencies)|7.3.0-9.4.0|C++ compiler for compiling MindSpore| |[git](#installing-gcc-git-and-other-dependencies)|-|source code management tools used by MindSpore| diff --git a/tutorials/source_en/debug/error_analysis/mindir.md b/tutorials/source_en/debug/error_analysis/mindir.md index 5e1bcec8a8..c659ac8233 100644 --- a/tutorials/source_en/debug/error_analysis/mindir.md +++ b/tutorials/source_en/debug/error_analysis/mindir.md @@ -4,7 +4,7 @@ ## Overview -When a model compiled using MindSpore runs in the graph mode `set_context(mode=GRAPH_MODE)` and setting the environment variable `MS_DEV_SAVE_GRAPHS` to 2, some intermediate files will be generated during graph compliation. These intermediate files are called IR files. Currently, there are two IR files: +When a model compiled using MindSpore runs in graph mode (`set_context(mode=GRAPH_MODE)`) and the environment variable `MS_DEV_SAVE_GRAPHS` is set to 2, some intermediate files will be generated during graph compilation. These intermediate files are called IR files. Currently, there are two IR files: - .ir file: An IR file that describes the model structure in text format and can be directly viewed using any text editors. - .dot file: When setting the environment variable `MS_DEV_SAVE_GRAPHS` to 3, an IR file that describes the topology relationships between different nodes. You can use this file by [graphviz](http://graphviz.org/) as the input to generate images for users to view the model structure. -- Gitee
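To make the IR dump workflow described in the mindir.md passage above concrete, here is a minimal sketch. It assumes only the `set_context` API and the `MS_DEV_SAVE_GRAPHS` environment variable named in that passage; the small `Net` cell and the input shape are hypothetical placeholders, and the `.ir` files are typically written to the working directory unless a dump path is configured.

```python
import os
import numpy as np
import mindspore as ms
from mindspore import nn, Tensor

# Enable IR file dumping before the network is compiled.
# A value of 2 saves .ir files; 3 additionally saves .dot files (see above).
os.environ['MS_DEV_SAVE_GRAPHS'] = '2'
ms.set_context(mode=ms.GRAPH_MODE)

class Net(nn.Cell):
    """A hypothetical two-layer cell used only to trigger graph compilation."""
    def __init__(self):
        super().__init__()
        self.dense1 = nn.Dense(28 * 28, 64)
        self.relu = nn.ReLU()
        self.dense2 = nn.Dense(64, 10)

    def construct(self, x):
        return self.dense2(self.relu(self.dense1(x)))

net = Net()
# Running the cell in graph mode compiles it and writes the intermediate
# .ir files produced at each compilation stage.
logits = net(Tensor(np.ones((1, 28 * 28)), ms.float32))
print(logits.shape)
```

Opening any of the generated `.ir` files in a text editor shows the compiled graph structure; with `MS_DEV_SAVE_GRAPHS` set to 3, the accompanying `.dot` files can be rendered with graphviz as described above.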