From ad5d00cdf31aca370bead8953aca51106300dfbc Mon Sep 17 00:00:00 2001 From: hid54630209 Date: Thu, 16 Jan 2025 10:19:57 +0800 Subject: [PATCH] =?UTF-8?q?=E6=9F=A5=E6=BC=8F=E8=A1=A5=E7=BC=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../AiPhysicalAttackAndDetecting/ReadMe.md | 8 +- .../AiPhysicalAttackAndDetecting/args.py | 2 +- .../camera/camera_process.py | 2 +- .../darknet_ob_detector/utils.py | 2 +- .../detect_attack_by_mxbase.py | 2 +- .../ultrayolo_attack_detector/utils.py | 2 +- VisionSDK/AllObjectsStructuringV2/README.MD | 2 +- VisionSDK/Ascendffmpeg/README.md | 4 +- VisionSDK/MediaCodec/README.zh.md | 2 +- .../ModuleProcessors/CommonData/CommonData.h | 2 +- .../ModuleProcessors/Signal/Signal.cpp | 2 +- .../OpticalCharacterRecognition/src/build.sh | 2 +- VisionSDK/PPYOLOEPlusDetection/CMakeLists.txt | 2 +- VisionSDK/PPYOLOEPlusDetection/README.md | 4 +- .../ConfigParser/ConfigParser.cpp | 2 +- .../ConfigParser/ConfigParser.h | 2 +- VisionSDK/RtspServer/README.zh.md | 2 +- build/build.sh | 26 ------- contrib/ADNet/README.md | 4 +- contrib/ActionRecognition/README.md | 18 +++-- contrib/AutoSpeechRecognition/README.md | 2 +- contrib/CarPlateRecognition/README.md | 2 +- contrib/CenterFace/C++/run.sh | 2 +- contrib/CenterFace/README.md | 4 +- contrib/CenterNet/README.md | 4 +- contrib/Collision/README.md | 4 +- contrib/Colorization/README.md | 2 +- contrib/CrowdCounting/README.md | 2 +- contrib/DocumentLayoutAnalysis/README.md | 4 +- contrib/DriverStatusRecognition/README.md | 4 +- contrib/DriverStatusRecognition/run.sh | 2 +- contrib/EdgeDetectionPicture/README.md | 2 +- contrib/EfficientDet/README.md | 4 +- contrib/FCOS/README.md | 4 +- contrib/FastSCNN/README.md | 2 +- contrib/Faster_R-CNN/README.md | 2 +- contrib/FireDetection/c++/README.md | 4 +- contrib/FireDetection/python/README.md | 4 +- contrib/HelmetIdentification/README.md | 4 +- contrib/MeterReader/plugins/process3/build.sh | 2 +- contrib/MultiChannelVideoDetection/README.md | 2 +- .../OCR/plugins/TextInfoPlugin/CMakeLists.txt | 2 +- .../TextSimilarityPlugin/CMakeLists.txt | 2 +- contrib/OpenCVPlugin/README.md | 2 +- contrib/PassengerflowEstimation/README.md | 2 +- .../pipeline/passengerflowestimation.pipeline | 2 +- contrib/PatchCoreAnomalyDetection/README.md | 4 +- contrib/PixelLink/README.md | 4 +- contrib/PortraitSegmentation/README.md | 2 +- contrib/PoseEstNet/README.md | 4 +- contrib/RTMHumanKeypointsDetection/README.md | 2 +- contrib/ReID/README.md | 2 +- contrib/ReIDv2/README.md | 2 +- contrib/RemoteSensingSegmentation/README.md | 2 +- contrib/SOLOV2/README.md | 4 +- contrib/SuperRetina/README.md | 2 +- contrib/TextSnake/README.md | 7 +- contrib/VCOD_SLTNet/README.md | 2 +- .../VehicleRetrogradeRecognition/README.md | 4 +- contrib/VideoGestureRecognition/run.sh | 2 +- contrib/YOLOX/README.md | 2 +- contrib/build_all.sh | 73 ------------------- contrib/facemaskdetection/README.md | 2 +- ...00\345\217\221\345\245\227\344\273\266.md" | 5 +- ...03\350\257\225\346\214\207\345\257\274.md" | 2 +- ...13\345\220\216\345\244\204\347\220\206.md" | 10 +-- .../Cmake\344\273\213\347\273\215.md" | 2 +- tutorials/DvppWrapperSample/main.cpp | 2 +- tutorials/ModleSample/python/main.py | 6 +- .../VideoEncoder&VideoDecoder/C++/main.cpp | 4 +- .../VideoEncoder&VideoDecoder/Python/main.py | 4 +- tutorials/VideoEncoder&VideoDecoder/README.md | 4 +- tutorials/sampleCustomProto/CMakeLists.txt | 2 +- 73 files changed, 112 insertions(+), 213 deletions(-) delete mode 100644 build/build.sh 
delete mode 100644 contrib/build_all.sh diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/ReadMe.md b/VisionSDK/AiPhysicalAttackAndDetecting/ReadMe.md index 6f509389c..8b3163cc1 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/ReadMe.md +++ b/VisionSDK/AiPhysicalAttackAndDetecting/ReadMe.md @@ -47,14 +47,10 @@ |-----------|------------------|-------------------------------| | 系统 | Ubuntu 22.04 | 操作系统 | | CANN | 7.0.RC1 | toolkit开发套件包或nnrt推理包 | -| MindX SDK | 5.0.RC3 | mxVison软件包 | +| Vision SDK | 5.0.RC3 | Vision SDK软件包 | -### 2.1 依赖软件安装 -- CANN获取[链接](https://www.hiascend.com/software/cann), 安装[参考链接](https://www.hiascend.com/document/detail/zh/canncommercial/70RC1/envdeployment/instg/instg_0013.html) -- MindSDK 获取[链接](https://www.hiascend.com/software/Mindx-sdk), 安装[参考链接](https://www.hiascend.com/document/detail/zh/mind-sdk/50rc3/vision/mxvisionug/mxvisionug_0014.html) - -### 2.2 python依赖安装 +### 2.1 python依赖安装 #### 依赖库及版本 | 名称 | 版本 | |------------------------|----------| diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/args.py b/VisionSDK/AiPhysicalAttackAndDetecting/args.py index df26eed9f..1404f5af5 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/args.py +++ b/VisionSDK/AiPhysicalAttackAndDetecting/args.py @@ -3,7 +3,7 @@ """ Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. Description: Input args parse. -Author: MindX SDK +Author: Mind SDK Create: 2023 History: NA """ diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/camera/camera_process.py b/VisionSDK/AiPhysicalAttackAndDetecting/camera/camera_process.py index 30cc71f27..9955d126e 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/camera/camera_process.py +++ b/VisionSDK/AiPhysicalAttackAndDetecting/camera/camera_process.py @@ -3,7 +3,7 @@ """ Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. Description: Input source process. -Author: MindX SDK +Author: Mind SDK Create: 2023 History: NA """ diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/darknet_ob_detector/utils.py b/VisionSDK/AiPhysicalAttackAndDetecting/darknet_ob_detector/utils.py index 6a9732003..cbd420146 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/darknet_ob_detector/utils.py +++ b/VisionSDK/AiPhysicalAttackAndDetecting/darknet_ob_detector/utils.py @@ -3,7 +3,7 @@ """ Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. Description: Util functions. -Author: MindX SDK +Author: Mind SDK Create: 2023 History: NA """ diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/detect_attack_by_mxbase.py b/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/detect_attack_by_mxbase.py index 7bce9c09b..bc032ac64 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/detect_attack_by_mxbase.py +++ b/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/detect_attack_by_mxbase.py @@ -3,7 +3,7 @@ """ Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. Description: Main process of attack detect. -Author: MindX SDK +Author: Mind SDK Create: 2023 History: NA """ diff --git a/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/utils.py b/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/utils.py index eb5f352e8..8dbdb7cb3 100644 --- a/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/utils.py +++ b/VisionSDK/AiPhysicalAttackAndDetecting/ultrayolo_attack_detector/utils.py @@ -3,7 +3,7 @@ """ Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. 
All rights reserved. Description: Util functions. -Author: MindX SDK +Author: Mind SDK Create: 2023 History: NA """ diff --git a/VisionSDK/AllObjectsStructuringV2/README.MD b/VisionSDK/AllObjectsStructuringV2/README.MD index 5065f7a1d..2b1f7d38a 100644 --- a/VisionSDK/AllObjectsStructuringV2/README.MD +++ b/VisionSDK/AllObjectsStructuringV2/README.MD @@ -199,7 +199,7 @@ std::fill(filePaths.begin(), filePaths.end(), "../test.264"); // 如需通过rts ``` -**步骤2:** 根据下面的文档,[创建网络视频流](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/reference_material/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)。 +**步骤2:** 根据下面的文档,[创建网络视频流](https://gitee.com/ascend/mindsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)。 **步骤3:** 在main.cpp中添加拉流代码。 diff --git a/VisionSDK/Ascendffmpeg/README.md b/VisionSDK/Ascendffmpeg/README.md index a310c3511..889a72578 100644 --- a/VisionSDK/Ascendffmpeg/README.md +++ b/VisionSDK/Ascendffmpeg/README.md @@ -72,9 +72,9 @@ chmod +x ./ffbuild/*.sh **步骤3:** 添加环境变量 -通过指令`find / -name libavdevice.so`查找到文件所在路径,形如`/PATH/TO/mindxsdk-referenceapps/VisionSDK/Ascendffmpeg/ascend/lib/libavdevice.so`,则执行: +通过指令`find / -name libavdevice.so`查找到文件所在路径,形如`/PATH/TO/mindsdk-referenceapps/VisionSDK/Ascendffmpeg/ascend/lib/libavdevice.so`,则执行: ```bash -export LD_LIBRARY_PATH=/PATH/TO/mindxsdk-referenceapps/VisionSDK/Ascendffmpeg/ascend/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/PATH/TO/mindsdk-referenceapps/VisionSDK/Ascendffmpeg/ascend/lib:$LD_LIBRARY_PATH ``` **步骤4:** 运行 diff --git a/VisionSDK/MediaCodec/README.zh.md b/VisionSDK/MediaCodec/README.zh.md index e1548d3d0..67db8de92 100644 --- a/VisionSDK/MediaCodec/README.zh.md +++ b/VisionSDK/MediaCodec/README.zh.md @@ -72,7 +72,7 @@ stream.ch0 = rtsp://xxx.xxx.xxx.xxx:xxx/xxx.264 **注意**:解码模块`mxpi_videodecoder`的**vdecChannelId**配置项要保证不重用;缩放模块`mxpi_imageresize`的**resizeHeight**和**resizeWidth**要与编码模块的`mxpi_videoencoder`的**imageHeight**和**imageWidth**保持一致;`mxpi_videoencoder`编码模块的**fps**用于控制是否打印帧率,默认值是**0**表示不打印,若要打印,可设置为**1**;**deviceId**配置为需要运行的npu芯片编号,具体可以通过`npu-smi info`查看。 -**步骤3** 修改MindXSDK的日志配置文件 +**步骤3** 修改Vision SDK的日志配置文件 修改`${MX_SDK_HOME}/mxVision/config/logging.conf`,调节输出日志级别为info级别。 diff --git a/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/CommonData/CommonData.h b/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/CommonData/CommonData.h index 6b7c47017..904baf326 100644 --- a/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/CommonData/CommonData.h +++ b/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/CommonData/CommonData.h @@ -1,7 +1,7 @@ /* * Copyright (c) Huawei Technologies Co., Ltd 2022-2022. All rights reserved. * Description: Common data struct. 
-* Author: MindX SDK +* Author: Mind SDK * Create: 2022 * History: NA */ diff --git a/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/Signal/Signal.cpp b/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/Signal/Signal.cpp index 3b317be1c..2fce91cad 100644 --- a/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/Signal/Signal.cpp +++ b/VisionSDK/OpticalCharacterRecognition/src/AscendBase/Base/Framework/ModuleProcessors/Signal/Signal.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) Huawei Technologies Co., Ltd 2022-2022. All rights reserved. * Description: signal control. -* Author: MindX SDK +* Author: Mind SDK * Create: 2022 * History: NA */ diff --git a/VisionSDK/OpticalCharacterRecognition/src/build.sh b/VisionSDK/OpticalCharacterRecognition/src/build.sh index 8a4bd57a3..6d4f78460 100644 --- a/VisionSDK/OpticalCharacterRecognition/src/build.sh +++ b/VisionSDK/OpticalCharacterRecognition/src/build.sh @@ -1,7 +1,7 @@ # !/bin/bash # Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. # Description: mxOCR c++ build. -# Author: MindX SDK +# Author: Mind SDK # Create: 2022 # History: NA diff --git a/VisionSDK/PPYOLOEPlusDetection/CMakeLists.txt b/VisionSDK/PPYOLOEPlusDetection/CMakeLists.txt index e0da1e261..6e55c9251 100644 --- a/VisionSDK/PPYOLOEPlusDetection/CMakeLists.txt +++ b/VisionSDK/PPYOLOEPlusDetection/CMakeLists.txt @@ -11,7 +11,7 @@ include_directories( ${MX_SDK_HOME}/include ${MX_SDK_HOME}/opensource/include ${MX_SDK_HOME}/opensource/include/opencv4 - ${mindxsdk-referenceapps安装路径}/mxVision/PPYOLOEPlusDetection/plugin + ${mindsdk-referenceapps安装路径}/VisionSDK/PPYOLOEPlusDetection/plugin ) link_directories( diff --git a/VisionSDK/PPYOLOEPlusDetection/README.md b/VisionSDK/PPYOLOEPlusDetection/README.md index e4832491e..426255ee2 100644 --- a/VisionSDK/PPYOLOEPlusDetection/README.md +++ b/VisionSDK/PPYOLOEPlusDetection/README.md @@ -47,7 +47,7 @@ paddlepaddle框架的ppyoloe模型推理时,前处理方案包括解码为BGR- │ └── CMakeLists.txt # 用于编译后处理插件 ├── model │ ├── coco.names # 需要下载,下载链接在下方 -│ └── ppyoloe.cfg # 模型后处理配置文件,配置说明参考《mxVision用户指南》中已有模型支持->模型后处理配置参数->YOLOv5模型后处理配置参数 +│ └── ppyoloe.cfg # 模型后处理配置文件,配置说明参考《Vision SDK用户指南》中已有模型支持->模型后处理配置参数->YOLOv5模型后处理配置参数 ├── pipeline │ ├── Sample.pipeline # 参考pipeline文件,用于配置rgb模型,用户需要根据自己需求和模型输入类型进行修改 │ └── SampleYuv.pipeline # 参考pipeline文件,用于配置yuv模型,用户需要根据自己需求和模型输入类型进行修改 @@ -206,7 +206,7 @@ make install **步骤3:** 修改`PPYOLOEPlusDetection`目录下的`CMakeLists.txt`,第14行配置mindsdk-referenceapps安装路径: ```bash -14 ${mindxsdk-referenceapps安装路径}/VisionSDK/PPYOLOEPlusDetection/plugin +14 ${mindsdk-referenceapps安装路径}/VisionSDK/PPYOLOEPlusDetection/plugin ``` **步骤4:** 在`PPYOLOEPlusDetection`目录下运行脚本,进行照片检测: ``` diff --git a/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.cpp b/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.cpp index da5e939e9..35c6dec02 100644 --- a/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.cpp +++ b/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. * Description: ConfigParser implementation. 
- * Author: MindX SDK + * Author: Mind SDK * Create: 2024 * History: NA */ diff --git a/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.h b/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.h index c38b1ba35..ca32eb0c1 100644 --- a/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.h +++ b/VisionSDK/PutTextForMultiVideos/ConfigParser/ConfigParser.h @@ -1,7 +1,7 @@ /* * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. * Description: ConfigParser implementation. - * Author: MindX SDK + * Author: Mind SDK * Create: 2024 * History: NA */ diff --git a/VisionSDK/RtspServer/README.zh.md b/VisionSDK/RtspServer/README.zh.md index a94e873a6..c1bcd5bff 100644 --- a/VisionSDK/RtspServer/README.zh.md +++ b/VisionSDK/RtspServer/README.zh.md @@ -69,7 +69,7 @@ openssl rsa -in server.key -out server_no.crt 生成server_no.crt,则说明成功。 - 证书ca.crt、server.crt的制作,请参考mxVision手册证书制作章节。 + 证书ca.crt、server.crt的制作,请参考Vision SDK手册证书制作章节。 ### 4.2 运行 diff --git a/build/build.sh b/build/build.sh deleted file mode 100644 index db8341850..000000000 --- a/build/build.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.mitations under the License. -set -e -current_folder="$( cd "$(dirname "$0")" ;pwd -P )" -export MX_SDK_HOME=/opt/buildtools/mindxsdk/mxVision -bash $current_folder/../contrib/build_all.sh -exit 0 - - - diff --git a/contrib/ADNet/README.md b/contrib/ADNet/README.md index e5d4da3ea..07c2e8a1c 100644 --- a/contrib/ADNet/README.md +++ b/contrib/ADNet/README.md @@ -74,8 +74,8 @@ pipeline流程如下图所示: ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` ## 3 模型转换 diff --git a/contrib/ActionRecognition/README.md b/contrib/ActionRecognition/README.md index 6e6212bad..c8cb74190 100644 --- a/contrib/ActionRecognition/README.md +++ b/contrib/ActionRecognition/README.md @@ -137,9 +137,15 @@ **步骤2:** 配置 Vision SDK 环境变量。 -`export MX_SDK_HOME=${安装路径}/mxVision ` +在执行后续步骤前,需要设置环境变量: -注:本例中Vision SDK安装路径为 /root/work/MindX_SDK/mxVision。 +```bash +# 执行环境变量脚本使环境变量生效 +. ${ascend-toolkit-path}/set_env.sh +. 
${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 +# ascend-toolkit-path: CANN安装路径 +``` **步骤3:** 推荐在${MX_SDK_HOME}/samples下创建ActionRecognition根目录,在项目根目录下创建目录models `mkdir models`,分别为yolov3和ECONet创建一个文件夹,将两个离线模型及各自的配置文件放入文件夹下。[下载地址](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/ActionRecognition/models.zip)。创建完成后models文件夹下的目录结构如下: @@ -163,8 +169,8 @@ **步骤5:** 配置环境变量MX_SDK_HOME: ```bash -export MX_SDK_HOME=/MindX_SDK/mxVision/ -# 此处MX_SDK_HOME请使用MindX_SDK的实际路径 +export MX_SDK_HOME=${Vision SDK的安装路径} +# 此处MX_SDK_HOME请使用Vision SDK的安装路径 ``` **步骤6**:在插件代码目录下创建build文件夹,使用cmake命令进行编译,生成.so文件。下面以单人独处插件的编译过程作为范例: @@ -420,7 +426,7 @@ terminate called after throwing an instance of 'cv::Exception' H264视频文件及ROI文件:[下载地址](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/ActionRecognition/data.zip) ; -RTSP取流地址(可以从网络摄像机获取,也可通过Live555等工具将本地视频文 件转换为rtsp流)。自主搭建RTSP拉流教程:[live555链接](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/reference_material/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md),需要注意的是在搭建RTSP时,使用./genMakefiles 命令生成编译文件时,输入的参数是根据cofig.<后缀>获取的,与服务器架构等有关。 +RTSP取流地址(可以从网络摄像机获取,也可通过Live555等工具将本地视频文 件转换为rtsp流)。自主搭建RTSP拉流教程:[live555链接](https://gitee.com/ascend/mindsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md),需要注意的是在搭建RTSP时,使用./genMakefiles 命令生成编译文件时,输入的参数是根据cofig.<后缀>获取的,与服务器架构等有关。 RTSP视频拉流插件配置范例: @@ -436,7 +442,7 @@ RTSP视频拉流插件配置范例: 其中rtsp_Url的格式是 rtsp:://host:port/Data,host:port/路径映射到mediaServer/目录下,Data为视频文件的路径。 -RTSP拉流教程:[live555链接](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/reference_material/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)中第七步视频循环推流,按照提示修改cpp文件可以使自主搭建的rtsp循环推流,如果不作更改,则为有限的视频流;同时第六步高分辨率帧花屏,修改mediaServer/DynamicRTSPServer.cpp文件,将OutPacketBuffer::maxSize增大,例如"500000",避免出现”The input frame data was too large for our buffer“问题,导致丢帧。修改完后,需要重新运行以下命令: +RTSP拉流教程:[live555链接](https://gitee.com/ascend/mindsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)中第七步视频循环推流,按照提示修改cpp文件可以使自主搭建的rtsp循环推流,如果不作更改,则为有限的视频流;同时第六步高分辨率帧花屏,修改mediaServer/DynamicRTSPServer.cpp文件,将OutPacketBuffer::maxSize增大,例如"500000",避免出现”The input frame data was too large for our buffer“问题,导致丢帧。修改完后,需要重新运行以下命令: ```cmake ./genMakefiles diff --git a/contrib/AutoSpeechRecognition/README.md b/contrib/AutoSpeechRecognition/README.md index 79440dc70..06e73fe26 100644 --- a/contrib/AutoSpeechRecognition/README.md +++ b/contrib/AutoSpeechRecognition/README.md @@ -78,7 +78,7 @@ apt-get install liblzma-dev #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . 
${SDK-path}/set_env.sh ``` diff --git a/contrib/CarPlateRecognition/README.md b/contrib/CarPlateRecognition/README.md index c96be090b..bf145cc8e 100644 --- a/contrib/CarPlateRecognition/README.md +++ b/contrib/CarPlateRecognition/README.md @@ -88,7 +88,7 @@ atc --model=./car_plate_recognition/car_plate_recognition.prototxt --weight=./ca ## 4 编译与运行 ### 4.1 修改CMakeLists.txt文件 -第**10**行 `set(MX_SDK_HOME $ENV{MX_SDK_HOME})` 语句是设置Vision SDK的安装路径,一般按第2章设置环境变量后环境中有该变量存在,若没有,则将$ENV{MX_SDK_HOME}替换为用户实际的MindX SDK安装路径。 +第**10**行 `set(MX_SDK_HOME $ENV{MX_SDK_HOME})` 语句是设置Vision SDK的安装路径,一般按第2章设置环境变量后环境中有该变量存在,若没有,则将$ENV{MX_SDK_HOME}替换为用户实际的Vision SDK安装路径。 第**12**行 `set(FREETYPE_HOME $ENV{FREETYPE_HOME})` 语句是设置FreeType库的安装路径,若未设置FREETYPE_HOME环境变量,需将$ENV{FREETYPE_HOME}替换为用户实际的FreeType库安装路径。 diff --git a/contrib/CenterFace/C++/run.sh b/contrib/CenterFace/C++/run.sh index 30883b290..dbcb991aa 100644 --- a/contrib/CenterFace/C++/run.sh +++ b/contrib/CenterFace/C++/run.sh @@ -23,7 +23,7 @@ env_ready=true function check_env() { - # check MindXSDK env + # check MindSDK env if [ ! "${MX_SDK_HOME}" ]; then env_ready=false echo "please set MX_SDK_HOME path into env." diff --git a/contrib/CenterFace/README.md b/contrib/CenterFace/README.md index c7a522dd3..44304fd68 100644 --- a/contrib/CenterFace/README.md +++ b/contrib/CenterFace/README.md @@ -80,8 +80,8 @@ ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` ## 3 准备模型 diff --git a/contrib/CenterNet/README.md b/contrib/CenterNet/README.md index 245a7e6e3..020ab55be 100644 --- a/contrib/CenterNet/README.md +++ b/contrib/CenterNet/README.md @@ -60,8 +60,8 @@ CenterNet 目标检测后处理插件基于 Vision SDK 开发,对图片中的 ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` diff --git a/contrib/Collision/README.md b/contrib/Collision/README.md index dbc4f3b45..52c96cfd3 100644 --- a/contrib/Collision/README.md +++ b/contrib/Collision/README.md @@ -90,8 +90,8 @@ ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` diff --git a/contrib/Colorization/README.md b/contrib/Colorization/README.md index 4330c4350..4dd035123 100644 --- a/contrib/Colorization/README.md +++ b/contrib/Colorization/README.md @@ -48,7 +48,7 @@ #设置CANN环境变量,ascend-toolkit-path为cann安装路径 . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git a/contrib/CrowdCounting/README.md b/contrib/CrowdCounting/README.md index 8ce663bd0..22ba87c08 100644 --- a/contrib/CrowdCounting/README.md +++ b/contrib/CrowdCounting/README.md @@ -30,7 +30,7 @@ #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Mind SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` ## 3 准备模型 diff --git a/contrib/DocumentLayoutAnalysis/README.md b/contrib/DocumentLayoutAnalysis/README.md index df5cb85b0..758e29cab 100644 --- a/contrib/DocumentLayoutAnalysis/README.md +++ b/contrib/DocumentLayoutAnalysis/README.md @@ -65,8 +65,8 @@ ```bash # 执行环境变量脚本使环境变量生效 . 
${ascent-tookit-path}/set_env.sh -. ${SDK-path}/set_env.sh -# SDK-path: SDK mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascent-tookit-path: CANN安装路径 ``` diff --git a/contrib/DriverStatusRecognition/README.md b/contrib/DriverStatusRecognition/README.md index e48b97140..7f61743a0 100644 --- a/contrib/DriverStatusRecognition/README.md +++ b/contrib/DriverStatusRecognition/README.md @@ -26,8 +26,8 @@ ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` ## 3 准备模型 diff --git a/contrib/DriverStatusRecognition/run.sh b/contrib/DriverStatusRecognition/run.sh index d8fe017ae..d7f4ec507 100755 --- a/contrib/DriverStatusRecognition/run.sh +++ b/contrib/DriverStatusRecognition/run.sh @@ -23,7 +23,7 @@ CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exi info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } -#export MX_SDK_HOME=/home/xxxx/sdk/MindX_SDK/mxVision +#export MX_SDK_HOME=/home/xxxx/sdk/Mind_SDK/mxVision export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins diff --git a/contrib/EdgeDetectionPicture/README.md b/contrib/EdgeDetectionPicture/README.md index 2a010fbf1..f4866667a 100644 --- a/contrib/EdgeDetectionPicture/README.md +++ b/contrib/EdgeDetectionPicture/README.md @@ -45,7 +45,7 @@ Init > ReadImage >Resize > Inference >PostProcess >DeInit #设置CANN环境变量 . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` ## 3 准备模型 diff --git a/contrib/EfficientDet/README.md b/contrib/EfficientDet/README.md index e54d76790..c4bc23cb2 100644 --- a/contrib/EfficientDet/README.md +++ b/contrib/EfficientDet/README.md @@ -55,8 +55,8 @@ EfficientDet 目标检测后处理插件基于 Vision SDK 开发,对图片中 ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` diff --git a/contrib/FCOS/README.md b/contrib/FCOS/README.md index b33de1111..30186d982 100644 --- a/contrib/FCOS/README.md +++ b/contrib/FCOS/README.md @@ -63,8 +63,8 @@ 在项目开始运行前需要设置环境变量: ```bash . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision-path: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` diff --git a/contrib/FastSCNN/README.md b/contrib/FastSCNN/README.md index e5297d5f3..3a5148a91 100644 --- a/contrib/FastSCNN/README.md +++ b/contrib/FastSCNN/README.md @@ -83,7 +83,7 @@ #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . 
${SDK-path}/set_env.sh #查看环境变量 diff --git a/contrib/Faster_R-CNN/README.md b/contrib/Faster_R-CNN/README.md index 092ee6743..e7220026d 100644 --- a/contrib/Faster_R-CNN/README.md +++ b/contrib/Faster_R-CNN/README.md @@ -125,7 +125,7 @@ #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git a/contrib/FireDetection/c++/README.md b/contrib/FireDetection/c++/README.md index d0406f6e7..9ab87729d 100644 --- a/contrib/FireDetection/c++/README.md +++ b/contrib/FireDetection/c++/README.md @@ -56,10 +56,10 @@ ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh +. ${Vision-SDK-path}/set_env.sh export FFMPEG_PATH=${ffmpeg-path} export LD_LIBRARY_PATH=${ffmpeg-lib-path}:$LD_LIBRARY_PATH -# mxVision: mxVision安装路径 +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 # ffmpeg-path: ffmpeg安装路径,通常为/usr/local/ffmpeg # ffmpeg-lib-path: ffmpeg的lib库安装路径,通常为/usr/local/ffmpeg/lib diff --git a/contrib/FireDetection/python/README.md b/contrib/FireDetection/python/README.md index 45c3020a0..8c074fec2 100644 --- a/contrib/FireDetection/python/README.md +++ b/contrib/FireDetection/python/README.md @@ -43,8 +43,8 @@ ```bash # 执行环境变量脚本使环境变量生效 . ${ascend-toolkit-path}/set_env.sh -. ${mxVision-path}/set_env.sh -# mxVision: mxVision安装路径 +. ${Vision-SDK-path}/set_env.sh +# Vision-SDK-path: Vision SDK安装路径 # ascend-toolkit-path: CANN安装路径 ``` diff --git a/contrib/HelmetIdentification/README.md b/contrib/HelmetIdentification/README.md index fa224aaee..73b49f3cd 100644 --- a/contrib/HelmetIdentification/README.md +++ b/contrib/HelmetIdentification/README.md @@ -124,7 +124,7 @@ cmake .. make -j ``` -编译成功后将产生**libmxpi_selectedframe.so**文件,文件生成位置在build目录下。将其复制至SDK的插件库中(./MindX_SDK/mxVision/lib/plugins),并修改权限为440。 +编译成功后将产生**libmxpi_selectedframe.so**文件,文件生成位置在build目录下。将其复制至SDK的插件库中(${Vision SDK安装路径}/lib/plugins),并修改权限为440。 ### 4.2 视频推流 本项目通过mxpi_rtspsrc拉流输入数据,通过两路GetResult接口输出数据,一路输出带有帧信息的图片数据,一路输出带有帧信息的目标检测框和检测框跟踪信息。推理过程如下: @@ -160,7 +160,7 @@ test.264可替换成任意上传至当前目录的[264格式文件](https://gite pipline根据1.6节中技术实现流程图编写,**HelmetDetection.pipline**放在源码根目录Models。 -1. pipline中mxpi_modelinfer用于加载yolov5安全帽识别模型。该插件包含四个参数,modelPath用于加载om模型文件。labelPath用于加载模型可识别类(imgclass.names)。postProcessLibPath用于加载后处理动态链接库文件,该模块实现NMS等后处理。postProcessConfigPath用于加载后处理所需要的配置文件(Helmet_yolov5.cfg)。本项目使用后处理文件为**libMpYOLOv5PostProcessor.so**(在${MX_SDK}/mxVision/lib下)。该后处理配置文件内容如下: +1. 
pipline中mxpi_modelinfer用于加载yolov5安全帽识别模型。该插件包含四个参数,modelPath用于加载om模型文件。labelPath用于加载模型可识别类(imgclass.names)。postProcessLibPath用于加载后处理动态链接库文件,该模块实现NMS等后处理。postProcessConfigPath用于加载后处理所需要的配置文件(Helmet_yolov5.cfg)。本项目使用后处理文件为**libMpYOLOv5PostProcessor.so**(在${Vision-SDK-path}/mxVision/lib下,Vision-SDK-path表示Vision SDK安装路径)。该后处理配置文件内容如下: ```python CLASS_NUM=3 BIASES_NUM=18 diff --git a/contrib/MeterReader/plugins/process3/build.sh b/contrib/MeterReader/plugins/process3/build.sh index d72c63e98..e5969b8e2 100644 --- a/contrib/MeterReader/plugins/process3/build.sh +++ b/contrib/MeterReader/plugins/process3/build.sh @@ -1,4 +1,4 @@ -# 编译mindx插件,运行环境:华为实验室服务器 +# 编译MindSDK插件,运行环境:华为实验室服务器 # 使用cmake编译插件 diff --git a/contrib/MultiChannelVideoDetection/README.md b/contrib/MultiChannelVideoDetection/README.md index 132661d9b..907f8ed73 100644 --- a/contrib/MultiChannelVideoDetection/README.md +++ b/contrib/MultiChannelVideoDetection/README.md @@ -112,7 +112,7 @@ rtspList.emplace_back("${rtsp流地址1}"); rtspList.emplace_back("${rtsp流地址2}"); ``` -②:将文件中第96行的`${MindXSDK安装路径}`字段值替换为实际使用的安装路径。 +②:将文件中第96行的`${MX_SDK_HOME}`字段值替换为实际Vision SDK的安装路径。 ```c++ APP_ERROR ret = configUtil.LoadConfiguration("${MX_SDK_HOME}/config/logging.conf", configData, MxBase::ConfigMode::CONFIGFILE); diff --git a/contrib/OCR/plugins/TextInfoPlugin/CMakeLists.txt b/contrib/OCR/plugins/TextInfoPlugin/CMakeLists.txt index f5d8dcb4c..a78851fa8 100644 --- a/contrib/OCR/plugins/TextInfoPlugin/CMakeLists.txt +++ b/contrib/OCR/plugins/TextInfoPlugin/CMakeLists.txt @@ -14,7 +14,7 @@ add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private) add_definitions(-DENABLE_DVPP_INTERFACE) -set(MX_SDK_HOME ${XXX}/MindX_SDK/mxVision/) +set(MX_SDK_HOME ${Vision SDK安装路径}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${MX_SDK_HOME/lib/plugins}) diff --git a/contrib/OCR/plugins/TextSimilarityPlugin/CMakeLists.txt b/contrib/OCR/plugins/TextSimilarityPlugin/CMakeLists.txt index 038e7c34b..517fda019 100644 --- a/contrib/OCR/plugins/TextSimilarityPlugin/CMakeLists.txt +++ b/contrib/OCR/plugins/TextSimilarityPlugin/CMakeLists.txt @@ -14,7 +14,7 @@ add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private) add_definitions(-DENABLE_DVPP_INTERFACE) -set(MX_SDK_HOME ${XXX}/MindX_SDK/mxVision/) +set(MX_SDK_HOME ${Vision SDK安装路径}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${MX_SDK_HOME/lib/plugins}) diff --git a/contrib/OpenCVPlugin/README.md b/contrib/OpenCVPlugin/README.md index 5bcfb3e74..93ad541e2 100644 --- a/contrib/OpenCVPlugin/README.md +++ b/contrib/OpenCVPlugin/README.md @@ -127,7 +127,7 @@ CANN 环境变量: - 环境变量介绍 ``` -SDK-path: SDK mxVision 安装路径 +SDK-path: Vision SDK 安装路径 ascend-toolkit-path: CANN 安装路径 ``` diff --git a/contrib/PassengerflowEstimation/README.md b/contrib/PassengerflowEstimation/README.md index 17f883eea..da7d970aa 100644 --- a/contrib/PassengerflowEstimation/README.md +++ b/contrib/PassengerflowEstimation/README.md @@ -63,7 +63,7 @@ bash generate_osd_om.sh "dataSource": "mxpi_tensorinfer0", "postProcessConfigPath": "./models/yolov4.cfg", "labelPath": "./models/yolov3.names", - "postProcessLibPath": "${MindX_SDK安装路径}/mxVision/lib/modelpostprocessors/libyolov3postprocess.so" + "postProcessLibPath": "${Vision SDK安装路径}/lib/modelpostprocessors/libyolov3postprocess.so" }, "factory": "mxpi_objectpostprocessor", "next": "mxpi_selectobject0" diff --git a/contrib/PassengerflowEstimation/pipeline/passengerflowestimation.pipeline b/contrib/PassengerflowEstimation/pipeline/passengerflowestimation.pipeline index de97dbbd1..5a43137b7 100644 --- 
a/contrib/PassengerflowEstimation/pipeline/passengerflowestimation.pipeline +++ b/contrib/PassengerflowEstimation/pipeline/passengerflowestimation.pipeline @@ -84,7 +84,7 @@ "dataSource": "mxpi_tensorinfer0", "postProcessConfigPath": "./models/yolov4.cfg", "labelPath": "./models/yolov3.names", - "postProcessLibPath": "${MindX_SDK安装路径}/mxVision/lib/modelpostprocessors/libyolov3postprocess.so" + "postProcessLibPath": "${Vision SDK安装路径}/lib/modelpostprocessors/libyolov3postprocess.so" }, "factory": "mxpi_objectpostprocessor", "next": "mxpi_selectobject0" diff --git a/contrib/PatchCoreAnomalyDetection/README.md b/contrib/PatchCoreAnomalyDetection/README.md index f32aa8c28..81a37f3c6 100644 --- a/contrib/PatchCoreAnomalyDetection/README.md +++ b/contrib/PatchCoreAnomalyDetection/README.md @@ -95,7 +95,7 @@ vim ~/.bashrc # 在.bashrc文件中添加以下环境变量 . ${MX_SDK_HOME}/set_env.sh . ${HOME}/Ascend/ascend-toolkit/set_env.sh -# 其中${MX_SDK_HOME}为MindX SDK安装目录,${HOME}为用户目录(如果CANN 开发包装在用户目录,否则为/usr/local/),配置的时候请自行替换成相应目录 +# 其中${MX_SDK_HOME}为Vision SDK安装目录,${HOME}为用户目录(如果CANN 开发包装在用户目录,否则为/usr/local/),配置的时候请自行替换成相应目录 # 保存退出.bashrc文件 # 执行如下命令使环境变量生效 source ~/.bashrc @@ -110,7 +110,7 @@ env 从 pytorch官网获取pth模型,在运行项目之前需要将 pth 模型转为mindspore可以加载的ckpt模型,mindspore训练过程中将ckpt模型转化为 air 模型,再由ATC命令将 air 模型转为 om 模型。 -模型转换工具(ATC)相关介绍如下 [https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md) +模型转换工具(ATC)相关介绍如下:[ATC工具介绍](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/devaids/devtools/atc/atlasatc_16_0005.html) 具体步骤如下 diff --git a/contrib/PixelLink/README.md b/contrib/PixelLink/README.md index 79bf9f314..024871c5a 100644 --- a/contrib/PixelLink/README.md +++ b/contrib/PixelLink/README.md @@ -75,7 +75,7 @@ 在编译运行项目前,需要设置环境变量: -模型转换所需ATC工具环境搭建参考链接:https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md +模型转换所需ATC工具环境搭建参考链接:https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/devaids/devtools/atc/atlasatc_16_0005.html 在编译运行项目前,需要设置环境变量: @@ -102,7 +102,7 @@ export GST_PLUGIN_PATH="${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOM pb模型提供在链接:https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/PixelLink/pixellink_tf.pb; -转换离线模型参考昇腾Gitee:https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md +转换离线模型参考昇腾Gitee:https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/devaids/devtools/atc/atlasatc_16_0005.html 首先需要配置ATC环境,下载pb模型,放到相应的路径后,修改模型转换的cfg配置文件,配置文件已经上传至项目目录model下。使用命令 diff --git a/contrib/PortraitSegmentation/README.md b/contrib/PortraitSegmentation/README.md index 173abe6dc..afd64e1db 100644 --- a/contrib/PortraitSegmentation/README.md +++ b/contrib/PortraitSegmentation/README.md @@ -1,4 +1,4 @@ -# MindXSDK 人像分割与背景替换 +# Vision SDK 人像分割与背景替换 ## 1 介绍 diff --git a/contrib/PoseEstNet/README.md b/contrib/PoseEstNet/README.md index e9b06717f..904553de5 100644 --- a/contrib/PoseEstNet/README.md +++ b/contrib/PoseEstNet/README.md @@ -77,12 +77,12 @@ export install_path=${install_path} . 
${MX_SDK_HOME}/set_env.sh ``` -注:**${MX_SDK_HOME}** 替换为用户自己的Vision SDK安装路径(例如:"/home/xxx/MindX_SDK/mxVision");**${install_path}** 替换为CANN开发套件包所在路径(例如:/usr/local/Ascend/ascend-toolkit/latest)。 +注:**${MX_SDK_HOME}** 替换为用户自己的Vision SDK安装路径;**${install_path}** 替换为CANN开发套件包所在路径(例如:/usr/local/Ascend/ascend-toolkit/latest)。 ## 3 准备模型 -模型转换使用的是ATC工具,具体使用教程可参考[《ATC工具使用指南》](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md)。 +模型转换使用的是ATC工具,具体使用教程可参考[《ATC工具使用指南》](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/devaids/devtools/atc/atlasatc_16_0005.html)。 ### 3.1 yolov3的模型转换 **步骤1** 获取yolov3的原始模型(.pb文件)和相应的配置文件(.cfg文件) diff --git a/contrib/RTMHumanKeypointsDetection/README.md b/contrib/RTMHumanKeypointsDetection/README.md index 49a12553d..90b95c4ca 100644 --- a/contrib/RTMHumanKeypointsDetection/README.md +++ b/contrib/RTMHumanKeypointsDetection/README.md @@ -113,7 +113,7 @@ x86_64 Atlas 300I(型号3010)和arm Atlas 300I(型号3000)。 ## 4 编译与运行 步骤1 创建rtsp视频流 -使用live555创建rtsp视频流,live555的使用方法可以参考[链接](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/reference_material/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)。 +使用live555创建rtsp视频流,live555的使用方法可以参考[链接](https://gitee.com/ascend/mindsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md)。 步骤2 配置pipeline文件中的rtsp视频流地址、模型文件路径和视频的宽高 diff --git a/contrib/ReID/README.md b/contrib/ReID/README.md index b16932b21..0b0461e44 100644 --- a/contrib/ReID/README.md +++ b/contrib/ReID/README.md @@ -61,7 +61,7 @@ ReID #设置CANN环境变量 . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git a/contrib/ReIDv2/README.md b/contrib/ReIDv2/README.md index 9d6ebdb90..5994896f2 100644 --- a/contrib/ReIDv2/README.md +++ b/contrib/ReIDv2/README.md @@ -95,7 +95,7 @@ ReIDv2 #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` ## 3 准备模型 diff --git a/contrib/RemoteSensingSegmentation/README.md b/contrib/RemoteSensingSegmentation/README.md index 1ef9f2520..4f9c7a6e1 100644 --- a/contrib/RemoteSensingSegmentation/README.md +++ b/contrib/RemoteSensingSegmentation/README.md @@ -67,7 +67,7 @@ #设置CANN环境变量 . ${install_path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Mind SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git a/contrib/SOLOV2/README.md b/contrib/SOLOV2/README.md index 972ec22cb..8e967f1be 100644 --- a/contrib/SOLOV2/README.md +++ b/contrib/SOLOV2/README.md @@ -112,7 +112,7 @@ CANN 环境变量: - 环境变量介绍 ``` -SDK-path: SDK mxVision 安装路径 +SDK-path: Vision SDK 安装路径 ascend-toolkit-path: CANN 安装路径 ``` @@ -143,7 +143,7 @@ pip install -v -e . ## 3. 
模型转换 -本项目中采用的模型是SOLOV2 模型,参考实现代码:https://github.com/WXinlong/SOLO ,模型下载链接:https://www.hiascend.com/zh/software/modelzoo/models/detail/1/f32ed480a95b4686a070fff964b4fceb 。下载地址中包含onnx文件,同时也有对应的om文件,可以直接使用。也可以用ATC模型转换工具将onnx转换为om文件。模型转换工具相关介绍参考链接:https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/sdk/tutorials/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99.md 。 +本项目中采用的模型是SOLOV2 模型,参考实现代码:https://github.com/WXinlong/SOLO ,模型下载链接:https://www.hiascend.com/zh/software/modelzoo/models/detail/1/f32ed480a95b4686a070fff964b4fceb 。下载地址中包含onnx文件,同时也有对应的om文件,可以直接使用。也可以用ATC模型转换工具将onnx转换为om文件。模型转换工具相关介绍参考链接:https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/devaids/devtools/atc/atlasatc_16_0005.html 。 ### 3.1 模型转换方法 diff --git a/contrib/SuperRetina/README.md b/contrib/SuperRetina/README.md index 3c253034b..c1aee28a4 100644 --- a/contrib/SuperRetina/README.md +++ b/contrib/SuperRetina/README.md @@ -63,7 +63,7 @@ export install_path=${install_path} ``` 注: -**${MX_SDK_HOME}** 替换为用户自己的Vision SDK安装路径(例如:"/home/xxx/MindX_SDK/mxVision"); +**${MX_SDK_HOME}** 替换为用户自己的Vision SDK安装路径; **${install_path}** 替换为CANN开发套件包所在路径(例如:/usr/local/Ascend/ascend-toolkit/latest)。 diff --git a/contrib/TextSnake/README.md b/contrib/TextSnake/README.md index 9617e21e2..5f4c29350 100644 --- a/contrib/TextSnake/README.md +++ b/contrib/TextSnake/README.md @@ -58,13 +58,12 @@ TextSnake 弯曲形状文字检测基于 Vision SDK 开发,对图片中的任 在编译运行项目前,需要执行以下命令设置环境变量: ```bash -export PYTHONPATH=${MX_SDK_HOME}/python/:$PYTHONPATH +export PYTHONPATH=${Vision SDK安装路径}/python/:$PYTHONPATH export install_path=${install_path} . ${install_path}/set_env.sh -. ${MX_SDK_HOME}/set_env.sh +. ${Vision SDK安装路径}/set_env.sh ``` -注:**${MX_SDK_HOME}** 替换为用户自己的Vision SDK安装路径(例如:"/home/xxx/MindX_SDK/mxVision"); -**${install_path}** 替换为CANN开发套件包所在路径(例如:/usr/local/Ascend/ascend-toolkit/latest)。 +注:**${install_path}** 替换为CANN开发套件包所在路径(例如:/usr/local/Ascend/ascend-toolkit/latest)。 ## 3 准备模型 diff --git a/contrib/VCOD_SLTNet/README.md b/contrib/VCOD_SLTNet/README.md index 9bde1c7df..a04c710a0 100644 --- a/contrib/VCOD_SLTNet/README.md +++ b/contrib/VCOD_SLTNet/README.md @@ -48,7 +48,7 @@ #设置CANN环境变量,ascend-toolkit-path为cann安装路径 . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git a/contrib/VehicleRetrogradeRecognition/README.md b/contrib/VehicleRetrogradeRecognition/README.md index 03ff4e44e..28ff383fd 100644 --- a/contrib/VehicleRetrogradeRecognition/README.md +++ b/contrib/VehicleRetrogradeRecognition/README.md @@ -113,7 +113,7 @@ VehicleRetrogradeRecognition交通逆行识别后处理插件基于Vision SDK开发, #设置CANN环境变量(请确认install_path路径是否正确) . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . 
${SDK-path}/set_env.sh #查看环境变量 @@ -191,7 +191,7 @@ atc --model=./yolov4_dynamic_bs.onnx --framework=5 --output=yolov4_bs --input_fo bash build.sh ``` -命令执行成功后会在`VehicleRetrogradeRecognition/plugins/MxpiTrackRetrogradeCar`目录下生成build文件夹。将`VehicleRetrogradeRecognition/plugins/MxpiTrackRetrogradeCar/build`目录下生成的的libmxpi_trackretrogradecar.so下载后上传到`${SDK安装路径}/mxVision/lib/plugins`目录下,然后将权限设置为0640。 +命令执行成功后会在`VehicleRetrogradeRecognition/plugins/MxpiTrackRetrogradeCar`目录下生成build文件夹。将`VehicleRetrogradeRecognition/plugins/MxpiTrackRetrogradeCar/build`目录下生成的的libmxpi_trackretrogradecar.so下载后上传到`${Vision SDK安装路径}/lib/plugins`目录下,然后将权限设置为0640。 **步骤6**进入Vision SDK安装目录的`operators/opencvosd`目录下执行命令: ``` diff --git a/contrib/VideoGestureRecognition/run.sh b/contrib/VideoGestureRecognition/run.sh index 4f3b00f4f..61291b8c9 100644 --- a/contrib/VideoGestureRecognition/run.sh +++ b/contrib/VideoGestureRecognition/run.sh @@ -23,7 +23,7 @@ env_ready=true function check_env() { - # check MindXSDK env + # check MindSDK env if [ ! "${MX_SDK_HOME}" ]; then env_ready=false echo "please set MX_SDK_HOME path into env." diff --git a/contrib/YOLOX/README.md b/contrib/YOLOX/README.md index 7f9c63042..8ed9a91c2 100644 --- a/contrib/YOLOX/README.md +++ b/contrib/YOLOX/README.md @@ -91,7 +91,7 @@ YOLOX 的后处理插件接收模型推理插件输出的特征图,该特征 MindSDK 环境变量: ``` -. ${SDK-path}/set_env.sh # SDK-path: mxVision SDK 安装路径 +. ${SDK-path}/set_env.sh # SDK-path: Vision SDK 安装路径 ``` CANN 环境变量: diff --git a/contrib/build_all.sh b/contrib/build_all.sh deleted file mode 100644 index 0c861ba6a..000000000 --- a/contrib/build_all.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e -current_folder="$( cd "$(dirname "$0")" ;pwd -P )" - - -SAMPLE_FOLDER=( - # ActionRecognition/ - # CrowdCounting/ - # mxBase_wheatDetection/ - # EdgeDetectionPicture/ - HelmetIdentification/ - Individual/ - # human_segmentation/ - # OpenposeKeypointDetection/ - PersonCount/ - # FatigueDrivingRecognition/ - # CartoonGANPicture/ - # HeadPoseEstimation/ - FaceBoxes/ - BertTextClassification/ - # RTM3DTargetDetection/ - EfficientDet/ - SentimentAnalysis/ - # RotateObjectDetection/ - # FairMOT/ - UltraFastLaneDetection/ - VehicleIdentification/ - yunet/ - # RoadSegmentation/ - # PassengerflowEstimation/ - # VehicleRetrogradeRecognition/ - # Collision/ - # PassengerflowEstimation/ - CenterFace/ - YOLOX/ - PicoDet/ - SOLOV2/ - # OpenCVPlugin/ - RefineDet/ - FCOS - Faster_R-CNN/ - MeterReader/ - # RTMHumanKeypointsDetection/ -) - -err_flag=0 -for sample in ${SAMPLE_FOLDER[@]};do - cd ${current_folder}/${sample} - bash build.sh || { - echo -e "Failed to build ${sample}" - err_flag=1 - } -done - - -if [ ${err_flag} -eq 1 ]; then - exit 1 -fi -exit 0 diff --git a/contrib/facemaskdetection/README.md b/contrib/facemaskdetection/README.md index f68788231..583e84977 100644 --- a/contrib/facemaskdetection/README.md +++ b/contrib/facemaskdetection/README.md @@ -54,7 +54,7 @@ #设置CANN环境变量,ascend-toolkit-path为cann安装路径 . ${ascend-toolkit-path}/set_env.sh -#设置MindX SDK 环境变量,SDK-path为mxVision SDK 安装路径 +#设置Vision SDK 环境变量,SDK-path为Vision SDK 安装路径 . ${SDK-path}/set_env.sh ``` diff --git "a/docs/quickStart/1-1\345\256\211\350\243\205SDK\345\274\200\345\217\221\345\245\227\344\273\266.md" "b/docs/quickStart/1-1\345\256\211\350\243\205SDK\345\274\200\345\217\221\345\245\227\344\273\266.md" index c4ec40b9b..d7c9909f2 100644 --- "a/docs/quickStart/1-1\345\256\211\350\243\205SDK\345\274\200\345\217\221\345\245\227\344\273\266.md" +++ "b/docs/quickStart/1-1\345\256\211\350\243\205SDK\345\274\200\345\217\221\345\245\227\344\273\266.md" @@ -91,10 +91,7 @@ Verifying archive integrity... 100% SHA256 checksums are OK. All good. 
./Ascend-mindxsdk-mxvision_{version}_linux-{arch}.run --install ``` - 安装完成后,若未出现错误信息,表示软件成功安装于指定或默认路径(/usr/local/Ascend/mindx_sdk/mxVision/)下: -``` -Uncompressing ASCEND MINDXSDK RNN PACKAGE 100% -``` + 安装完成后,若未出现错误信息,表示软件成功安装。 **步骤7** 环境变量生效。 diff --git "a/docs/quickStart/4-1\346\217\222\344\273\266\345\274\200\345\217\221\350\260\203\350\257\225\346\214\207\345\257\274.md" "b/docs/quickStart/4-1\346\217\222\344\273\266\345\274\200\345\217\221\350\260\203\350\257\225\346\214\207\345\257\274.md" index 33011245a..469099db3 100644 --- "a/docs/quickStart/4-1\346\217\222\344\273\266\345\274\200\345\217\221\350\260\203\350\257\225\346\214\207\345\257\274.md" +++ "b/docs/quickStart/4-1\346\217\222\344\273\266\345\274\200\345\217\221\350\260\203\350\257\225\346\214\207\345\257\274.md" @@ -89,7 +89,7 @@ add_subdirectory(./mindx_sdk_plugin) ``` ### 注意事项 - `mindx_sdk_plugin/src/mxpi_sampleplugin/MxpiSamplePlugin.cpp`第141行起配置了pipeline中支持的props参数和默认值,如自行开发请按需求修改。 + `mind_sdk_plugin/src/mxpi_sampleplugin/MxpiSamplePlugin.cpp`第141行起配置了pipeline中支持的props参数和默认值,如自行开发请按需求修改。 ```c++ // auto parentNameProSptr = std::make_shared>(ElementProperty{ // STRING, "dataSource", "name", "the name of previous plugin", "mxpi_modelinfer0", "NULL", "NULL"}); diff --git "a/docs/quickStart/4-4\346\250\241\345\236\213Tensor\346\225\260\346\215\256\345\244\204\347\220\206&\350\207\252\345\256\232\344\271\211\346\250\241\345\236\213\345\220\216\345\244\204\347\220\206.md" "b/docs/quickStart/4-4\346\250\241\345\236\213Tensor\346\225\260\346\215\256\345\244\204\347\220\206&\350\207\252\345\256\232\344\271\211\346\250\241\345\236\213\345\220\216\345\244\204\347\220\206.md" index 347c19aa6..153341eb9 100644 --- "a/docs/quickStart/4-4\346\250\241\345\236\213Tensor\346\225\260\346\215\256\345\244\204\347\220\206&\350\207\252\345\256\232\344\271\211\346\250\241\345\236\213\345\220\216\345\244\204\347\220\206.md" +++ "b/docs/quickStart/4-4\346\250\241\345\236\213Tensor\346\225\260\346\215\256\345\244\204\347\220\206&\350\207\252\345\256\232\344\271\211\346\250\241\345\236\213\345\220\216\345\244\204\347\220\206.md" @@ -110,7 +110,7 @@ Atlas 300I pro、Atlas 300V pro ``` ├── samplePluginPostProc -| ├── mindx_sdk_plugin // 插件样例 +| ├── mind_sdk_plugin // 插件样例 | | ├── src | | | ├── mxpi_sampleplugin | | | | ├── MxpiSamplePlugin.cpp @@ -140,14 +140,14 @@ Atlas 300I pro、Atlas 300V pro | └── CMakeLists.txt ``` 上述目录中`samplePluginPostProc`为[工程根目录](https://gitee.com/ascend/mindsdk-referenceapps/tree/master/tutorials/samplePluginPostProc)(用户需跳转到页面自行下载), -`mindx_sdk_plugin`为上述根目录下的插件工程目录,`mxVision`为图像分类识别样例工程目录(复制 SDK-path/samples/mxVision文件夹到根目录下,SDK-path表示SDK安装路径)。 +`mind_sdk_plugin`为上述根目录下的插件工程目录,`mxVision`为图像分类识别样例工程目录(复制 SDK-path/samples/mxVision文件夹到根目录下,SDK-path表示SDK安装路径)。 test.jpg为分类识别样例所需图片,用户需要自行准备,并放置在对应目录下。 ## 2 设置环境变量 ``` -# MindX SDK环境变量: +# Vision SDK环境变量: .${SDK-path}/set_env.sh # CANN环境变量: @@ -157,7 +157,7 @@ test.jpg为分类识别样例所需图片,用户需要自行准备,并放置 export LD_LIBRARY_PATH=usr/lib64:$LD_LIBRARY_PATH # 环境变量介绍 -SDK-path:SDK mxVision安装路径 +SDK-path:Vision SDK安装路径 ascend-toolkit-path:CANN安装路径 ``` 相应地,`./mxVision/C++/`和`./mxVision/python/`目录下的run.sh脚本也需要做出对应修改。 将两脚本中环境变量路径: @@ -212,7 +212,7 @@ cd build cmake .. 
make ``` -编译完成后该工程`mindx_sdk_plugin/lib/plugins/`目录下会生成自定义插件*.so文件,mxVision/C++/目录下会生成可执行文件`main`。 +编译完成后该工程`mind_sdk_plugin/lib/plugins/`目录下会生成自定义插件*.so文件,mxVision/C++/目录下会生成可执行文件`main`。 **步骤3** 将插件复制到`${SDK-path}/lib/plugins/`目录下,执行以下脚本修改文件权限: ``` diff --git "a/docs/quickStart/Cmake\344\273\213\347\273\215.md" "b/docs/quickStart/Cmake\344\273\213\347\273\215.md" index 2f23b4174..cb355e453 100644 --- "a/docs/quickStart/Cmake\344\273\213\347\273\215.md" +++ "b/docs/quickStart/Cmake\344\273\213\347\273\215.md" @@ -106,7 +106,7 @@ add_compile_options(-fPIC -fstack-protector-all -g -Wl,-z,relro,-z,now,-z -pie - add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private) -# 配置环境变量MX_SDK_HOME,如:/home/xxxxxxx/MindX_SDK/mxVision,可在远程环境中用指令env查看 +# 配置环境变量MX_SDK_HOME,如:/home/xxxxxxx/Mind_SDK/mxVision,可在远程环境中用指令env查看 set(MX_SDK_HOME ${用户自己的SDK安装路径}) # 设置所需变量 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/") diff --git a/tutorials/DvppWrapperSample/main.cpp b/tutorials/DvppWrapperSample/main.cpp index 6fa02aab3..d47e7b406 100644 --- a/tutorials/DvppWrapperSample/main.cpp +++ b/tutorials/DvppWrapperSample/main.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved. * Description: Interface sample of DvppWrapper -* Author: MindX SDK +* Author: Mind SDK * Create: 2021 * History: NA */ diff --git a/tutorials/ModleSample/python/main.py b/tutorials/ModleSample/python/main.py index 1d3b1739c..043abd856 100644 --- a/tutorials/ModleSample/python/main.py +++ b/tutorials/ModleSample/python/main.py @@ -1,8 +1,8 @@ import numpy as np from mindx.sdk import Tensor -from mindx.sdk import base # mxVision 推理接口 -from mindx.sdk.base import Model # mxVision 推理接口 +from mindx.sdk import base # Vision SDK 推理接口 +from mindx.sdk.base import Model # Vision SDK 推理接口 def process(): @@ -43,6 +43,6 @@ def process(): print("output numpy array shape", output.shape) if __name__ == "__main__": - base.mx_init() # 初始化 mxVision 资源 + base.mx_init() # 初始化 Vision SDK 资源 process() base.mx_deinit() \ No newline at end of file diff --git a/tutorials/VideoEncoder&VideoDecoder/C++/main.cpp b/tutorials/VideoEncoder&VideoDecoder/C++/main.cpp index c6a5d947c..03fccf14a 100644 --- a/tutorials/VideoEncoder&VideoDecoder/C++/main.cpp +++ b/tutorials/VideoEncoder&VideoDecoder/C++/main.cpp @@ -199,7 +199,7 @@ void VdecThread(VideoDecoder& videoDecoder) // | // V // |获取解码结果| -// 线程3:用于获取解码结果(获取解码结果的线程由mxVision内部创建,用户仅需自定义回调函数、用于由该线程调用、获取解码结果) +// 线程3:用于获取解码结果(获取解码结果的线程由Vision SDK内部创建,用户仅需自定义回调函数、用于由该线程调用、获取解码结果) APP_ERROR VdecCallBack(MxBase::Image &decodedImage, uint32_t channelId, uint32_t frameId, void *userData) { @@ -248,7 +248,7 @@ void VencThread(VideoEncoder& videoEncoder) // | // V // |获取编码结果| -// 线程5:用于获取编码结果(用于获取编码结果的线程由mxVision内部创建,用户仅需自定义回调函数、用于由该线程调用、获取编码结果) +// 线程5:用于获取编码结果(用于获取编码结果的线程由Vision SDK内部创建,用户仅需自定义回调函数、用于由该线程调用、获取编码结果) APP_ERROR VencCallBack(std::shared_ptr& outDataPtr, uint32_t& outDataSize, uint32_t& channelId, uint32_t& frameId, void* userData) { diff --git a/tutorials/VideoEncoder&VideoDecoder/Python/main.py b/tutorials/VideoEncoder&VideoDecoder/Python/main.py index b88bf0753..63c89d052 100644 --- a/tutorials/VideoEncoder&VideoDecoder/Python/main.py +++ b/tutorials/VideoEncoder&VideoDecoder/Python/main.py @@ -89,7 +89,7 @@ def vdec_thread(video_decoder): # | # V # |获取解码结果| -# 线程3:用于获取解码结果(获取解码结果的线程由mxVision内部创建,用户仅需自定义回调函数、用于由该线程调用、获取解码结果) +# 线程3:用于获取解码结果(获取解码结果的线程由Vision SDK内部创建,用户仅需自定义回调函数、用于由该线程调用、获取解码结果) def vdec_callback_func(decoded_image, channel_id, 
frame_id): VDEC_TO_VENC_QUEUE.append(DecodedFrame(decoded_image, frame_id, channel_id)) @@ -118,7 +118,7 @@ def venc_thread(video_encoder): # | # V # |获取编码结果| -# 线程5:用于获取编码结果(用于获取编码结果的线程由mxVision内部创建,用户仅需自定义回调函数、用于由该线程调用、获取编码结果) +# 线程5:用于获取编码结果(用于获取编码结果的线程由Vision SDK内部创建,用户仅需自定义回调函数、用于由该线程调用、获取编码结果) def venc_callback_func(output, output_datasize, channel_id, frame_id): VENC_TO_FILE_SAVE_QUEUE.append(EncodedFrame(output, frame_id, channel_id)) diff --git a/tutorials/VideoEncoder&VideoDecoder/README.md b/tutorials/VideoEncoder&VideoDecoder/README.md index 2631a166d..54d977eb7 100644 --- a/tutorials/VideoEncoder&VideoDecoder/README.md +++ b/tutorials/VideoEncoder&VideoDecoder/README.md @@ -59,7 +59,7 @@ export LD_LIBRARY_PATH=${ffmpeg-lib-path}:$LD_LIBRARY_PATH **步骤3:修改main.cpp文件,指定VideoDecoder和VideoEncoder的基本初始化参数** -第**332**行到第**354**行展示了VideoDecoder和VideoEncoder的主要配置项,用户可以结合mxVision官方文档根据需要调整。本样例中仅指定必要配置项,如下所示: +第**332**行到第**354**行展示了VideoDecoder和VideoEncoder的主要配置项,用户可以结合Vision SDK官方文档根据需要调整。本样例中仅指定必要配置项,如下所示: 第**333**行 `"std::string filePath = "${filePath}"`中的${filePath}替换为步骤2中视频文件实际的路径。 @@ -102,7 +102,7 @@ bash build.sh **步骤2:修改main.py文件,指定VideoDecoder和VideoEncoder的基本初始化参数** -第**149**行到第**175**行展示了VideoDecoder和VideoEncoder的主要配置项,用户可以结合mxVision官方文档根据需要调整。本样例中仅指定必要配置项,如下所示: +第**149**行到第**175**行展示了VideoDecoder和VideoEncoder的主要配置项,用户可以结合Vision SDK官方文档根据需要调整。本样例中仅指定必要配置项,如下所示: 第**150**行 `"file_path = "${file_path}”"`中的${file_path}替换为步骤1中视频文件实际的路径。 diff --git a/tutorials/sampleCustomProto/CMakeLists.txt b/tutorials/sampleCustomProto/CMakeLists.txt index b2955d916..6c335f135 100644 --- a/tutorials/sampleCustomProto/CMakeLists.txt +++ b/tutorials/sampleCustomProto/CMakeLists.txt @@ -6,5 +6,5 @@ project(mxpi_samplecustomproto) set(MX_SDK_HOME /usr/local/Ascend/mxVision) add_subdirectory("./mxVision/C++") -add_subdirectory("./mindx_sdk_plugin") +add_subdirectory("./mind_sdk_plugin") add_subdirectory("./proto") \ No newline at end of file -- Gitee