From fb4a71c6c633738fb1b712a6310a4e2362667627 Mon Sep 17 00:00:00 2001 From: cajcak <1039183305@qq.com> Date: Mon, 11 Mar 2024 08:59:31 +0000 Subject: [PATCH 01/38] update --- Samples/YOLOV5MultiInput/python/README.md | 168 ++++++ Samples/YOLOV5MultiInput/python/data/.keep | 0 .../python/scripts/sample_build.sh | 48 ++ .../python/scripts/sample_run.sh | 12 + .../python/src/multi_process_yolo_nms.py | 174 +++++++ .../YOLOV5MultiInput/python/src/python/API.md | 317 +++++++++++ .../python/src/python/README.md | 41 ++ .../python/src/python/__init__.py | 0 .../python/src/python/acllite_image.py | 221 ++++++++ .../python/src/python/acllite_imageproc.py | 443 ++++++++++++++++ .../python/src/python/acllite_logger.py | 91 ++++ .../python/src/python/acllite_model.py | 434 +++++++++++++++ .../python/src/python/acllite_model_2.py | 477 +++++++++++++++++ .../python/src/python/acllite_model_bak.py | 455 ++++++++++++++++ .../python/src/python/acllite_resource.py | 110 ++++ .../python/src/python/acllite_utils.py | 261 ++++++++++ .../python/src/python/cameracapture.py | 94 ++++ .../python/src/python/constants.py | 217 ++++++++ .../python/src/python/dvpp_vdec.py | 259 +++++++++ .../python/src/python/lib/__init__.py | 0 .../python/src/python/lib/acllite_so.py | 38 ++ .../python/src/python/lib/src/Makefile | 88 ++++ .../python/src/python/lib/src/acllite_utils.h | 67 +++ .../python/src/python/lib/src/camera.cpp | 167 ++++++ .../python/src/python/lib/src/camera.h | 61 +++ .../src/python/presenteragent/__init__.py | 1 + .../python/presenteragent/presenter_agent.py | 91 ++++ .../presenteragent/presenter_channel.py | 144 +++++ .../presenteragent/presenter_datatype.py | 70 +++ .../presenteragent/presenter_message.proto | 67 +++ .../presenteragent/presenter_message.py | 70 +++ .../presenteragent/presenter_message_pb2.py | 493 ++++++++++++++++++ .../python/presenteragent/socket_client.py | 135 +++++ .../python/src/python/videocapture.py | 383 ++++++++++++++ 34 files changed, 5697 insertions(+) create mode 100644 Samples/YOLOV5MultiInput/python/README.md create mode 100644 Samples/YOLOV5MultiInput/python/data/.keep create mode 100644 Samples/YOLOV5MultiInput/python/scripts/sample_build.sh create mode 100644 Samples/YOLOV5MultiInput/python/scripts/sample_run.sh create mode 100644 Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/API.md create mode 100644 Samples/YOLOV5MultiInput/python/src/python/README.md create mode 100644 Samples/YOLOV5MultiInput/python/src/python/__init__.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_image.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/cameracapture.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/constants.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py create mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py create mode 100644 
Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py
 create mode 100644 Samples/YOLOV5MultiInput/python/src/python/videocapture.py

diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md
new file mode 100644
index 0000000..83b2e2c
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/README.md
@@ -0,0 +1,168 @@
+## Contents
+
+ - [Sample Introduction](#sample-introduction)
+ - [Getting the Source Package](#getting-the-source-package)
+ - [Installing Third-Party Dependencies](#installing-third-party-dependencies)
+ - [Running the Sample](#running-the-sample)
+ - [Other Resources](#other-resources)
+ - [Change Log](#change-log)
+ - [Known Issues](#known-issues)
+
+## Sample Introduction
+
+Taking the YOLOV7 network as an example, this sample uses AclLite to preprocess images and enables static AIPP during model conversion. With AIPP enabled, YUV420SP_U8 images are converted to RGB, then mean subtraction and normalization are applied, and these settings are baked into the converted offline model. Inference is then run with the YOLOV7 network to detect and classify objects in the image, outputting bounding boxes and class confidences.
+
+Sample input: an image.
+Sample output: the image with object detection results, including bounding boxes, classes, and confidences.
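+
+For reference, static AIPP is configured through a config file passed to ATC via `--insert_op_conf`. Below is a minimal sketch of such a file; the field names follow the ATC static AIPP schema, but the concrete values are illustrative placeholders, not the ones shipped with this sample:
+
+ ```
+ aipp_op {
+     aipp_mode : static
+     input_format : YUV420SP_U8
+     src_image_size_w : 640
+     src_image_size_h : 640
+     # color space conversion from YUV420SP_U8 to RGB
+     csc_switch : true
+     # mean subtraction and normalization baked into the om
+     mean_chn_0 : 0
+     var_reci_chn_0 : 0.0039216
+ }
+ ```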
+## Getting the Source Package
+
+ You can obtain the source in either of the following two ways; pick one.
+
+ - Download from the command line (longer download, but simpler steps).
+
+   ```
+   # In the development environment, run the following commands as a non-root user to clone the repository.
+   cd ${HOME}
+   git clone https://gitee.com/ascend/samples.git
+   ```
+   **Note: to switch to another tag, e.g. v0.5.0, run the following command.**
+   ```
+   git checkout v0.5.0
+   ```
+ - Download as a ZIP package (shorter download, but slightly more steps).
+   **Note: to download another version of the code, first switch the samples repository branch as described in the prerequisites.**
+   ```
+   # 1. In the top-right corner of the samples repository, open the [Clone/Download] dropdown and select [Download ZIP].
+   # 2. Upload the ZIP package to the home directory of a normal user in the development environment, e.g. ${HOME}/ascend-samples-master.zip.
+   # 3. In the development environment, run the following commands to unpack the package.
+   cd ${HOME}
+   unzip ascend-samples-master.zip
+   ```
+
+## Installing Third-Party Dependencies
+
+Set the environment variables that point to the headers and libraries the build depends on. Replace "$HOME/Ascend" with the actual installation path of the "Ascend-cann-toolkit" package.
+
+ ```
+ export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest
+ export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
+ export THIRDPART_PATH=${DDK_PATH}/thirdpart
+ export LD_LIBRARY_PATH=${THIRDPART_PATH}/lib:$LD_LIBRARY_PATH
+ ```
+ Create the THIRDPART_PATH directory:
+
+ ```
+ mkdir -p ${THIRDPART_PATH}
+ ```
+- acllite
+
+  Note: ffmpeg is built from source mainly so that the acllite library can be installed.
+  Run the following commands to install x264:
+
+  ```
+  # Download x264
+  cd ${HOME}
+  git clone https://code.videolan.org/videolan/x264.git
+  cd x264
+  # Install x264
+  ./configure --enable-shared --disable-asm
+  make
+  sudo make install
+  sudo cp /usr/local/lib/libx264.so.164 /lib
+  ```
+  Run the following commands to install ffmpeg:
+
+  ```
+  # Download ffmpeg
+  cd ${HOME}
+  wget http://www.ffmpeg.org/releases/ffmpeg-4.1.3.tar.gz --no-check-certificate
+  tar -zxvf ffmpeg-4.1.3.tar.gz
+  cd ffmpeg-4.1.3
+  # Install ffmpeg
+  ./configure --enable-shared --enable-pic --enable-static --disable-x86asm --enable-libx264 --enable-gpl --prefix=${THIRDPART_PATH}
+  make -j8
+  make install
+  ```
+  Run the following commands to install acllite:
+
+  ```
+  cd ${HOME}/samples/inference/acllite/cplusplus
+  make
+  make install
+  ```
+
+- opencv
+
+  Run the following command to install opencv (note: make sure it is a 3.x version):
+  ```
+  sudo apt-get install libopencv-dev
+  ```
+
+## Running the Sample
+
+ - Prepare the data
+
+   Download the input image for this sample from the link below and place it in the data directory.
+
+   ```
+   cd $HOME/samples/inference/modelInference/sampleYOLOV7/data
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg
+   ```
+
+ - Convert the model with ATC
+
+   Convert the original YOLOV7 model into an offline model (\*.om file) adapted to the Ascend 310 processor and place it in the model directory.
+
+   ```
+   # For convenience, the original model download and conversion commands are given here and can be copied and executed directly.
+   cd $HOME/samples/inference/modelInference/sampleYOLOV7/model
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg
+   atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg
+   ```
+
+ - Build the sample
+
+   Run the build script to start the build.
+   ```
+   cd $HOME/samples/inference/modelInference/sampleYOLOV7/scripts
+   bash sample_build.sh
+   ```
+ - Run the sample
+
+   Run the run script to start the sample.
+   ```
+   bash sample_run.sh
+   ```
+ - Results
+
+   After the run completes, the inferred image is generated in the out directory of the sample project; the comparison is shown below.
+   ![Sample output](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/out_dog.jpg "image-20211028101534905.png")
+
+## Other Resources
+
+The following resources provide a deeper understanding of the ONNX project and the YOLOV7 model:
+
+**ONNX**
+- [GitHub: ONNX](https://github.com/onnx/onnx)
+
+**Models**
+- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch)
+
+**Documentation**
+- [AscendCL Samples introduction](../README_CN.md)
+- [Developing deep neural network applications with the AscendCL API library](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html)
+- [Ascend documentation](https://www.hiascend.com/document?tag=community-developer)
+
+## Change Log
+ | Date | Update |
+|----|------|
+| 2023/03/07 | Added sampleYOLOV7/README.md |
+
+
+## Known Issues
+
+ None
diff --git a/Samples/YOLOV5MultiInput/python/data/.keep b/Samples/YOLOV5MultiInput/python/data/.keep
new file mode 100644
index 0000000..e69de29
diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh
new file mode 100644
index 0000000..a88536b
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
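+# Note: the Python sources in ../src load ../model/yolov5s_nms.om and
+# ../data/test.mp4 at runtime; this script only verifies that the converted
+# model is in place before running the (optional) native build below.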
+ModelPath="${ScriptPath}/../model"
+
+function build()
+{
+  if [ -d ${ScriptPath}/../build/intermediates/host ];then
+    rm -rf ${ScriptPath}/../build/intermediates/host
+  fi
+
+  mkdir -p ${ScriptPath}/../build/intermediates/host
+  cd ${ScriptPath}/../build/intermediates/host
+
+  cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE
+  if [ $? -ne 0 ];then
+    echo "[ERROR] cmake failed, please check your environment!"
+    return 1
+  fi
+  make
+  if [ $? -ne 0 ];then
+    echo "[ERROR] build failed, please check your environment!"
+    return 1
+  fi
+  cd - > /dev/null
+}
+function main()
+{
+  echo "[INFO] Sample preparation"
+
+  ret=`find ${ModelPath} -maxdepth 1 -name yolov5s_nms.om 2> /dev/null`
+
+  if [[ ${ret} ]];then
+    echo "[INFO] The yolov5s_nms.om already exists, start building"
+  else
+    echo "[ERROR] yolov5s_nms.om does not exist, please follow the README to convert the model and place it in the correct position!"
+    return 1
+  fi
+
+  build
+  if [ $? -ne 0 ];then
+    return 1
+  fi
+
+  echo "[INFO] Sample preparation is complete"
+}
+main

diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh
new file mode 100644
index 0000000..a141e29
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
+
+echo "[INFO] The sample starts to run"
+running_command="python3 ../src/multi_process_yolo_nms.py"
+cd ${ScriptPath}/../out
+${running_command}
+if [ $? -ne 0 ];then
+    echo "[ERROR] The program failed to run"
+else
+    echo "[INFO] The program ran successfully"
+fi
diff --git a/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py b/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py
new file mode 100644
index 0000000..2a67a94
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py
@@ -0,0 +1,174 @@
+import numpy as np
+import videocapture as video
+import acl
+import acllite_utils as utils
+import time
+import cv2
+import constants as const
+
+from acllite_resource import AclLiteResource
+from acllite_model import AclLiteModel
+from acllite_imageproc import AclLiteImageProc
+from acllite_image import AclLiteImage
+from acllite_logger import log_error, log_info
+
+from multiprocessing import Process, Queue, Pool, Value
+
+Q_PRE_SIZE = 32
+Q_OUT_SIZE = 32
+WAIT_TIME = 0.000003
+
+VIDEO_READ_FLAG = Value('d', 10)
+COUNT_PRE = Value('d', 0)
+
+class PrePair:
+    def __init__(self, data, ori):
+        self.data = data
+        self.ori = ori
+
+labels = ["person", "bicycle", "car", "motorbike", "aeroplane",
+          "bus", "train", "truck", "boat", "traffic light",
+          "fire hydrant", "stop sign", "parking meter", "bench", "bird",
+          "cat", "dog", "horse", "sheep", "cow",
+          "elephant", "bear", "zebra", "giraffe", "backpack",
+          "umbrella", "handbag", "tie", "suitcase", "frisbee",
+          "skis", "snowboard", "sports ball", "kite", "baseball bat",
+          "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
+          "wine glass", "cup", "fork", "knife", "spoon",
+          "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog",
+          "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table",
+          "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
+          "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
+          "scissors", "teddy bear", "hair drier", "toothbrush"]
+
+def init_resource():
+    resource = AclLiteResource()
+    resource.init()
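+
+# Pipeline overview: one preprocess worker per stream decodes frames with
+# OpenCV and letterboxes them into the model input size, a single infer
+# worker round-robins over the preprocess queues and runs the offline model,
+# and one postprocess worker per stream parses the detection output. The
+# string 'EOF' on a queue marks end-of-stream.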
+def preprocess(path, q_pre, model_width, model_height, channel):
+    print(f'sub process preprocess{channel} start')
+    width = 1920
+    height = 1080
+    scale_x = width / model_width
+    scale_y = height / model_height
+    # get scale factor
+    if scale_x > scale_y:
+        max_scale = scale_x
+        resize_shape = (model_width, int(height / max_scale))
+    else:
+        max_scale = scale_y
+        resize_shape = (int(width / max_scale), model_height)
+    count = 0
+    cap = cv2.VideoCapture(path)
+    if not cap.isOpened():
+        print('video connect failed')
+        exit(1)
+    while True:
+        ret, frame = cap.read()
+        if not ret:  # stream drained
+            print('cap read end! close subprocess cap read')
+            q_pre.put('EOF')
+            break
+        else:
+            img = np.zeros([model_height, model_width, 3], dtype=np.uint8)
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            resize_image = cv2.resize(frame_rgb, resize_shape)
+            img[0:resize_shape[1], 0:resize_shape[0]] = resize_image
+            q_pre.put(img)
+            count += 1
+    print(f'pre process end! {channel}')
+
+def infer(model_path, q_pre, q_out, pnums):
+    resource = AclLiteResource()
+    resource.init()
+    model = AclLiteModel(model_path)
+    count = 0
+    # image size info expected by the NMS om as its second input
+    image_info = np.array([640, 640,
+                           640, 640],
+                          dtype=np.float32)
+    start = time.time()
+    while True:
+        if pnums == 0:
+            break
+        for i, q in enumerate(q_pre):
+            img = q.get()
+            if isinstance(img, str):  # 'EOF' sentinel: this stream is done
+                pnums -= 1
+                q_out[i].put('EOF')
+                continue
+            output = model.execute([img, image_info])
+            count += 1
+            q_out[i].put(output)
+    end = time.time()
+    print(f'fps: {count / (end - start):.3f}')
+    del resource
+    del model
+    print('infer end! close infer')
+
+def postprocess(q_out, model_width, model_height):
+    width = 1920
+    height = 1080
+    while True:
+        output = q_out.get()
+        if isinstance(output, str):  # 'EOF' sentinel
+            print('postprocess end! close subprocess postprocess')
+            break
+        box_num = output[1][0, 0]
+        box_info = output[0].flatten()
+        scale_x = width / model_width
+        scale_y = height / model_height
+
+        # get scale factor
+        if scale_x > scale_y:
+            max_scale = scale_x
+        else:
+            max_scale = scale_y
+        colors = [0, 0, 255]
+
+        # map the boxes back to the original image
+        result_msg = ""
+        for n in range(int(box_num)):
+            ids = int(box_info[5 * int(box_num) + n])
+            score = box_info[4 * int(box_num) + n]
+            label = labels[ids] + ":" + str("%.2f" % score)
+            top_left_x = box_info[0 * int(box_num) + n] * max_scale
+            top_left_y = box_info[1 * int(box_num) + n] * max_scale
+            bottom_right_x = box_info[2 * int(box_num) + n] * max_scale
+            bottom_right_y = box_info[3 * int(box_num) + n] * max_scale
+            result_msg += f'label:{label} '
+            # cv2.rectangle(src_image, (int(top_left_x), int(top_left_y)),
+            #               (int(bottom_right_x), int(bottom_right_y)), colors)
+            # p3 = (max(int(top_left_x), 15), max(int(top_left_y), 15))
+            # cv2.putText(src_image, label, p3, cv2.FONT_ITALIC, 0.6, colors, 1)
+            # cv2.imshow('frame', src_image)
+            # cv2.imwrite(f'../out/out_{count}.jpg', src_image)
+        print(f'results: {result_msg}')
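+
+# Note on the output layout assumed by postprocess(): output[1][0, 0] holds
+# the number of valid boxes N, and output[0] flattens to six groups of N
+# values, [x1*N, y1*N, x2*N, y2*N, score*N, class_id*N], in model-input
+# coordinates; boxes are mapped back to the source frame with max_scale.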
+if __name__ == '__main__':
+    stream_path = "../data/test.mp4"
+    model_path = "../model/yolov5s_nms.om"
+    model_width = 640
+    model_height = 640
+    pnums = 2
+
+    q_pre = [Queue(maxsize=Q_PRE_SIZE) for i in range(pnums)]
+    q_out = [Queue(maxsize=Q_OUT_SIZE) for i in range(pnums)]
+
+    loopTime, initTime = time.time(), time.time()
+
+    processes = []
+    for i in range(pnums):
+        processes.append(Process(target=preprocess,
+                                 args=(stream_path, q_pre[i], model_width, model_height, i)))
+    processes.append(Process(target=infer, args=(model_path, q_pre, q_out, pnums)))
+    for i in range(pnums):
+        processes.append(Process(target=postprocess, args=(q_out[i], model_width, model_height)))
+
+    [process.start() for process in processes]
+    [process.join() for process in processes]
+    print('all subprocesses finished')

diff --git a/Samples/YOLOV5MultiInput/python/src/python/API.md b/Samples/YOLOV5MultiInput/python/src/python/API.md
new file mode 100644
index 0000000..39ed873
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/python/API.md
@@ -0,0 +1,317 @@
+## API Reference
+
+### AclLiteImage class
+
+AclLiteImage is part of the python-acllite common library. It provides a unified data wrapper for image data coming from the Atlas 200 DK camera, JPG files, or raw input buffers, so that the other library interfaces can process them uniformly.
+
+#### \_\_init\_\_
+
+Method: \_\_init\_\_ (image, width=0, height=0, size=0, memory_type=const.MEMORY_NORMAL)
+
+Description:
+
+Creates data in the AclLiteImage structure from the given argument list.
+
+Input parameters:
+
+image: image data; numpy arrays, jpeg/png file paths, and raw memory buffers are supported. Omitting it or passing an unsupported type raises an error.
+
+width: image width; optional when image is a jpeg/png file. Defaults to 0.
+
+height: image height; optional when image is a jpeg/png file. Defaults to 0.
+
+size: image data size; optional when image is a jpeg/png file. Defaults to 0.
+
+memory_type: where the image data is stored, i.e. normal, device, host, or dvpp memory; optional when image is a jpeg/png file. Defaults to MEMORY_NORMAL.
+
+Return value:
+
+Data in the AclLiteImage structure.
+
+Constraints:
+
+None
+
+#### save
+
+Method: save(filename)
+
+Description:
+
+Converts the AclLiteImage data to a numpy array and saves it as a binary file.
+
+Input parameters:
+
+filename: name of the saved file.
+
+Return value:
+
+None
+
+Constraints:
+
+None
+
+### Camera class
+
+The Camera class provides a Python interface for decoding the on-board camera of the Atlas 200 DK.
+
+#### is_opened
+
+Method: is_opened()
+
+Description:
+
+Checks whether the Atlas 200 DK on-board camera identified by the camera id of the initialized cameracapture object is open.
+
+Input parameters:
+
+None
+
+Return value:
+
+1. TRUE: the camera is open.
+
+2. FALSE: the camera is not open.
+
+Constraints:
+
+None
+
+#### read
+
+Method: read()
+
+Description:
+
+Reads an image from the camera identified by the id given when the cameracapture object was initialized, and stores it as data in the AclLiteImage structure.
+
+Input parameters:
+
+None
+
+Return value:
+
+Image data of type AclLiteImage.
+
+Constraints:
+
+None
+
+#### close
+
+Method: close()
+
+Description:
+
+Closes the opened camera.
+
+Input parameters:
+
+None
+
+Return value:
+
+None. On success, "Close camera" is printed.
+
+Constraints:
+
+None
+
+### AclLiteModel class
+
+AclLiteModel wraps the ACL model-inference interfaces in python-acllite, including but not limited to model loading and initialization, creation of the model input/output datasets, inference execution, and resource release.
+
+#### __init__
+
+Method: AclLiteModel(model_path, load_type)
+
+Description:
+
+Initializes the model-inference class.
+
+Input parameters:
+
+model_path: path of the model.
+
+load_type: how the model is loaded, 0 or 1, default 0. (0: load the offline model from a file; 1: load the offline model from memory)
+
+Return value:
+
+None
+
+Constraints:
+
+None
+
+#### execute
+
+Method: execute (input_list)
+
+Description:
+
+Inference interface. Converts the input data into an ACL dataset, feeds it to the model for inference, and represents the result as numpy arrays.
+
+Input parameters:
+
+input_list: model input data; AclLiteImage, numpy array, and {'data': , 'size': } dict structures are supported.
+
+Return value:
+
+numpy arrays holding the inference results.
+
+Constraints:
+
+None
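+
+As a quick orientation, the following minimal sketch shows how the classes above fit together; the model path and input shape are placeholders, and resource setup must happen before the model is created:
+
+```python
+import numpy as np
+from acllite_resource import AclLiteResource
+from acllite_model import AclLiteModel
+
+resource = AclLiteResource()
+resource.init()                    # create device, context and stream
+model = AclLiteModel("model.om")   # placeholder path to an offline model
+# placeholder input matching the model's expected shape
+data = np.zeros((1, 3, 640, 640), dtype=np.float32)
+outputs = model.execute([data])    # list of numpy arrays, one per model output
+del model
+del resource
+```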
+### AclLiteImageProc class
+
+AclLiteImageProc wraps the CANN media data processing interfaces in python-acllite, including but not limited to image decoding/encoding, video decoding/encoding, and crop-and-resize operations.
+
+#### jpegd
+
+Method: jpegd(image)
+
+Description:
+
+Image decoding interface; converts a jpeg image to yuv format.
+
+Input parameters:
+
+image: original jpeg image data, stored as an AclLiteImage.
+
+Return value:
+
+AclLiteImage holding the yuv image data.
+
+Constraints:
+
+None
+
+#### jpege
+
+Method: jpege(image)
+
+Description:
+
+Image encoding interface; converts a yuv image to jpeg format.
+
+Input parameters:
+
+image: original yuv image data, stored as an AclLiteImage.
+
+Return value:
+
+AclLiteImage holding the jpeg image data.
+
+Constraints:
+
+None
+
+#### crop_and_paste
+
+Method: crop_and_paste(image, width, height, crop_and_paste_width, crop_and_paste_height)
+
+Description:
+
+VPC (vision preprocessing core) interface; crops the original image and pastes it at the target size.
+
+Input parameters:
+
+image: original image data, stored as an AclLiteImage.
+
+width: original image width.
+
+height: original image height.
+
+crop_and_paste_width: width of the target image after VPC.
+
+crop_and_paste_height: height of the target image after VPC.
+
+Return value:
+
+AclLiteImage holding the image data after VPC.
+
+Constraints:
+
+None
+
+#### resize
+
+Method: resize(image, resize_width, resize_height)
+
+Description:
+
+Resizes the input image to the given size.
+
+Input parameters:
+
+image: original image data, stored as an AclLiteImage.
+
+resize_width: width of the resized image.
+
+resize_height: height of the resized image.
+
+Return value:
+
+AclLiteImage holding the resized image data.
+
+Constraints:
+
+None
+
+### Dvpp_Vdec class
+
+Dvpp_Vdec wraps the video-stream decoding interfaces in python-acllite, including splitting a stream into frames.
+
+#### read
+
+Method: read (no_wait)
+
+Description:
+
+Frame reading interface (asynchronous); reads data from the queue and sends it for decoding.
+
+Input parameters:
+
+no_wait: boolean. If True, data is read from the queue continuously and is_finished() must be used to check whether a frame has finished decoding; if False, data is read from the queue at the interval configured by READ_TIMEOUT, and an empty queue raises an error. Defaults to False.
+
+Return value:
+
+ret: execution result; SUCCESS on success; FAILED means there is still undecoded data but the interface read no data from the queue.
+
+image: the video frame that was read.
+
+Constraints:
+
+The video stream must be in one of the following formats:
+
+h264: main, baseline or high level, in annex-b format
+
+h265: main level
+
+#### process
+
+Method: process (input_data, input_size, user_data)
+
+Description:
+
+Video decoding interface; sends the frame data to be decoded to the decoder.
+
+Input parameters:
+
+input_data: input data.
+
+input_size: input data size.
+
+user_data: a Python object carrying user-defined data. For example, to track decoded frame indices, pass them through user_data; the value is forwarded to the VDEC callback so the callback can tell which frame it is handling.
+
+Return value:
+
+ret: execution result; SUCCESS on success, FAILED on failure.
+
+Constraints:
+
+None
\ No newline at end of file
diff --git a/Samples/YOLOV5MultiInput/python/src/python/README.md b/Samples/YOLOV5MultiInput/python/src/python/README.md
new file mode 100644
index 0000000..d24bdef
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/python/README.md
@@ -0,0 +1,41 @@
+# ACLLite-python Quick Deployment
+
+## Installation Steps
+
+Set the environment variables for the headers and libraries the build depends on. Replace "$HOME/Ascend" with the actual installation path of the "Ascend-cann-toolkit" package.
+
+ ```
+ export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest
+ export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
+ export THIRDPART_PATH=${DDK_PATH}/thirdpart
+ export LD_LIBRARY_PATH=${THIRDPART_PATH}/lib:$LD_LIBRARY_PATH
+ ```
+
+ Create the THIRDPART_PATH directory:
+ ```
+ mkdir -p ${THIRDPART_PATH}
+ ```
+Install the dependencies required by python-acllite in the runtime environment:
+
+ ```
+ # Install ffmpeg
+ sudo apt-get install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libavresample-dev
+ # Install other dependencies
+ python3 -m pip install --upgrade pip
+ python3 -m pip install Cython
+ sudo apt-get install pkg-config libxcb-shm0-dev libxcb-xfixes0-dev
+ # Install pyav
+ python3 -m pip install av==6.2.0
+ # Install pillow dependencies
+ sudo apt-get install libtiff5-dev libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python-tk
+ # Install numpy and PIL
+ python3 -m pip install numpy
+ python3 -m pip install Pillow
+ ```
+
+ The python acllite library is provided as source. To install it, copy the acllite directory to the third-party library directory of the runtime environment:
+
+ ```
+ # Copy the acllite directory to the third-party directory. If it changes later, the acllite folder there must be replaced as well.
+ cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH}
+ ```
diff --git a/Samples/YOLOV5MultiInput/python/src/python/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py
new file mode 100644
index 0000000..743cbed
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py
@@ -0,0 +1,221 @@
+import os
+import numpy as np
+from PIL import Image
+
+import acl
+import acllite_utils as utils
+import acllite_logger as acl_log
+import constants as const
+
+class AclLiteImage(object):
+    """Image data and operation class
+    Wrap image data and operation method, support jpeg, png, yuv file and
+    memory data
+
+    Attributes:
+        _run_mode: device run mode
+        _data: image binary data or numpy array
+        _memory_type: the data in which memory, include dvpp,
+                      device and np array
+        width: image width
+        height: image height
+        alignWidth: align image width
+        alignHeight: align image height
+        _encode_format: image format
+        _load_ok: load image success or not
+
+    """
+    _run_mode, _ = acl.rt.get_run_mode()
+
+    def __init__(self, image, width=0, height=0, alignWidth=0, alignHeight=0,
+                 size=0, memory_type=const.MEMORY_NORMAL):
+        """Create AclLiteImage instance
+        Args:
+            image: image data, binary, numpy array or file path
+            width: image width.
if image is jpeg or png file, + this arg is not nesscessary + height: image height. if image is jpeg or png file, this arg is + not nesscessary + size: image data size. if image is file path, this arg is not + nesscessary + memory_type: memory type of image data. if image is file path, this + arg is not nesscessary + """ + self._data = None + self._memory_type = memory_type + self.width = 0 + self.height = 0 + self.alignWidth = 0 + self.alignHeight = 0 + self.size = 0 + self._encode_format = const.ENCODE_FORMAT_UNKNOW + self._load_ok = True + + if isinstance(image, str): + self._instance_by_image_file(image, width, height) + elif isinstance(image, int): + self._instance_by_buffer(image, width, height, alignWidth, alignHeight, size) + elif isinstance(image, np.ndarray): + self._instance_by_nparray(image, width, height, alignWidth, alignHeight) + else: + acl_log.log_error("Create instance failed for " + "unknow image data type") + + def _instance_by_image_file(self, image_path, width, height): + # Get image format by filename suffix + self._encode_format = self._get_image_format_by_suffix(image_path) + if self._encode_format == const.ENCODE_FORMAT_UNKNOW: + acl_log.log_error("Load image %s failed" % (image_path)) + self._load_ok = False + return + + # Read image data from file to memory + self._data = np.fromfile(image_path, dtype=np.byte) + self._type = const.IMAGE_DATA_NUMPY + self.size = self._data.itemsize * self._data.size + self._memory_type = const.MEMORY_NORMAL + + # Get image parameters of jpeg or png file by pillow + if ((self._encode_format == const.ENCODE_FORMAT_JPEG) or + (self._encode_format == const.ENCODE_FORMAT_PNG)): + image = Image.open(image_path) + self.width, self.height = image.size + else: + # pillow can not decode yuv, so need input widht and height args + self.width = width + self.height = height + + def _get_image_format_by_suffix(self, filename): + suffix = os.path.splitext(filename)[-1].strip().lower() + if (suffix == ".jpg") or (suffix == ".jpeg"): + image_format = const.ENCODE_FORMAT_JPEG + elif suffix == ".png": + image_format = const.ENCODE_FORMAT_PNG + elif suffix == ".yuv": + image_format = const.ENCODE_FORMAT_YUV420_SP + else: + acl_log.log_error("Unsupport image format: ", suffix) + image_format = const.ENCODE_FORMAT_UNKNOW + + return image_format + + def is_loaded(self): + """Image file load result + When create image instance by file, call this method to check + file load success or not + + Returns: + True: load success + False: load failed + """ + return self._load_ok + + def _instance_by_buffer(self, image_buffer, width, height, alignWidth, alignHeight, size): + self.width = width + self.height = height + self.alignHeight = alignHeight + self.alignWidth = alignWidth + self.size = size + self._data = image_buffer + self._type = const.IMAGE_DATA_BUFFER + + def _instance_by_nparray(self, data, width, height, alignWidth, alignHeight): + self.width = width + self.height = height + self.alignHeight = alignHeight + self.alignWidth = alignWidth + self.size = data.itemsize * data.size + self._data = data + self._type = const.IMAGE_DATA_NUMPY + self._memory_type = const.MEMORY_NORMAL + + def byte_data_to_np_array(self): + """Trans image data to np array""" + if self._type == const.IMAGE_DATA_NUMPY: + return self._data.copy() + + return utils.copy_data_as_numpy(self._data, self.size, + self._memory_type, AclLiteImage._run_mode) + + def data(self): + """Get image binary data""" + if self._type == const.IMAGE_DATA_NUMPY: + if "bytes_to_ptr" in dir(acl.util): + 
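+                # newer pyACL builds expose acl.util.bytes_to_ptr, older ones
+                # only provide acl.util.numpy_to_ptr; branch accordingly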
bytes_data=self._data.tobytes() + factor_ptr=acl.util.bytes_to_ptr(bytes_data) + else: + factor_ptr=acl.util.numpy_to_ptr(self._data) + return factor_ptr + else: + return self._data + + def copy_to_dvpp(self): + """Copy image data to dvpp""" + device_ptr = utils.copy_data_to_dvpp(self.data(), self.size, + self._run_mode) + print(f'device_ptr: {device_ptr}') + if device_ptr is None: + acl_log.log_error("Copy image to dvpp failed") + return None + return AclLiteImage(device_ptr, self.width, self.height, 0, 0, + self.size, const.MEMORY_DVPP) + + def copy_to_host(self): + """"Copy data to host""" + if self._type == const.IMAGE_DATA_NUMPY: + data_np = self._data.copy() + return AclLiteImage(data_np, self.width, self.height, 0, 0) + + data = None + mem_type = const.MEMORY_HOST + if AclLiteImage._run_mode == const.ACL_HOST: + if self.is_local(): + data = utils.copy_data_host_to_host(self._data, self.size) + else: + data = utils.copy_data_device_to_host(self._data, self.size) + else: + data = utils.copy_data_device_to_device(self._data, self.size) + mem_type = const.MEMORY_DEVICE + if data is None: + acl_log.log_error("Copy image to host failed") + return None + + return AclLiteImage(data, self.width, self.height, 0, 0, self.size, mem_type) + + def is_local(self): + """Image data is in host server memory and access directly or not""" + # in atlas200dk, all kind memory can access directly + if AclLiteImage._run_mode == const.ACL_DEVICE: + return True + # in atlas300, only acl host memory or numpy array can access directly + elif ((AclLiteImage._run_mode == const.ACL_HOST) and + ((self._memory_type == const.MEMORY_HOST) or + (self._memory_type == const.MEMORY_NORMAL))): + return True + else: + return False + + def save(self, filename): + """Save image as file""" + image_np = self.byte_data_to_np_array() + image_np.tofile(filename) + + def destroy(self): + """Release image memory""" + if (self._data is None) or (self.size == 0): + acl_log.log_error("Release image abnormaly, data is None") + return + + if self._memory_type == const.MEMORY_DEVICE: + acl.rt.free(self._data) + elif self._memory_type == const.MEMORY_HOST: + acl.rt.free_host(self._data) + elif self._memory_type == const.MEMORY_DVPP: + acl.media.dvpp_free(self._data) + # numpy no need release + self._data = None + self.size = 0 + + def __del__(self): + self.destroy() + diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py new file mode 100644 index 0000000..3453b12 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py @@ -0,0 +1,443 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2021-01-20 20:12:13 +MODIFIED: 2021-01-29 14:04:45 +""" +import numpy as np +import acl +import acllite_utils as utils +from acllite_image import AclLiteImage +from acllite_logger import log_error, log_info +from acllite_resource import resource_list +import constants as constants + +class AclLiteImageProc(object): + """ + dvpp class + """ + + def __init__(self, acl_resource=None): + if acl_resource is None: + self._stream, ret = acl.rt.create_stream() + utils.check_ret("acl.rt.create_stream", ret) + self._run_mode, ret = acl.rt.get_run_mode() + utils.check_ret("acl.rt.get_run_mode", ret) + else: + self._stream = acl_resource.stream + self._run_mode = acl_resource.run_mode + self._dvpp_channel_desc = None + self._crop_config = None + self._paste_config = None + + self._init_resource() + + # AclLiteImageProc 
involves acl resources, which need to be released \ + # before the acl ends when the program exits, \ + # register here to the resource table to ensure the release timing + self._is_destroyed = False + resource_list.register(self) + + def _init_resource(self): + # Create dvpp channel + self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc() + ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc) + utils.check_ret("acl.media.dvpp_create_channel", ret) + + # Create a resize configuration + self._resize_config = acl.media.dvpp_create_resize_config() + + # Create yuv to jpeg configuration + self._jpege_config = acl.media.dvpp_create_jpege_config() + ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100) + utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret) + + def _gen_input_pic_desc(self, image, + width_align_factor=16, height_align_factor=2): + image.width = utils.align_up2(image.width) + image.height = utils.align_up2(image.height) + image.alignWidth = utils.align_up(image.width, width_align_factor) + image.alignHeight = utils.align_up(image.height, height_align_factor) + image.size = utils.yuv420sp_size(image.alignWidth, image.alignHeight) + + pic_desc = acl.media.dvpp_create_pic_desc() + acl.media.dvpp_set_pic_desc_data(pic_desc, image.data()) + acl.media.dvpp_set_pic_desc_format( + pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) + acl.media.dvpp_set_pic_desc_width(pic_desc, image.width) + acl.media.dvpp_set_pic_desc_height(pic_desc, image.height) + acl.media.dvpp_set_pic_desc_width_stride(pic_desc, image.alignWidth) + acl.media.dvpp_set_pic_desc_height_stride(pic_desc, image.alignHeight) + acl.media.dvpp_set_pic_desc_size(pic_desc, image.size) + + return pic_desc + + def _gen_output_pic_desc(self, width, height, + output_buffer, output_buffer_size, + width_align_factor=16, height_align_factor=2): + # Create output image + stride_width = utils.align_up(width, width_align_factor) + stride_height = utils.align_up(height, height_align_factor) + + pic_desc = acl.media.dvpp_create_pic_desc() + acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer) + acl.media.dvpp_set_pic_desc_format( + pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) + acl.media.dvpp_set_pic_desc_width(pic_desc, width) + acl.media.dvpp_set_pic_desc_height(pic_desc, height) + acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width) + acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height) + acl.media.dvpp_set_pic_desc_size(pic_desc, output_buffer_size) + + return pic_desc + + def _stride_yuv_size(self, width, height, + width_align_factor=16, height_align_factor=2): + stride_width = utils.align_up(width, width_align_factor) + stride_height = utils.align_up(height, height_align_factor) + stride_size = utils.yuv420sp_size(stride_width, stride_height) + + return stride_width, stride_height, stride_size + + def jpegd(self, image): + """ + jepg image to yuv image + """ + image.width = utils.align_up2(image.width) + image.height = utils.align_up2(image.height) + soc_version = acl.get_soc_name() + if soc_version == "Ascend310P3" or soc_version == "Ascend310B1" : + stride_width = utils.align_up64(image.width) + stride_height = utils.align_up16(image.height) + stride_size = utils.yuv420sp_size(stride_width, stride_height) + else: + stride_width = utils.align_up128(image.width) + stride_height = utils.align_up16(image.height) + stride_size = utils.yuv420sp_size(stride_width, stride_height) + # Create conversion output image desc + output_desc, out_buffer = 
self._gen_jpegd_out_pic_desc(image, stride_size) + ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc, + image.data(), + image.size, + output_desc, + self._stream) + if ret != constants.ACL_SUCCESS: + log_error("dvpp_jpeg_decode_async failed ret={}".format(ret)) + return None + ret = acl.media.dvpp_destroy_pic_desc(output_desc) + if ret != constants.ACL_SUCCESS: + log_error("dvpp_destroy_pic_desc failed ret={}".format(ret)) + return None + ret = acl.rt.synchronize_stream(self._stream) + if ret != constants.ACL_SUCCESS: + log_error("dvpp_jpeg_decode_async failed ret={}".format(ret)) + return None + + # Return the decoded AclLiteImage instance + return AclLiteImage(out_buffer, image.width, image.height, stride_width, + stride_height, stride_size, constants.MEMORY_DVPP) + + def _gen_jpegd_out_pic_desc(self, image, stride_size): + # Predict the memory size required to decode jpeg into yuv pictures + ret, out_buffer_size = self._get_jpegd_memory_size(image, stride_size) + if not ret: + return None + # Apply for memory for storing decoded yuv pictures + out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) + if ret != constants.ACL_SUCCESS: + log_error("AclLiteImageProc malloc failed, error: ", ret) + return None + + soc_version = acl.get_soc_name() + if soc_version == "Ascend310P3" or soc_version == "Ascend310B1" : + width_align_factor = 64 + height_align_factor = 16 + else: + width_align_factor = 128 + height_align_factor = 16 + # Create output image desc + pic_desc = self._gen_output_pic_desc( + image.width, + image.height, + out_buffer, + out_buffer_size, + width_align_factor, + height_align_factor) + return pic_desc, out_buffer + + def _get_jpegd_memory_size(self, image, stride_size): + if image.is_local(): + size, ret = acl.media.dvpp_jpeg_predict_dec_size( + image.data(), image.size, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) + if ret != constants.ACL_SUCCESS: + log_error("Predict jpeg decode size failed, return ", ret) + return False, 0 + return True, size + else: + return True, int(stride_size) + + def resize(self, image, resize_width, resize_height): + """ + Scale yuvsp420 picture to specified size + """ + resize_width = utils.align_up2(resize_width) + resize_height = utils.align_up2(resize_height) + soc_version = acl.get_soc_name() + if soc_version == "Ascend310B1" : + width_align_factor = 2 + height_align_factor = 2 + stride_width = resize_width + stride_height = utils.align_up2(resize_height) + else: + width_align_factor = 16 + height_align_factor = 2 + stride_width = utils.align_up16(resize_width) + stride_height = utils.align_up2(resize_height) + # Generate input picture desc + input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) + # Calculate the image size after scaling + output_size = utils.yuv420sp_size(stride_width, stride_height) + # Request memory for the zoomed picture + out_buffer, ret = acl.media.dvpp_malloc(output_size) + if ret != constants.ACL_SUCCESS: + log_error("AclLiteImageProc malloc failed, error: ", ret) + return None + # Create output image + output_desc = self._gen_output_pic_desc(resize_width, resize_height, + out_buffer, output_size, + width_align_factor, height_align_factor) + if output_desc is None: + log_error("Gen resize output desc failed") + return None + # Call dvpp asynchronous zoom interface to zoom pictures + ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc, + input_desc, + output_desc, + self._resize_config, + self._stream) + if ret != constants.ACL_SUCCESS: + log_error("Vpc resize 
async failed, error: ", ret) + return None + # Wait for the zoom operation to complete + ret = acl.rt.synchronize_stream(self._stream) + if ret != constants.ACL_SUCCESS: + log_error("Resize synchronize stream failed, error: ", ret) + return None + # Release the resources requested for scaling + acl.media.dvpp_destroy_pic_desc(input_desc) + acl.media.dvpp_destroy_pic_desc(output_desc) + return AclLiteImage(out_buffer, resize_width, resize_height, stride_width, + stride_height, output_size, constants.MEMORY_DVPP) + + def _gen_resize_out_pic_desc(self, resize_width, + resize_height, output_size): + out_buffer, ret = acl.media.dvpp_malloc(output_size) + if ret != constants.ACL_SUCCESS: + log_error("AclLiteImageProc malloc failed, error: ", ret) + return None + pic_desc = self._gen_output_pic_desc(resize_width, resize_height, + out_buffer, output_size) + return pic_desc, out_buffer + + def crop_and_paste( + self, + image, + width, + height, + crop_and_paste_width, + crop_and_paste_height): + """ + crop_and_paste + """ + log_info('AclLiteImageProc vpc crop and paste stage:') + crop_and_paste_width = utils.align_up2(crop_and_paste_width) + crop_and_paste_height = utils.align_up2(crop_and_paste_height) + soc_version = acl.get_soc_name() + if soc_version == "Ascend310B1" : + width_align_factor = 2 + height_align_factor = 2 + stride_width = crop_and_paste_width + stride_height = utils.align_up2(crop_and_paste_height) + else: + width_align_factor = 16 + height_align_factor = 2 + stride_width = utils.align_up16(crop_and_paste_width) + stride_height = utils.align_up2(crop_and_paste_height) + input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) + out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) + out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) + output_desc = self._gen_output_pic_desc( + crop_and_paste_width, + crop_and_paste_height, + out_buffer, + out_buffer_size, + width_align_factor, + height_align_factor) + self._crop_config = acl.media.dvpp_create_roi_config( + 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) + # set crop area: + rx = float(width) / float(crop_and_paste_width) + ry = float(height) / float(crop_and_paste_height) + if rx > ry: + dx = 0 + r = rx + dy = int((crop_and_paste_height - height / r) / 2) + else: + dy = 0 + r = ry + dx = int((crop_and_paste_width - width / r) / 2) + pasteRightOffset = int(crop_and_paste_width - 2 * dx) + pasteBottomOffset = int(crop_and_paste_height - 2 * dy) + if (pasteRightOffset % 2) == 0: + pasteRightOffset = pasteRightOffset - 1 + if (pasteBottomOffset % 2) == 0: + pasteBottomOffset = pasteBottomOffset - 1 + self._paste_config = acl.media.dvpp_create_roi_config( + 0, pasteRightOffset, 0, pasteBottomOffset) + ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, + input_desc, + output_desc, + self._crop_config, + self._paste_config, + self._stream) + utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) + ret = acl.rt.synchronize_stream(self._stream) + utils.check_ret("acl.rt.synchronize_stream", ret) + log_info('AclLiteImageProc vpc crop and paste stage success') + stride_width = crop_and_paste_width - 2 * dx + stride_height = crop_and_paste_height - 2 * dy + acl.media.dvpp_destroy_pic_desc(input_desc) + acl.media.dvpp_destroy_pic_desc(output_desc) + + return AclLiteImage(out_buffer, image.width, image.height, stride_width, + stride_height, out_buffer_size, constants.MEMORY_DVPP) + + def crop_and_paste_get_roi( + self, + image, + width, + height, + 
crop_and_paste_width, + crop_and_paste_height): + """ + :image: input image + :width: input image width + :height: input image height + :crop_and_paste_width: crop_and_paste_width + :crop_and_paste_height: crop_and_paste_height + :return: return AclLiteImage + """ + log_info('AclLiteImageProc vpc crop and paste stage:') + crop_and_paste_width = utils.align_up2(crop_and_paste_width) + crop_and_paste_height = utils.align_up2(crop_and_paste_height) + soc_version = acl.get_soc_name() + if soc_version == "Ascend310B1" : + width_align_factor = 2 + height_align_factor = 2 + stride_width = crop_and_paste_width + stride_height = utils.align_up2(crop_and_paste_height) + else: + width_align_factor = 16 + height_align_factor = 2 + stride_width = utils.align_up16(crop_and_paste_width) + stride_height = utils.align_up2(crop_and_paste_height) + input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) + out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) + out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) + output_desc = self._gen_output_pic_desc( + crop_and_paste_width, + crop_and_paste_height, + out_buffer, + out_buffer_size, + width_align_factor, + height_align_factor) + self._crop_config = acl.media.dvpp_create_roi_config( + 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) + self._paste_config = acl.media.dvpp_create_roi_config( + 0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1) + ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, + input_desc, + output_desc, + self._crop_config, + self._paste_config, + self._stream) + utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) + ret = acl.rt.synchronize_stream(self._stream) + utils.check_ret("acl.rt.synchronize_stream", ret) + log_info('AclLiteImageProc vpc crop and paste stage success') + stride_width = utils.align_up16(crop_and_paste_width) + stride_height = utils.align_up2(crop_and_paste_height) + acl.media.dvpp_destroy_pic_desc(input_desc) + acl.media.dvpp_destroy_pic_desc(output_desc) + return AclLiteImage(out_buffer, image.width, image.height, stride_width, + stride_height, out_buffer_size, constants.MEMORY_DVPP) + + def jpege(self, image): + """ + Convert yuv420sp pictures to jpeg pictures + """ + # create input image + input_desc = self._gen_input_pic_desc(image) + # Predict the memory size required for conversion + output_size, ret = acl.media.dvpp_jpeg_predict_enc_size( + input_desc, self._jpege_config) + if (ret != constants.ACL_SUCCESS): + log_error("Predict jpege output size failed") + return None + # Request memory required for conversion + output_buffer, ret = acl.media.dvpp_malloc(output_size) + if (ret != constants.ACL_SUCCESS): + log_error("Malloc jpege output memory failed") + return None + output_size_array = np.array([output_size], dtype=np.int32) + if "bytes_to_ptr" in dir(acl.util): + bytes_data = output_size_array.tobytes() + output_size_ptr = acl.util.bytes_to_ptr(bytes_data) + else: + output_size_ptr = acl.util.numpy_to_ptr(output_size_array) + + # Call jpege asynchronous interface to convert pictures + ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc, + input_desc, output_buffer, + output_size_ptr, + self._jpege_config, + self._stream) + if (ret != constants.ACL_SUCCESS): + log_error("Jpege failed, ret ", ret) + return None + # Wait for the conversion to complete + ret = acl.rt.synchronize_stream(self._stream) + if (ret != constants.ACL_SUCCESS): + log_error("Jpege synchronize stream, failed, ret ", ret) + return None + # 
Release resources + acl.media.dvpp_destroy_pic_desc(input_desc) + if "bytes_to_ptr" in dir(acl.util): + output_size_array=np.frombuffer(bytes_data,dtype=output_size_array.dtype).reshape(output_size_array.shape) + return AclLiteImage( + output_buffer, image.width, image.height, 0, 0, int( + output_size_array[0]), constants.MEMORY_DVPP) + + def destroy(self): + """ + dvpp resource release + """ + if self._is_destroyed: + return + + if self._resize_config: + acl.media.dvpp_destroy_resize_config(self._resize_config) + + if self._dvpp_channel_desc: + acl.media.dvpp_destroy_channel(self._dvpp_channel_desc) + acl.media.dvpp_destroy_channel_desc(self._dvpp_channel_desc) + + if self._jpege_config: + acl.media.dvpp_destroy_jpege_config(self._jpege_config) + self._is_destroyed = True + resource_list.unregister(self) + log_info("dvpp resource release success") + + def __del__(self): + self.destroy() + diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py new file mode 100644 index 0000000..5409410 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py @@ -0,0 +1,91 @@ +import sys +import os + +import acl + +_ACL_DEBUG = 0 +_ACL_INFO = 1 +_ACL_WARNING = 2 +_ACL_ERROR = 3 + +def log_error(*log_msg): + """Recode error level log to file + Args: + *log_msg: format string and args list + """ + log_str = [str(i) for i in log_msg] + log_str = "[ERROR]\t" + "".join(log_str) + print(log_str) + + caller_frame = sys._getframe().f_back + # caller file + filename = caller_frame.f_code.co_filename + # caller line no + line_no = caller_frame.f_lineno + # caller function + func_name = caller_frame.f_code.co_name + + message = "[" + filename + ":" + str(line_no) + \ + " " + func_name + "]" + log_str + acl.app_log(_ACL_ERROR, message) + +def log_warning(*log_msg): + """Recode warning level log to file + Args: + *log_msg: format string and args list + """ + log_str = [str(i) for i in log_msg] + log_str = "[WARNING]\t" + "".join(log_str) + + print(log_str) + + caller_frame = sys._getframe().f_back + # caller file + filename = caller_frame.f_code.co_filename + # caller line no + line_no = caller_frame.f_lineno + # caller function + func_name = caller_frame.f_code.co_name + + message = "[" + filename + ":" + str(line_no) + \ + " " + func_name + "]" + log_str + acl.app_log(_ACL_WARNING, message) + +def log_info(*log_msg): + """Recode info level log to file + Args: + *log_msg: format string and args list + """ + log_str = [str(i) for i in log_msg] + log_str = "[INFO]\t" + "".join(log_str) + print(log_str) + caller_frame = sys._getframe().f_back + # caller file + filename = caller_frame.f_code.co_filename + # caller line no + line_no = caller_frame.f_lineno + # caller function + func_name = caller_frame.f_code.co_name + + message = "[" + filename + ":" + str(line_no) + \ + " " + func_name + "]" + log_str + acl.app_log(_ACL_INFO, message) + +def log_debug(*log_msg): + """Recode debug level log to file + Args: + *log_msg: format string and args list + """ + log_str = [str(i) for i in log_msg] + log_str = "[DEBUG]\t" + "".join(log_str) + caller_frame = sys._getframe().f_back + # caller file + filename = caller_frame.f_code.co_filename + # caller line no + line_no = caller_frame.f_lineno + # caller function + func_name = caller_frame.f_code.co_name + + message = "[" + filename + ":" + str(line_no) + \ + " " + func_name + "]" + log_str + acl.app_log(_ACL_DEBUG, message) diff --git 
a/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py new file mode 100644 index 0000000..8c7bb09 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py @@ -0,0 +1,434 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2020-6-04 20:12:13 +MODIFIED: 2020-6-28 14:04:45 +""" +import acl +import struct +import numpy as np +import datetime +import sys +import os +import time + +import constants as const +import acllite_utils as utils +from acllite_logger import log_error, log_info, log_warning +from acllite_image import AclLiteImage +from acllite_resource import resource_list + +class AclLiteModel(object): + """ + wrap acl model inference interface, include input dataset construction, + execute, and output transform to numpy array + Attributes: + model_path: om offline mode file path + """ + + def __init__(self, model_path, load_type=0): + self._run_mode, ret = acl.rt.get_run_mode() + utils.check_ret("acl.rt.get_run_mode", ret) + self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE + if self._run_mode == const.ACL_HOST: + self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST + + self._model_path = model_path # string + self._load_type = load_type + self._model_id = None # pointer + self._input_num = 0 + self._input_buffer = [] + self._input_dataset = None + self._output_dataset = None + self._model_desc = None # pointer when using + self._output_size = 0 + self._init_resource() + self._is_destroyed = False + self.runMode_ = acl.rt.get_run_mode() + resource_list.register(self) + + def _init_resource(self): + log_info("Init model resource start...") + if not os.path.isfile(self._model_path): + log_error( + "model_path failed, please check. model_path=%s" % + self._model_path) + return const.FAILED + + if self._load_type == 0: + self._model_id, ret = acl.mdl.load_from_file(self._model_path) + utils.check_ret("acl.mdl.load_from_file", ret) + elif self._load_type == 1: + with open(self._model_path, "rb") as f: + om_bytes = f.read() + if om_bytes: + ptr = acl.util.bytes_to_ptr(om_bytes) + self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes)) + utils.check_ret("acl.mdl.load_from_mem", ret) + else: + log_error( + "model_context is null, please check. model_path=%s" % + self._model_path) + return const.FAILED + else: + log_error( + "load_type is not in 0 or 1, please check. 
load_type=%d" % + self._load_type) + return const.FAILED + self._model_desc = acl.mdl.create_desc() + ret = acl.mdl.get_desc(self._model_desc, self._model_id) + utils.check_ret("acl.mdl.get_desc", ret) + # get outputs num of model + self._output_size = acl.mdl.get_num_outputs(self._model_desc) + # create output dataset + self._gen_output_dataset(self._output_size) + # recode input data address,if need malloc memory,the memory will be + # reuseable + self._init_input_buffer() + log_info("Init model resource success") + self._gen_input_dataset() + + return const.SUCCESS + + def _gen_output_dataset(self, ouput_num): + log_info("AclLiteModel create model output dataset:") + dataset = acl.mdl.create_dataset() + for i in range(ouput_num): + # malloc device memory for output + size = acl.mdl.get_output_size_by_index(self._model_desc, i) + buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) + utils.check_ret("acl.rt.malloc", ret) + # crate oputput data buffer + dataset_buffer = acl.create_data_buffer(buf, size) + _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) + log_info("malloc output %d, size %d" % (i, size)) + if ret: + acl.rt.free(buf) + acl.destroy_data_buffer(dataset_buffer) + utils.check_ret("acl.destroy_data_buffer", ret) + self._output_dataset = dataset + log_info("Create model output dataset success") + + def _init_input_buffer(self): + self._input_num = acl.mdl.get_num_inputs(self._model_desc) + for i in range(self._input_num): + item = {"addr": None, "size": 0} + self._input_buffer.append(item) + + def _gen_input_dataset(self): + dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") + + self._input_dataset = acl.mdl.create_dataset() + self.input_buffers = [] + self.input_buffer_sizes = [] + for i in range(self._input_num): + input_buffer_size = acl.mdl.get_input_size_by_index(self._model_desc, i) + input_buffer, ret = acl.rt.malloc(input_buffer_size, const.ACL_MEM_MALLOC_HUGE_FIRST) + input_data = acl.create_data_buffer(input_buffer, input_buffer_size) + self._input_dataset, ret = acl.mdl.add_dataset_buffer(self._input_dataset, input_data) + if ret != const.ACL_SUCCESS: + print('acl.mdl.add_dataset_buffer failed, errorCode is', ret) + self.input_buffers.append(input_buffer) + self.input_buffer_sizes.append(input_buffer_size) + + return ret + + def _parse_input_data(self, input_data, index): + data = None + size = 0 + if isinstance(input_data, AclLiteImage): + size = input_data.size + data = input_data.data() + elif isinstance(input_data, np.ndarray): + size = input_data.size * input_data.itemsize + if "bytes_to_ptr" in dir(acl.util): + bytes_data = input_data.tobytes() + ptr = acl.util.bytes_to_ptr(bytes_data) + else: + ptr = acl.util.numpy_to_ptr(input_data) + + data = ptr + if data is None: + size = 0 + log_error("Copy input to device failed") + elif (isinstance(input_data, dict) and + ('data' in input_data.keys()) and ('size' in input_data.keys())): + size = input_data['size'] + data = input_data['data'] + else: + log_error("Unsupport input") + + return data, size + + def _copy_input_to_device(self, input_ptr, size, index): + buffer_item = self._input_buffer[index] + data = None + if buffer_item['addr'] is None: + if self._run_mode == const.ACL_HOST: + data = utils.copy_data_host_to_device(input_ptr, size) + else: + data = utils.copy_data_device_to_device(input_ptr, size) + if data is None: + log_error("Malloc memory and copy model %dth " + "input to device failed" % (index)) + return None + buffer_item['addr'] = 
data + buffer_item['size'] = size + elif size == buffer_item['size']: + if self._run_mode == const.ACL_HOST: + ret = acl.rt.memcpy(buffer_item['addr'], size, + input_ptr, size, + const.ACL_MEMCPY_HOST_TO_DEVICE) + else: + ret = acl.rt.memcpy(buffer_item['addr'], size, + input_ptr, size, + const.ACL_MEMCPY_DEVICE_TO_DEVICE) + if ret != const.ACL_SUCCESS: + log_error("Copy model %dth input to device failed" % (index)) + return None + data = buffer_item['addr'] + else: + log_error("The model %dth input size %d is change," + " before is %d" % (index, size, buffer_item['size'])) + return None + + return data + + def _set_dynamic_batch_size(self, batch): + dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") + if ret != const.ACL_SUCCESS: + log_error("get_input_index_by_name failed") + return const.FAILED + batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc) + if ret != const.ACL_SUCCESS: + log_error("get_dynamic_batch failed") + return const.FAILED + log_info("get dynamic_batch = ", batch_dic) + ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch) + if ret != const.ACL_SUCCESS: + log_error("set_dynamic_batch_size failed, ret = ", ret) + return const.FAILED + if batch in batch_dic["batch"]: + return const.SUCCESS + else: + assert ret == ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID + log_info("dynamic batch {} is not in {}".format(batch, batch_dic["batch"])) + return const.FAILED + + def _execute_with_dynamic_batch_size(self, input_list, batch): + ret = self._gen_input_dataset(input_list) + if ret == const.FAILED: + log_error("Gen model input dataset failed") + return None + + ret = self._set_dynamic_batch_size(batch) + if ret == const.FAILED: + log_error("Set dynamic batch failed") + return None + + ret = acl.mdl.execute(self._model_id, + self._input_dataset, + self._output_dataset) + if ret != const.ACL_SUCCESS: + log_error("Execute model failed for acl.mdl.execute error ", ret) + return None + + self._release_dataset(self._input_dataset) + self._input_dataset = None + + return self._output_dataset_to_numpy() + + def execute(self, input_list): + """ + inference input data + Args: + input_list: input data list, support AclLiteImage, + numpy array and {'data': ,'size':} dict + returns: + inference result data, which is a numpy array list, + each corresponse to a model output + """ + if self.runMode_ == const.ACL_DEVICE: + kind = const.ACL_MEMCPY_DEVICE_TO_DEVICE + else: + kind = const.ACL_MEMCPY_HOST_TO_DEVICE + for i,data_in in enumerate(input_list): + if "bytes_to_ptr" in dir(acl.util): + bytes_data = data_in.tobytes() + ptr = acl.util.bytes_to_ptr(bytes_data) + else: + ptr = acl.util.numpy_to_ptr(self.image_bytes) + ret = acl.rt.memcpy(self.input_buffers[i], + self.input_buffer_sizes[i], + ptr, + self.input_buffer_sizes[i], + kind) + # print(f'gen_input_data_time:{start - a}') + ret = acl.mdl.execute(self._model_id, + self._input_dataset, + self._output_dataset) + # print(f'infer execute run time: {b - start}') + if ret != const.ACL_SUCCESS: + log_error("Execute model failed for acl.mdl.execute error ", ret) + return None + # self._release_dataset(self._input_dataset) + # self._input_dataset = None + numpy_data = self._output_dataset_to_numpy() + return numpy_data + + def _output_dataset_to_numpy(self): + dataset = [] + output_tensor_list = self._gen_output_tensor() + num = acl.mdl.get_dataset_num_buffers(self._output_dataset) + + for i in range(num): + buf = acl.mdl.get_dataset_buffer(self._output_dataset, 
+    def _output_dataset_to_numpy(self):
+        dataset = []
+        output_tensor_list = self._gen_output_tensor()
+        num = acl.mdl.get_dataset_num_buffers(self._output_dataset)
+
+        for i in range(num):
+            buf = acl.mdl.get_dataset_buffer(self._output_dataset, i)
+            data = acl.get_data_buffer_addr(buf)
+            size = int(acl.get_data_buffer_size(buf))
+            output_ptr = output_tensor_list[i]["ptr"]
+            output_data = output_tensor_list[i]["tensor"]
+            if isinstance(output_data, bytes):
+                data_size = len(output_data)
+            else:
+                data_size = output_data.size * output_data.itemsize
+            ret = acl.rt.memcpy(output_ptr,
+                                data_size,
+                                data, size, self._copy_policy)
+            if ret != const.ACL_SUCCESS:
+                log_error("Memcpy inference output to local failed")
+                return None
+
+            if isinstance(output_data, bytes):
+                output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"])
+                output_tensor = output_data.copy()
+            else:
+                output_tensor = output_data
+            dataset.append(output_tensor)
+
+        return dataset
+
+    def _gen_output_tensor(self):
+        output_tensor_list = []
+        # map ACL output datatypes to numpy dtypes; the element size is then
+        # derived from the numpy dtype instead of a long if/elif ladder
+        acl_to_np = {
+            const.ACL_FLOAT: np.float32,
+            const.ACL_DOUBLE: np.float64,
+            const.ACL_INT64: np.int64,
+            const.ACL_UINT64: np.uint64,
+            const.ACL_INT32: np.int32,
+            const.ACL_UINT32: np.uint32,
+            const.ACL_FLOAT16: np.float16,
+            const.ACL_INT16: np.int16,
+            const.ACL_UINT16: np.uint16,
+            const.ACL_INT8: np.int8,
+            const.ACL_UINT8: np.uint8,
+            const.ACL_BOOL: np.uint8,
+        }
+        for i in range(self._output_size):
+            dims = acl.mdl.get_output_dims(self._model_desc, i)
+            shape = tuple(dims[0]["dims"])
+            datatype = acl.mdl.get_output_data_type(self._model_desc, i)
+            size = acl.mdl.get_output_size_by_index(self._model_desc, i)
+
+            if datatype not in acl_to_np:
+                log_error("Unsupported model output datatype ", datatype)
+                return None
+            np_type = acl_to_np[datatype]
+            output_tensor = np.zeros(
+                size // np.dtype(np_type).itemsize, dtype=np_type).reshape(shape)
+
+            if not output_tensor.flags['C_CONTIGUOUS']:
+                output_tensor = np.ascontiguousarray(output_tensor)
+
+            if "bytes_to_ptr" in dir(acl.util):
+                bytes_data = output_tensor.tobytes()
+                tensor_ptr = acl.util.bytes_to_ptr(bytes_data)
+                output_tensor_list.append({"ptr": tensor_ptr,
+                                           "tensor": bytes_data,
+                                           "shape": output_tensor.shape,
+                                           "dtype": output_tensor.dtype})
+            else:
+                tensor_ptr = acl.util.numpy_to_ptr(output_tensor)
+                output_tensor_list.append({"ptr": tensor_ptr,
+                                           "tensor": output_tensor})
+
+        return output_tensor_list
+
+    def _release_dataset(self, dataset, free_memory=False):
+        if not dataset:
+            return
+
+        num = acl.mdl.get_dataset_num_buffers(dataset)
+        for i in range(num):
+            data_buf = acl.mdl.get_dataset_buffer(dataset, i)
+            if data_buf:
+                self._release_databuffer(data_buf, free_memory)
+
+        ret = acl.mdl.destroy_dataset(dataset)
+        if ret != const.ACL_SUCCESS:
+            log_error("Destroy dataset error ", ret)
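+
+    # Note: _release_dataset() above destroys the dataset and its buffer
+    # descriptors; the device memory behind each buffer is freed only when
+    # free_memory=True, which _release_databuffer() below handles.
+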
+    def _release_databuffer(self, data_buffer, free_memory=False):
+        if free_memory:
+            data_addr = acl.get_data_buffer_addr(data_buffer)
+            if data_addr:
+                acl.rt.free(data_addr)
+
+        ret = acl.destroy_data_buffer(data_buffer)
+        if ret != const.ACL_SUCCESS:
+            log_error("Destroy data buffer error ", ret)
+
+    def destroy(self):
+        """
+        release resource of model inference
+        Args:
+            null
+        Returns:
+            null
+        """
+        if self._is_destroyed:
+            return
+
+        self._release_dataset(self._output_dataset, free_memory=True)
+        if self._model_id:
+            ret = acl.mdl.unload(self._model_id)
+            if ret != const.ACL_SUCCESS:
+                log_error("acl.mdl.unload error:", ret)
+
+        if self._model_desc:
+            ret = acl.mdl.destroy_desc(self._model_desc)
+            if ret != const.ACL_SUCCESS:
+                log_error("acl.mdl.destroy_desc error:", ret)
+
+        self._is_destroyed = True
+        resource_list.unregister(self)
+        log_info("AclLiteModel release resource success")
+
+    def __del__(self):
+        self.destroy()
diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py
new file mode 100644
index 0000000..faa60f4
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py
@@ -0,0 +1,477 @@
+"""
+Copyright (R) @huawei.com, all rights reserved
+-*- coding:utf-8 -*-
+CREATED: 2020-6-04 20:12:13
+MODIFIED: 2020-6-28 14:04:45
+"""
+import acl
+import struct
+import numpy as np
+import datetime
+import sys
+import os
+import time
+
+import constants as const
+import acllite_utils as utils
+from acllite_logger import log_error, log_info, log_warning
+from acllite_image import AclLiteImage
+from acllite_resource import resource_list
+
+class AclLiteModel(object):
+    """
+    wrap acl model inference interface, include input dataset construction,
+    execute, and output transform to numpy array
+    Attributes:
+        model_path: om offline model file path
+    """
+
+    def __init__(self, model_path, load_type=0):
+        self._run_mode, ret = acl.rt.get_run_mode()
+        utils.check_ret("acl.rt.get_run_mode", ret)
+        self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE
+        if self._run_mode == const.ACL_HOST:
+            self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST
+
+        self._model_path = model_path  # string
+        self._load_type = load_type
+        self._model_id = None  # pointer
+        self._input_num = 0
+        self._input_buffer = []
+        self._input_dataset = None
+        self._output_dataset = None
+        self._model_desc = None  # pointer when using
+        self._output_size = 0
+        self._init_resource()
+        self._is_destroyed = False
+        # acl.rt.get_run_mode() returns a (mode, ret) tuple, so reuse the
+        # mode saved above instead of storing the raw tuple
+        self.runMode_ = self._run_mode
+        resource_list.register(self)
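+
+    # load_type=0 loads the om file from disk via acl.mdl.load_from_file,
+    # while load_type=1 reads the file into host memory first and calls
+    # acl.mdl.load_from_mem.
+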
+    def _init_resource(self):
+        log_info("Init model resource start...")
+        if not os.path.isfile(self._model_path):
+            log_error(
+                "model_path failed, please check. model_path=%s" %
+                self._model_path)
+            return const.FAILED
+
+        if self._load_type == 0:
+            self._model_id, ret = acl.mdl.load_from_file(self._model_path)
+            utils.check_ret("acl.mdl.load_from_file", ret)
+        elif self._load_type == 1:
+            with open(self._model_path, "rb") as f:
+                om_bytes = f.read()
+            if om_bytes:
+                ptr = acl.util.bytes_to_ptr(om_bytes)
+                self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes))
+                utils.check_ret("acl.mdl.load_from_mem", ret)
+            else:
+                log_error(
+                    "model_context is null, please check. model_path=%s" %
+                    self._model_path)
+                return const.FAILED
+        else:
+            log_error(
+                "load_type is not 0 or 1, please check. load_type=%d" %
+                self._load_type)
+            return const.FAILED
+        self._model_desc = acl.mdl.create_desc()
+        ret = acl.mdl.get_desc(self._model_desc, self._model_id)
+        utils.check_ret("acl.mdl.get_desc", ret)
+        # get the number of model outputs
+        self._output_size = acl.mdl.get_num_outputs(self._model_desc)
+        # create output dataset
+        self._gen_output_dataset(self._output_size)
+        # record input data addresses; if memory needs to be malloced,
+        # it will be reusable
+        self._init_input_buffer()
+        log_info("Init model resource success")
+        self._gen_input_dataset()
+
+        return const.SUCCESS
+
+    def _gen_output_dataset(self, output_num):
+        log_info("AclLiteModel create model output dataset:")
+        dataset = acl.mdl.create_dataset()
+        for i in range(output_num):
+            # malloc device memory for output
+            size = acl.mdl.get_output_size_by_index(self._model_desc, i)
+            buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY)
+            utils.check_ret("acl.rt.malloc", ret)
+            # create output data buffer
+            dataset_buffer = acl.create_data_buffer(buf, size)
+            _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer)
+            log_info("malloc output %d, size %d" % (i, size))
+            if ret:
+                acl.rt.free(buf)
+                acl.destroy_data_buffer(dataset_buffer)
+                utils.check_ret("acl.destroy_data_buffer", ret)
+        self._output_dataset = dataset
+        log_info("Create model output dataset success")
+
+    def _init_input_buffer(self):
+        self._input_num = acl.mdl.get_num_inputs(self._model_desc)
+        for i in range(self._input_num):
+            item = {"addr": None, "size": 0}
+            self._input_buffer.append(item)
+
+    def _gen_input_dataset(self):
+        # preallocate one device buffer per model input and keep the dataset
+        # alive so that execute() can reuse it for every frame
+        self._input_dataset = acl.mdl.create_dataset()
+        self.input_buffers = []
+        self.input_buffer_sizes = []
+        ret = const.SUCCESS
+        for i in range(self._input_num):
+            input_buffer_size = acl.mdl.get_input_size_by_index(self._model_desc, i)
+            input_buffer, ret = acl.rt.malloc(input_buffer_size, const.ACL_MEM_MALLOC_HUGE_FIRST)
+            input_data = acl.create_data_buffer(input_buffer, input_buffer_size)
+            self._input_dataset, ret = acl.mdl.add_dataset_buffer(self._input_dataset, input_data)
+            if ret != const.ACL_SUCCESS:
+                log_error("acl.mdl.add_dataset_buffer failed, errorCode is ", ret)
+            self.input_buffers.append(input_buffer)
+            self.input_buffer_sizes.append(input_buffer_size)
+
+        return ret
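+
+    # Design note: unlike the backup implementation in acllite_model_bak.py,
+    # this variant allocates the input dataset once in _gen_input_dataset()
+    # and reuses it across execute() calls, so each inference only pays one
+    # memcpy per input instead of re-creating dataset buffers per frame.
+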
+    def _parse_input_data(self, input_data, index):
+        data = None
+        size = 0
+        if isinstance(input_data, AclLiteImage):
+            size = input_data.size
+            data = input_data.data()
+        elif isinstance(input_data, np.ndarray):
+            size = input_data.size * input_data.itemsize
+            if "bytes_to_ptr" in dir(acl.util):
+                bytes_data = input_data.tobytes()
+                ptr = acl.util.bytes_to_ptr(bytes_data)
+            else:
+                ptr = acl.util.numpy_to_ptr(input_data)
+            data = ptr
+            if data is None:
+                size = 0
+                log_error("Copy input to device failed")
+        elif (isinstance(input_data, dict) and
+              ('data' in input_data.keys()) and ('size' in input_data.keys())):
+            size = input_data['size']
+            data = input_data['data']
+        else:
+            log_error("Unsupported input")
+
+        return data, size
+
+    def _copy_input_to_device(self, input_ptr, size, index):
+        buffer_item = self._input_buffer[index]
+        data = None
+        if buffer_item['addr'] is None:
+            if self._run_mode == const.ACL_HOST:
+                data = utils.copy_data_host_to_device(input_ptr, size)
+            else:
+                data = utils.copy_data_device_to_device(input_ptr, size)
+            if data is None:
+                log_error("Malloc memory and copy model %dth "
+                          "input to device failed" % (index))
+                return None
+            buffer_item['addr'] = data
+            buffer_item['size'] = size
+        elif size == buffer_item['size']:
+            if self._run_mode == const.ACL_HOST:
+                ret = acl.rt.memcpy(buffer_item['addr'], size,
+                                    input_ptr, size,
+                                    const.ACL_MEMCPY_HOST_TO_DEVICE)
+            else:
+                ret = acl.rt.memcpy(buffer_item['addr'], size,
+                                    input_ptr, size,
+                                    const.ACL_MEMCPY_DEVICE_TO_DEVICE)
+            if ret != const.ACL_SUCCESS:
+                log_error("Copy model %dth input to device failed" % (index))
+                return None
+            data = buffer_item['addr']
+        else:
+            log_error("The model %dth input size %d changed,"
+                      " before it was %d" % (index, size, buffer_item['size']))
+            return None
+
+        return data
+
+    def _set_dynamic_batch_size(self, batch):
+        dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data")
+        if ret != const.ACL_SUCCESS:
+            log_error("get_input_index_by_name failed")
+            return const.FAILED
+        batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc)
+        if ret != const.ACL_SUCCESS:
+            log_error("get_dynamic_batch failed")
+            return const.FAILED
+        log_info("get dynamic_batch = ", batch_dic)
+        ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch)
+        if ret != const.ACL_SUCCESS:
+            log_error("set_dynamic_batch_size failed, ret = ", ret)
+            return const.FAILED
+        if batch in batch_dic["batch"]:
+            return const.SUCCESS
+        else:
+            log_error("dynamic batch {} is not in {}".format(batch, batch_dic["batch"]))
+            return const.FAILED
+
+    def _execute_with_dynamic_batch_size(self, input_list, batch):
+        # note: this path still calls _gen_input_dataset() with an argument,
+        # matching the per-call dataset flow of acllite_model_bak.py; it is
+        # not wired to the preallocated input buffers used by execute()
+        ret = self._gen_input_dataset(input_list)
+        if ret == const.FAILED:
+            log_error("Gen model input dataset failed")
+            return None
+
+        ret = self._set_dynamic_batch_size(batch)
+        if ret == const.FAILED:
+            log_error("Set dynamic batch failed")
+            return None
+
+        ret = acl.mdl.execute(self._model_id,
+                              self._input_dataset,
+                              self._output_dataset)
+        if ret != const.ACL_SUCCESS:
+            log_error("Execute model failed for acl.mdl.execute error ", ret)
+            return None
+
+        self._release_dataset(self._input_dataset)
+        self._input_dataset = None
+
+        return self._output_dataset_to_numpy()
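+
+    # _set_dynamic_batch_size() relies on the reserved model input named
+    # "ascend_mbatch_shape_data" that is added when an om model is converted
+    # with dynamic batch enabled; acl.mdl.get_dynamic_batch() then reports
+    # the legal batch sizes, e.g. a batch_dic like {"batch": [1, 2, 4, 8]}
+    # (example values, not taken from this sample).
+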
+    def execute(self, input_list):
+        """
+        inference input data
+        Args:
+            input_list: input data list, supports AclLiteImage,
+            numpy array and {'data': ,'size':} dict
+        returns:
+            inference result data, which is a numpy array list,
+            each corresponds to a model output
+        """
+        if self._run_mode == const.ACL_DEVICE:
+            kind = const.ACL_MEMCPY_DEVICE_TO_DEVICE
+        else:
+            kind = const.ACL_MEMCPY_HOST_TO_DEVICE
+        # copy each input (expected to be a numpy array here) into the
+        # preallocated device buffer with the same index
+        for i, data_in in enumerate(input_list):
+            if "bytes_to_ptr" in dir(acl.util):
+                bytes_data = data_in.tobytes()
+                ptr = acl.util.bytes_to_ptr(bytes_data)
+            else:
+                ptr = acl.util.numpy_to_ptr(data_in)
+            ret = acl.rt.memcpy(self.input_buffers[i],
+                                self.input_buffer_sizes[i],
+                                ptr,
+                                self.input_buffer_sizes[i],
+                                kind)
+            if ret != const.ACL_SUCCESS:
+                log_error("Copy model input %d to device failed" % (i))
+                return None
+        ret = acl.mdl.execute(self._model_id,
+                              self._input_dataset,
+                              self._output_dataset)
+        if ret != const.ACL_SUCCESS:
+            log_error("Execute model failed for acl.mdl.execute error ", ret)
+            return None
+        # the preallocated input dataset is reused, so it is not released here
+        numpy_data = self._output_dataset_to_numpy()
+        return numpy_data
+
+    def _output_dataset_to_numpy(self):
+        dataset = []
+        output_tensor_list = self._gen_output_tensor()
+        num = acl.mdl.get_dataset_num_buffers(self._output_dataset)
+
+        for i in range(num):
+            buf = acl.mdl.get_dataset_buffer(self._output_dataset, i)
+            data = acl.get_data_buffer_addr(buf)
+            size = int(acl.get_data_buffer_size(buf))
+            output_ptr = output_tensor_list[i]["ptr"]
+            output_data = output_tensor_list[i]["tensor"]
+            if isinstance(output_data, bytes):
+                data_size = len(output_data)
+            else:
+                data_size = output_data.size * output_data.itemsize
+            ret = acl.rt.memcpy(output_ptr,
+                                data_size,
+                                data, size, self._copy_policy)
+            if ret != const.ACL_SUCCESS:
+                log_error("Memcpy inference output to local failed")
+                return None
+
+            if isinstance(output_data, bytes):
+                output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"])
+                output_tensor = output_data.copy()
+            else:
+                output_tensor = output_data
+            dataset.append(output_tensor)
+
+        return dataset
+
+    def _gen_output_tensor(self):
+        output_tensor_list = []
+        for i in range(self._output_size):
+            dims = acl.mdl.get_output_dims(self._model_desc, i)
+            shape = tuple(dims[0]["dims"])
+            datatype = acl.mdl.get_output_data_type(self._model_desc, i)
+            size = acl.mdl.get_output_size_by_index(self._model_desc, i)
+
+            if datatype == const.ACL_FLOAT:
+                np_type = np.float32
+                output_tensor = np.zeros(
+                    size // 4, dtype=np_type).reshape(shape)
+            elif datatype == const.ACL_DOUBLE:
+                np_type = np.float64
+                output_tensor = np.zeros(
+                    size // 8, dtype=np_type).reshape(shape)
+            elif datatype == const.ACL_INT64:
+                np_type = np.int64
+                output_tensor = np.zeros(
+                    size // 8, dtype=np_type).reshape(shape)
+            elif datatype == const.ACL_UINT64:
+                np_type = np.uint64
+                output_tensor = np.zeros(
+                    size // 8, dtype=np_type).reshape(shape)
+            elif datatype == const.ACL_INT32:
+                np_type = np.int32
+                output_tensor = np.zeros(
+                    size // 4, dtype=np_type).reshape(shape)
+            elif datatype == const.ACL_UINT32:
+                np_type = np.uint32
+                output_tensor = np.zeros(
+                    size // 4, dtype=np_type).reshape(shape)
+
elif datatype == const.ACL_FLOAT16: + np_type = np.float16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT16: + np_type = np.int16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_UINT16: + np_type = np.uint16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT8: + np_type = np.int8 + output_tensor = np.zeros( + size, dtype=np_type).reshape(shape) + elif datatype == const.ACL_BOOL or datatype == const.ACL_UINT8: + np_type = np.uint8 + output_tensor = np.zeros( + size, dtype=np_type).reshape(shape) + else: + log_error("Unspport model output datatype ", datatype) + return None + + if not output_tensor.flags['C_CONTIGUOUS']: + output_tensor = np.ascontiguousarray(output_tensor) + + if "bytes_to_ptr" in dir(acl.util): + bytes_data=output_tensor.tobytes() + tensor_ptr=acl.util.bytes_to_ptr(bytes_data) + output_tensor_list.append({"ptr": tensor_ptr, + "tensor": bytes_data, + "shape":output_tensor.shape, + "dtype":output_tensor.dtype},) + else: + tensor_ptr = acl.util.numpy_to_ptr(output_tensor) + output_tensor_list.append({"ptr": tensor_ptr, + "tensor": output_tensor}) + + return output_tensor_list + + def _release_dataset(self, dataset, free_memory=False): + if not dataset: + return + + num = acl.mdl.get_dataset_num_buffers(dataset) + for i in range(num): + data_buf = acl.mdl.get_dataset_buffer(dataset, i) + if data_buf: + self._release_databuffer(data_buf, free_memory) + + ret = acl.mdl.destroy_dataset(dataset) + if ret != const.ACL_SUCCESS: + log_error("Destroy data buffer error ", ret) + + def _release_databuffer(self, data_buffer, free_memory=False): + if free_memory: + data_addr = acl.get_data_buffer_addr(data_buffer) + if data_addr: + acl.rt.free(data_addr) + + ret = acl.destroy_data_buffer(data_buffer) + if ret != const.ACL_SUCCESS: + log_error("Destroy data buffer error ", ret) + + def destroy(self): + """ + release resource of model inference + Args: + null + Returns: + null + """ + if self._is_destroyed: + return + + self._release_dataset(self._output_dataset, free_memory=True) + if self._model_id: + ret = acl.mdl.unload(self._model_id) + if ret != const.ACL_SUCCESS: + log_error("acl.mdl.unload error:", ret) + + if self._model_desc: + ret = acl.mdl.destroy_desc(self._model_desc) + if ret != const.ACL_SUCCESS: + log_error("acl.mdl.destroy_desc error:", ret) + + self._is_destroyed = True + resource_list.unregister(self) + log_info("AclLiteModel release source success") + + def __del__(self): + self.destroy() diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py new file mode 100644 index 0000000..2b6ffeb --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py @@ -0,0 +1,455 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2020-6-04 20:12:13 +MODIFIED: 2020-6-28 14:04:45 +""" +import acl +import struct +import numpy as np +import datetime +import sys +import os +import time + +import constants as const +import acllite_utils as utils +from acllite_logger import log_error, log_info, log_warning +from acllite_image import AclLiteImage +from acllite_resource import resource_list + +class AclLiteModel(object): + """ + wrap acl model inference interface, include input dataset construction, + execute, and output transform to numpy array + Attributes: + model_path: om offline mode file 
path + """ + + def __init__(self, model_path, load_type=0): + self._run_mode, ret = acl.rt.get_run_mode() + utils.check_ret("acl.rt.get_run_mode", ret) + self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE + if self._run_mode == const.ACL_HOST: + self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST + + self._model_path = model_path # string + self._load_type = load_type + self._model_id = None # pointer + self._input_num = 0 + self._input_buffer = [] + self._input_dataset = None + self._output_dataset = None + self._model_desc = None # pointer when using + self._output_size = 0 + self._init_resource() + self._is_destroyed = False + resource_list.register(self) + + def _init_resource(self): + log_info("Init model resource start...") + if not os.path.isfile(self._model_path): + log_error( + "model_path failed, please check. model_path=%s" % + self._model_path) + return const.FAILED + + if self._load_type == 0: + self._model_id, ret = acl.mdl.load_from_file(self._model_path) + utils.check_ret("acl.mdl.load_from_file", ret) + elif self._load_type == 1: + with open(self._model_path, "rb") as f: + om_bytes = f.read() + if om_bytes: + ptr = acl.util.bytes_to_ptr(om_bytes) + self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes)) + utils.check_ret("acl.mdl.load_from_mem", ret) + else: + log_error( + "model_context is null, please check. model_path=%s" % + self._model_path) + return const.FAILED + else: + log_error( + "load_type is not in 0 or 1, please check. load_type=%d" % + self._load_type) + return const.FAILED + self._model_desc = acl.mdl.create_desc() + ret = acl.mdl.get_desc(self._model_desc, self._model_id) + utils.check_ret("acl.mdl.get_desc", ret) + # get outputs num of model + self._output_size = acl.mdl.get_num_outputs(self._model_desc) + # create output dataset + self._gen_output_dataset(self._output_size) + # recode input data address,if need malloc memory,the memory will be + # reuseable + self._init_input_buffer() + log_info("Init model resource success") + + return const.SUCCESS + + def _gen_output_dataset(self, ouput_num): + log_info("AclLiteModel create model output dataset:") + dataset = acl.mdl.create_dataset() + for i in range(ouput_num): + # malloc device memory for output + size = acl.mdl.get_output_size_by_index(self._model_desc, i) + buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) + utils.check_ret("acl.rt.malloc", ret) + # crate oputput data buffer + dataset_buffer = acl.create_data_buffer(buf, size) + _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) + log_info("malloc output %d, size %d" % (i, size)) + if ret: + acl.rt.free(buf) + acl.destroy_data_buffer(dataset_buffer) + utils.check_ret("acl.destroy_data_buffer", ret) + self._output_dataset = dataset + log_info("Create model output dataset success") + + def _init_input_buffer(self): + self._input_num = acl.mdl.get_num_inputs(self._model_desc) + for i in range(self._input_num): + item = {"addr": None, "size": 0} + self._input_buffer.append(item) + + def _gen_input_dataset(self, input_list): + dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") + # if ret == const.ACL_SUCCESS: + # print('111111111111111111111111111111') + # dataLen = acl.mdl.get_input_size_by_index(self._model_desc, dynamicIdx) + # buf, ret = acl.rt.malloc(dataLen, const.ACL_MEM_MALLOC_NORMAL_ONLY) + # utils.check_ret("acl.rt.malloc", ret) + # batch_buffer = {'data': buf, 'size':dataLen} + # input_list.append(batch_buffer) + + ret = const.SUCCESS + if len(input_list) != 
self._input_num: + log_error("Current input data num %d unequal to model " + "input num %d" % (len(input_list), self._input_num)) + return const.FAILED + + self._input_dataset = acl.mdl.create_dataset() + for i in range(self._input_num): + item = input_list[i] + data, size = self._parse_input_data(item, i) + if (data is None) or (size == 0): + ret = const.FAILED + log_error("The %d input is invalid" % (i)) + break + + model_size = acl.mdl.get_input_size_by_index(self._model_desc, i) + if size != model_size: + log_warning(" Input[%d] size: %d not equal om size: %d" % (i, size, model_size) +\ + ", may cause inference result error, please check model input") + + + dataset_buffer = acl.create_data_buffer(data, size) + _, ret = acl.mdl.add_dataset_buffer(self._input_dataset, + dataset_buffer) + if ret: + log_error("Add input dataset buffer failed") + acl.destroy_data_buffer(self._input_dataset) + ret = const.FAILED + break + if ret == const.FAILED: + self._release_dataset(self._input_dataset) + self._input_dataset = None + + return ret + + def _parse_input_data(self, input_data, index): + data = None + size = 0 + if isinstance(input_data, AclLiteImage): + size = input_data.size + data = input_data.data() + elif isinstance(input_data, np.ndarray): + size = input_data.size * input_data.itemsize + if "bytes_to_ptr" in dir(acl.util): + bytes_data=input_data.tobytes() + ptr=acl.util.bytes_to_ptr(bytes_data) + else: + ptr = acl.util.numpy_to_ptr(input_data) + # start = time.time() + # data = self._copy_input_to_device(ptr, size, index) + # print(f'copy_to_device time:{time.time() - start}') + data = ptr + if data is None: + size = 0 + log_error("Copy input to device failed") + elif (isinstance(input_data, dict) and + ('data' in input_data.keys()) and ('size' in input_data.keys())): + size = input_data['size'] + data = input_data['data'] + else: + log_error("Unsupport input") + + return data, size + + def _copy_input_to_device(self, input_ptr, size, index): + buffer_item = self._input_buffer[index] + data = None + if buffer_item['addr'] is None: + if self._run_mode == const.ACL_HOST: + data = utils.copy_data_host_to_device(input_ptr, size) + else: + data = utils.copy_data_device_to_device(input_ptr, size) + if data is None: + log_error("Malloc memory and copy model %dth " + "input to device failed" % (index)) + return None + buffer_item['addr'] = data + buffer_item['size'] = size + elif size == buffer_item['size']: + if self._run_mode == const.ACL_HOST: + ret = acl.rt.memcpy(buffer_item['addr'], size, + input_ptr, size, + const.ACL_MEMCPY_HOST_TO_DEVICE) + else: + ret = acl.rt.memcpy(buffer_item['addr'], size, + input_ptr, size, + const.ACL_MEMCPY_DEVICE_TO_DEVICE) + if ret != const.ACL_SUCCESS: + log_error("Copy model %dth input to device failed" % (index)) + return None + data = buffer_item['addr'] + else: + log_error("The model %dth input size %d is change," + " before is %d" % (index, size, buffer_item['size'])) + return None + + return data + + def _set_dynamic_batch_size(self, batch): + dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") + if ret != const.ACL_SUCCESS: + log_error("get_input_index_by_name failed") + return const.FAILED + batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc) + if ret != const.ACL_SUCCESS: + log_error("get_dynamic_batch failed") + return const.FAILED + log_info("get dynamic_batch = ", batch_dic) + ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch) + if ret != 
const.ACL_SUCCESS: + log_error("set_dynamic_batch_size failed, ret = ", ret) + return const.FAILED + if batch in batch_dic["batch"]: + return const.SUCCESS + else: + assert ret == ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID + log_info("dynamic batch {} is not in {}".format(batch, batch_dic["batch"])) + return const.FAILED + + def _execute_with_dynamic_batch_size(self, input_list, batch): + ret = self._gen_input_dataset(input_list) + if ret == const.FAILED: + log_error("Gen model input dataset failed") + return None + + ret = self._set_dynamic_batch_size(batch) + if ret == const.FAILED: + log_error("Set dynamic batch failed") + return None + + ret = acl.mdl.execute(self._model_id, + self._input_dataset, + self._output_dataset) + if ret != const.ACL_SUCCESS: + log_error("Execute model failed for acl.mdl.execute error ", ret) + return None + + self._release_dataset(self._input_dataset) + self._input_dataset = None + + return self._output_dataset_to_numpy() + + def execute(self, input_list): + """ + inference input data + Args: + input_list: input data list, support AclLiteImage, + numpy array and {'data': ,'size':} dict + returns: + inference result data, which is a numpy array list, + each corresponse to a model output + """ + a = time.time() + ret = self._gen_input_dataset(input_list) + if ret == const.FAILED: + log_error("Gen model input dataset failed") + return None + start = time.time() + # print(f'gen_input_data_time:{start - a}') + ret = acl.mdl.execute(self._model_id, + self._input_dataset, + self._output_dataset) + b = time.time() + # print(f'infer execute run time: {b - start}') + if ret != const.ACL_SUCCESS: + log_error("Execute model failed for acl.mdl.execute error ", ret) + return None + self._release_dataset(self._input_dataset) + self._input_dataset = None + numpy_data = self._output_dataset_to_numpy() + c = time.time() + print(f'frame_count_execute gen_input_data_time: {start - a} infer_execute_run_time: {b - start} output_dataset_to_numpy: {c - b}') + return numpy_data + + def _output_dataset_to_numpy(self): + dataset = [] + output_tensor_list = self._gen_output_tensor() + num = acl.mdl.get_dataset_num_buffers(self._output_dataset) + + for i in range(num): + buf = acl.mdl.get_dataset_buffer(self._output_dataset, i) + data = acl.get_data_buffer_addr(buf) + size = int(acl.get_data_buffer_size(buf)) + output_ptr = output_tensor_list[i]["ptr"] + output_data = output_tensor_list[i]["tensor"] + if isinstance (output_data,bytes): + data_size = len(output_data) + else: + data_size = output_data.size * output_data.itemsize + ret = acl.rt.memcpy(output_ptr, + data_size, + data, size, self._copy_policy) + if ret != const.ACL_SUCCESS: + log_error("Memcpy inference output to local failed") + return None + + if isinstance (output_data,bytes): + output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"]) + output_tensor = output_data.copy() + else: + output_tensor = output_data + dataset.append(output_tensor) + + return dataset + + def _gen_output_tensor(self): + output_tensor_list = [] + for i in range(self._output_size): + dims = acl.mdl.get_output_dims(self._model_desc, i) + shape = tuple(dims[0]["dims"]) + datatype = acl.mdl.get_output_data_type(self._model_desc, i) + size = acl.mdl.get_output_size_by_index(self._model_desc, i) + + if datatype == const.ACL_FLOAT: + np_type = np.float32 + output_tensor = np.zeros( + size // 4, dtype=np_type).reshape(shape) + elif datatype == const.ACL_DOUBLE: + np_type = np.float64 + output_tensor 
= np.zeros( + size // 8, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT64: + np_type = np.int64 + output_tensor = np.zeros( + size // 8, dtype=np_type).reshape(shape) + elif datatype == const.ACL_UINT64: + np_type = np.uint64 + output_tensor = np.zeros( + size // 8, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT32: + np_type = np.int32 + output_tensor = np.zeros( + size // 4, dtype=np_type).reshape(shape) + elif datatype == const.ACL_UINT32: + np_type = np.uint32 + output_tensor = np.zeros( + size // 4, dtype=np_type).reshape(shape) + elif datatype == const.ACL_FLOAT16: + np_type = np.float16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT16: + np_type = np.int16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_UINT16: + np_type = np.uint16 + output_tensor = np.zeros( + size // 2, dtype=np_type).reshape(shape) + elif datatype == const.ACL_INT8: + np_type = np.int8 + output_tensor = np.zeros( + size, dtype=np_type).reshape(shape) + elif datatype == const.ACL_BOOL or datatype == const.ACL_UINT8: + np_type = np.uint8 + output_tensor = np.zeros( + size, dtype=np_type).reshape(shape) + else: + log_error("Unspport model output datatype ", datatype) + return None + + if not output_tensor.flags['C_CONTIGUOUS']: + output_tensor = np.ascontiguousarray(output_tensor) + + if "bytes_to_ptr" in dir(acl.util): + bytes_data=output_tensor.tobytes() + tensor_ptr=acl.util.bytes_to_ptr(bytes_data) + output_tensor_list.append({"ptr": tensor_ptr, + "tensor": bytes_data, + "shape":output_tensor.shape, + "dtype":output_tensor.dtype},) + else: + tensor_ptr = acl.util.numpy_to_ptr(output_tensor) + output_tensor_list.append({"ptr": tensor_ptr, + "tensor": output_tensor}) + + return output_tensor_list + + def _release_dataset(self, dataset, free_memory=False): + if not dataset: + return + + num = acl.mdl.get_dataset_num_buffers(dataset) + for i in range(num): + data_buf = acl.mdl.get_dataset_buffer(dataset, i) + if data_buf: + self._release_databuffer(data_buf, free_memory) + + ret = acl.mdl.destroy_dataset(dataset) + if ret != const.ACL_SUCCESS: + log_error("Destroy data buffer error ", ret) + + def _release_databuffer(self, data_buffer, free_memory=False): + if free_memory: + data_addr = acl.get_data_buffer_addr(data_buffer) + if data_addr: + acl.rt.free(data_addr) + + ret = acl.destroy_data_buffer(data_buffer) + if ret != const.ACL_SUCCESS: + log_error("Destroy data buffer error ", ret) + + def destroy(self): + """ + release resource of model inference + Args: + null + Returns: + null + """ + if self._is_destroyed: + return + + self._release_dataset(self._output_dataset, free_memory=True) + if self._model_id: + ret = acl.mdl.unload(self._model_id) + if ret != const.ACL_SUCCESS: + log_error("acl.mdl.unload error:", ret) + + if self._model_desc: + ret = acl.mdl.destroy_desc(self._model_desc) + if ret != const.ACL_SUCCESS: + log_error("acl.mdl.destroy_desc error:", ret) + + self._is_destroyed = True + resource_list.unregister(self) + log_info("AclLiteModel release source success") + + def __del__(self): + self.destroy() diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py new file mode 100644 index 0000000..9021be8 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py @@ -0,0 +1,110 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- 
+CREATED: 2021-01-20 20:12:13 +MODIFIED: 2021-02-03 14:04:45 +""" +import threading +import acl +from acllite_logger import log_info +import acllite_utils as utils + +REGISTER = 0 +UNREGISTER = 1 + +class _ResourceList(object): + """Acl resources of current application + This class provide register inferace of acl resource, when application + exit, all register resource will release befor acl.rt.reset_device to + avoid program abnormal + """ + _instance_lock = threading.Lock() + + def __init__(self): + self.resources = [] + + def __new__(cls, *args, **kwargs): + if not hasattr(_ResourceList, "_instance"): + with _ResourceList._instance_lock: + if not hasattr(_ResourceList, "_instance"): + _ResourceList._instance = object.__new__( + cls, *args, **kwargs) + return _ResourceList._instance + + def register(self, resource): + """Resource register interface + Args: + resource: object with acl resource, the object must be has + method destroy() + """ + item = {"resource": resource, "status": REGISTER} + self.resources.append(item) + + def unregister(self, resource): + """Resource unregister interface + If registered resource release by self and no need _ResourceList + release, the resource object should unregister self + Args: + resource: registered resource + """ + for item in self.resources: + if resource == item["resource"]: + item["status"] = UNREGISTER + + def destroy(self): + """Destroy all register resource""" + for item in self.resources: + if item["status"] == REGISTER: + item["resource"].destroy() + item["status"] = UNREGISTER + +resource_list = _ResourceList() + +class AclLiteResource(object): + """ + AclLiteResource + """ + + def __init__(self, device_id=0): + self.device_id = device_id + self.context = None + self.stream = None + self.run_mode = None + + def init(self): + """ + init resource + """ + log_info("init resource stage:") + ret = acl.init() + utils.check_ret("acl.init", ret) + + ret = acl.rt.set_device(self.device_id) + utils.check_ret("acl.rt.set_device", ret) + + self.context, ret = acl.rt.create_context(self.device_id) + utils.check_ret("acl.rt.create_context", ret) + + self.stream, ret = acl.rt.create_stream() + utils.check_ret("acl.rt.create_stream", ret) + + self.run_mode, ret = acl.rt.get_run_mode() + utils.check_ret("acl.rt.get_run_mode", ret) + + log_info("Init resource success") + + def __del__(self): + log_info("acl resource release all resource") + resource_list.destroy() + if self.stream: + log_info("acl resource release stream") + acl.rt.destroy_stream(self.stream) + + if self.context: + log_info("acl resource release context") + acl.rt.destroy_context(self.context) + + log_info("Reset acl device ", self.device_id) + acl.rt.reset_device(self.device_id) + acl.finalize() + log_info("Release acl resource success") diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py new file mode 100644 index 0000000..9ac84ea --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py @@ -0,0 +1,261 @@ +import numpy as np +import acl +import constants as const +from acllite_logger import log_error, log_info +import time + +from functools import wraps +DEBUG = True + +def check_ret(message, ret_int): + """Check int value is 0 or not + Args: + message: output log str + ret_int: check value that type is int + """ + if ret_int != 0: + raise Exception("{} failed ret_int={}" + .format(message, ret_int)) + +def check_none(message, ret_none): + """Check object is None or not + Args: + 
message: output log str + ret_none: check object + """ + if ret_none is None: + raise Exception("{} failed" + .format(message)) + +def copy_data_device_to_host(device_data, data_size): + """Copy device data to host + Args: + device_data: data that to be copyed + data_size: data size + Returns: + None: copy failed + others: host data which copy from device_data + """ + host_buffer, ret = acl.rt.malloc_host(data_size) + if ret != const.ACL_SUCCESS: + log_error("Malloc host memory failed, error: ", ret) + return None + + ret = acl.rt.memcpy(host_buffer, data_size, + device_data, data_size, + const.ACL_MEMCPY_DEVICE_TO_HOST) + if ret != const.ACL_SUCCESS: + log_error("Copy device data to host memory failed, error: ", ret) + acl.rt.free_host(host_buffer) + return None + + return host_buffer + +def copy_data_device_to_device(device_data, data_size): + """Copy device data to device + Args: + device_data: data that to be copyed + data_size: data size + Returns: + None: copy failed + others: device data which copy from device_data + """ + device_buffer, ret = acl.rt.malloc(data_size, + const.ACL_MEM_MALLOC_NORMAL_ONLY) + if ret != const.ACL_SUCCESS: + log_error("Malloc device memory failed, error: ", ret) + return None + + ret = acl.rt.memcpy(device_buffer, data_size, + device_data, data_size, + const.ACL_MEMCPY_DEVICE_TO_DEVICE) + if ret != const.ACL_SUCCESS: + log_error("Copy device data to device memory failed, error: ", ret) + acl.rt.free(device_buffer) + return None + + return device_buffer + +def copy_data_host_to_device(host_data, data_size): + """Copy host data to device + Args: + host_data: data that to be copyed + data_size: data size + Returns: + None: copy failed + others: device data which copy from host_data + """ + device_buffer, ret = acl.rt.malloc(data_size, + const.ACL_MEM_MALLOC_NORMAL_ONLY) + if ret != const.ACL_SUCCESS: + log_error("Malloc device memory failed, error: ", ret) + return None + + ret = acl.rt.memcpy(device_buffer, data_size, + host_data, data_size, + const.ACL_MEMCPY_HOST_TO_DEVICE) + if ret != const.ACL_SUCCESS: + log_error("Copy device data to device memory failed, error: ", ret) + acl.rt.free(device_buffer) + return None + + return device_buffer + +def copy_data_host_to_host(host_data, data_size): + """Copy host data to host + Args: + host_data: data that to be copyed + data_size: data size + Returns: + None: copy failed + others: host data which copy from host_data + """ + host_buffer, ret = acl.rt.malloc_host(data_size) + if ret != const.ACL_SUCCESS: + log_error("Malloc host memory failed, error: ", ret) + return None + + ret = acl.rt.memcpy(host_buffer, data_size, + host_data, data_size, + const.ACL_MEMCPY_HOST_TO_HOST) + if ret != const.ACL_SUCCESS: + log_error("Copy host data to host memory failed, error: ", ret) + acl.rt.free_host(host_buffer) + return None + + return host_buffer + +def copy_data_to_dvpp(data, size, run_mode): + """Copy data to dvpp + Args: + data: data that to be copyed + data_size: data size + run_mode: device run mode + Returns: + None: copy failed + others: data which copy from host_data + """ + policy = const.ACL_MEMCPY_HOST_TO_DEVICE + if run_mode == const.ACL_DEVICE: + policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE + + dvpp_buf, ret = acl.media.dvpp_malloc(size) + check_ret("acl.rt.malloc_host", ret) + + ret = acl.rt.memcpy(dvpp_buf, size, data, size, policy) + check_ret("acl.rt.memcpy", ret) + + return dvpp_buf + +def copy_data_as_numpy(data, size, data_mem_type, run_mode): + """Copy data as numpy array + Args: + data: data that to be 
copyed + size: data size + data_mem_type: src data memory type + run_mode: device run mode + Returns: + None: copy failed + others: numpy array whoes data copy from host_data + """ + np_data = np.zeros(size, dtype=np.byte) + if "bytes_to_ptr" in dir(acl.util): + bytes_data=np_data.tobytes() + np_data_ptr=acl.util.bytes_to_ptr(bytes_data) + else: + np_data_ptr = acl.util.numpy_to_ptr(np_data) + + policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE + if run_mode == const.ACL_HOST: + if ((data_mem_type == const.MEMORY_DEVICE) or + (data_mem_type == const.MEMORY_DVPP)): + policy = const.ACL_MEMCPY_DEVICE_TO_HOST + elif data_mem_type == const.MEMORY_HOST: + policy = const.ACL_MEMCPY_HOST_TO_HOST + + ret = acl.rt.memcpy(np_data_ptr, size, data, size, policy) + check_ret("acl.rt.memcpy", ret) + if "bytes_to_ptr" in dir(acl.util): + np_data=np.frombuffer(bytes_data,dtype=np_data.dtype).reshape(np_data.shape) + return np_data + +def align_up(value, align): + """Align up int value + Args: + value:input data + align: align data + Return: + aligned data + """ + return int(int((value + align - 1) / align) * align) + +def align_up16(value): + """Align up data with 16 + Args: + value:input data + Returns: + 16 aligned data + """ + return align_up(value, 16) + +def align_up64(value): + """Align up data with 128 + Args: + value:input data + Returns: + 128 aligned data + """ + return align_up(value, 64) + +def align_up128(value): + """Align up data with 128 + Args: + value:input data + Returns: + 128 aligned data + """ + return align_up(value, 128) + +def align_up2(value): + """Align up data with 2 + Args: + value:input data + Returns: + 2 aligned data + """ + return align_up(value, 2) + +def yuv420sp_size(width, height): + """Calculate yuv420sp image size + Args: + width: image width + height: image height + Returns: + image data size + """ + return int(width * height * 3 / 2) + +def rgbu8_size(width, height): + """Calculate rgb 24bit image size + Args: + width: image width + height: image height + Returns: + rgb 24bit image data size + """ + return int(width * height * 3) + +def display_time(func): + """print func execute time""" + @wraps(func) + def wrapper(*args, **kwargs): + """wrapper caller""" + if DEBUG: + btime = time.time() + res = func(*args, **kwargs) + use_time = time.time() - btime + log_info("in %s, use time:%s" % (func.__name__, use_time)) + return res + else: + return func(*args, **kwargs) + return wrapper diff --git a/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py b/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py new file mode 100644 index 0000000..c9129c2 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py @@ -0,0 +1,94 @@ +# !/usr/bin/env python +# -*- coding:utf-8 -*- +# +from ctypes import * +import os +import time +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +from lib.acllite_so import libacllite +import constants as const +from acllite_image import AclLiteImage +from acllite_logger import log_error, log_info + +CAMERA_OK = 0 +CAMERA_ERROR = 1 + +CAMERA_CLOSED = 0 +CAMERA_OPENED = 1 + +class CameraOutputC(Structure): + """Ctypes parameter object for frame data""" + _fields_ = [ + ('size', c_int), + ('data', POINTER(c_ubyte)) + ] + +class CameraCapture(object): + """Atlas200dk board camera access class""" + def __init__(self, camera_id, fps=15, size=(1280, 720)): + """Create camera instance + Args: + camera_id: camera slot + fps: frame per second + size: frame resolution + """ + self._id = 
camera_id + self._fps = fps + self._width = size[0] + self._height = size[1] + self._size = int(self._width * self._height * 3 / 2) + self._status = CAMERA_CLOSED + if CAMERA_OK == self._open(): + self._status = CAMERA_OPENED + else: + log_error("Open camera %d failed" % (camera_id)) + + def _open(self): + ret = libacllite.OpenCameraEx(self._id, self._fps, + self._width, self._height) + if (ret != CAMERA_OK): + log_error("Open camera %d failed ,ret = %d" % (self._id, ret)) + return CAMERA_ERROR + self._status = CAMERA_OPENED + return CAMERA_OK + + def is_opened(self): + """Camera is opened or not""" + return (self._status == CAMERA_OPENED) + + def read(self): + """Read frame from camera""" + frame_data = CameraOutputC() + ret = libacllite.ReadCameraFrame(self._id, byref(frame_data)) + if (ret != CAMERA_OK): + log_error("Read camera %d failed" % (self._id)) + return None + + return AclLiteImage( + addressof(frame_data.data.contents), + self._width, + self._height, + 0, + 0, + self._size, + const.MEMORY_DVPP) + + def close(self): + """Close camera""" + log_info("Close camera ", self._id) + libacllite.CloseCameraEx(self._id) + + def __del__(self): + self.close() + +if __name__ == "__main__": + cap = Camera(camera_id=0, fps=15, size=(1280, 720)) + + start = time.time() + for i in range(0, 100): + image = cap.read() + log_info("Read 100 frame exhaust ", time.time() - start) + diff --git a/Samples/YOLOV5MultiInput/python/src/python/constants.py b/Samples/YOLOV5MultiInput/python/src/python/constants.py new file mode 100644 index 0000000..8c22249 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/constants.py @@ -0,0 +1,217 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2020-6-04 20:12:13 +MODIFIED: 2020-6-06 14:04:45 +""" +SUCCESS = 0 +FAILED = 1 + +ACL_DEVICE = 0 +ACL_HOST = 1 + +MEMORY_NORMAL = 0 +MEMORY_HOST = 1 +MEMORY_DEVICE = 2 +MEMORY_DVPP = 3 +MEMORY_CTYPES = 4 + +IMAGE_DATA_NUMPY = 0 +IMAGE_DATA_BUFFER = 1 + +READ_VIDEO_OK = 0 + +# error code +ACL_SUCCESS = 0 +ACL_ERROR_INVALID_PARAM = 100000 +ACL_ERROR_UNINITIALIZE = 100001 +ACL_ERROR_REPEAT_INITIALIZE = 100002 +ACL_ERROR_INVALID_FILE = 100003 +ACL_ERROR_WRITE_FILE = 100004 +ACL_ERROR_INVALID_FILE_SIZE = 100005 +ACL_ERROR_PARSE_FILE = 100006 +ACL_ERROR_FILE_MISSING_ATTR = 100007 +ACL_ERROR_FILE_ATTR_INVALID = 100008 +ACL_ERROR_INVALID_DUMP_CONFIG = 100009 +ACL_ERROR_INVALID_PROFILING_CONFIG = 100010 +ACL_ERROR_INVALID_MODEL_ID = 100011 +ACL_ERROR_DESERIALIZE_MODEL = 100012 +ACL_ERROR_PARSE_MODEL = 100013 +ACL_ERROR_READ_MODEL_FAILURE = 100014 +ACL_ERROR_MODEL_SIZE_INVALID = 100015 +ACL_ERROR_MODEL_MISSING_ATTR = 100016 +ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017 +ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018 +ACL_ERROR_MODEL_NOT_DYNAMIC = 100019 +ACL_ERROR_OP_TYPE_NOT_MATCH = 100020 +ACL_ERROR_OP_INPUT_NOT_MATCH = 100021 +ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022 +ACL_ERROR_OP_ATTR_NOT_MATCH = 100023 +ACL_ERROR_OP_NOT_FOUND = 100024 +ACL_ERROR_OP_LOAD_FAILED = 100025 +ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026 +ACL_ERROR_FORMAT_NOT_MATCH = 100027 +ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028 +ACL_ERROR_KERNEL_NOT_FOUND = 100029 +ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030 +ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031 +ACL_ERROR_INVALID_QUEUE_ID = 100032 +ACL_ERROR_REPEAT_SUBSCRIBE = 100033 +ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034 +ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035 +ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036 +ACL_ERROR_REPEAT_FINALIZE = 100037 +ACL_ERROR_BAD_ALLOC = 200000 
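+# the 2xxxxx codes below continue the runtime/resource error group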
+ACL_ERROR_API_NOT_SUPPORT = 200001 +ACL_ERROR_INVALID_DEVICE = 200002 +ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003 +ACL_ERROR_RESOURCE_NOT_MATCH = 200004 +ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005 +ACL_ERROR_STORAGE_OVER_LIMIT = 300000 +ACL_ERROR_INTERNAL_ERROR = 500000 +ACL_ERROR_FAILURE = 500001 +ACL_ERROR_GE_FAILURE = 500002 +ACL_ERROR_RT_FAILURE = 500003 +ACL_ERROR_DRV_FAILURE = 500004 +# rule for mem +ACL_MEM_MALLOC_HUGE_FIRST = 0 +ACL_MEM_MALLOC_HUGE_ONLY = 1 +ACL_MEM_MALLOC_NORMAL_ONLY = 2 +# rule for memory copy +ACL_MEMCPY_HOST_TO_HOST = 0 +ACL_MEMCPY_HOST_TO_DEVICE = 1 +ACL_MEMCPY_DEVICE_TO_HOST = 2 +ACL_MEMCPY_DEVICE_TO_DEVICE = 3 +# input +LAST_ONE = -1 +LAST_TWO = -2 +type_dict = { + "bool": 0, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "float16": 2, + "float32": 4, + "float64": 8, + "float_": 8 +} +NPY_BOOL = 0 +NPY_BYTE = 1 +NPY_UBYTE = 2 +NPY_SHORT = 3 +NPY_USHORT = 4 +NPY_INT = 5 +NPY_UINT = 6 +NPY_LONG = 7 +NPY_ULONG = 8 +NPY_LONGLONG = 9 +NPY_ULONGLONG = 10 + +ACL_DT_UNDEFINED = -1 +ACL_FLOAT = 0 +ACL_FLOAT16 = 1 +ACL_INT8 = 2 +ACL_INT32 = 3 +ACL_UINT8 = 4 +ACL_INT16 = 6 +ACL_UINT16 = 7 +ACL_UINT32 = 8 +ACL_INT64 = 9 +ACL_UINT64 = 10 +ACL_DOUBLE = 11 +ACL_BOOL = 12 + +# data format +ACL_FORMAT_UNDEFINED = -1 +ACL_FORMAT_NCHW = 0 +ACL_FORMAT_NHWC = 1 +ACL_FORMAT_ND = 2 +ACL_FORMAT_NC1HWC0 = 3 +ACL_FORMAT_FRACTAL_Z = 4 +ACL_DT_UNDEFINED = -1 +ACL_FLOAT = 0 +ACL_FLOAT16 = 1 +ACL_INT8 = 2 +ACL_INT32 = 3 +ACL_UINT8 = 4 +ACL_INT16 = 6 +ACL_UINT16 = 7 +ACL_UINT32 = 8 +ACL_INT64 = 9 +ACL_UINT64 = 10 +ACL_DOUBLE = 11 +ACL_BOOL = 12 +acl_dtype = { + "dt_undefined": -1, + "float": 0, + "float16": 1, + "int8": 2, + "int32": 3, + "uint8": 4, + "int16": 6, + "uint16": 7, + "uint32": 8, + "int64": 9, + "double": 11, + "bool": 12 +} +ACL_CALLBACK_NO_BLOCK = 0 +ACL_CALLBACK_BLOCK = 1 +PIXEL_FORMAT_YUV_400 = 0 # 0, YUV400 8bit +PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1 # 1, YUV420SP NV12 8bit +PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2 # 2, YUV420SP NV21 8bit +PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3 # 3, YUV422SP NV12 8bit +PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4 # 4, YUV422SP NV21 8bit +PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5 # 5, YUV444SP NV12 8bit +PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6 # 6, YUV444SP NV21 8bit +PIXEL_FORMAT_YUYV_PACKED_422 = 7 # 7, YUV422P YUYV 8bit +PIXEL_FORMAT_UYVY_PACKED_422 = 8 # 8, YUV422P UYVY 8bit +PIXEL_FORMAT_YVYU_PACKED_422 = 9 # 9, YUV422P YVYU 8bit +PIXEL_FORMAT_VYUY_PACKED_422 = 10 # 10, YUV422P VYUY 8bit +PIXEL_FORMAT_YUV_PACKED_444 = 11 # 11, YUV444P 8bit +PIXEL_FORMAT_RGB_888 = 12 # 12, RGB888 +PIXEL_FORMAT_BGR_888 = 13 # 13, BGR888 +PIXEL_FORMAT_ARGB_8888 = 14 # 14, ARGB8888 +PIXEL_FORMAT_ABGR_8888 = 15 # 15, ABGR8888 +PIXEL_FORMAT_RGBA_8888 = 16 # 16, RGBA8888 +PIXEL_FORMAT_BGRA_8888 = 17 # 17, BGRA8888 +PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18 # 18, YUV420SP 10bit +PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19 # 19, YVU420sp 10bit +PIXEL_FORMAT_YVU_PLANAR_420 = 20 # 20, YUV420P 8bit +# images format +IMG_EXT = ['.jpg', '.JPG', '.png', '.PNG', '.bmp', '.BMP', '.jpeg', '.JPEG'] + +ENCODE_FORMAT_UNKNOW = 0 +ENCODE_FORMAT_JPEG = 1 +ENCODE_FORMAT_PNG = 2 +ENCODE_FORMAT_YUV420_SP = 3 + +""" +enType 0 +0 H265 main level +1 H264 baseline level +2 H264 main level +3 H264 high level +""" +ENTYPE_H265_MAIN = 0 +ENTYPE_H264_BASE = 1 +ENTYPE_H264_MAIN = 2 +ENTYPE_H264_HIGH = 3 + +# h264 stream codec id +AV_CODEC_ID_H264 = 27 +# h265 stream codec id +AV_CODEC_ID_HEVC = 173 +# h264 baseline level 
+FF_PROFILE_H264_BASELINE = 66 +# h264 main level profile +FF_PROFILE_H264_MAIN = 77 +# h264 high level profile +FF_PROFILE_H264_HIGH = 100 +# h265 main level profile +FF_PROFILE_HEVC_MAIN = 1 diff --git a/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py b/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py new file mode 100644 index 0000000..5c82cdc --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py @@ -0,0 +1,259 @@ +import numpy as np +import acl +import queue + +import constants as const +import acllite_utils as utils +import acllite_logger as acl_log +from acllite_image import AclLiteImage + +READ_TIMEOUT = 5 +WAIT_INTERVAL = 0.1 + +class DvppVdec(object): + """Decode h264/h265 stream by dvpp vdec + Decode one frame of h264/h265 stream.The stream must be h264 main, baseline + or high level, annex-b format, or h265 main level.Output image is yuv420sp + Attributes: + _channel_id: dvpp vdec channel parameter, must global unique + _width: input frame width + _height:input frame height + _run_flag:deocde is running or not currently, callback thread daemon condition + _callbak_tid: decode callback thread id + _channel_desc: vdec channel desc handle + _ctx: current thread acl context + _entype: video stream encode type, dvpp vdec support: + const.ENTYPE_H265_MAIN = 0 H265 main level + const.ENTYPE_H264_BASE = 1 H264 baseline level + const.ENTYPE_H264_MAIN = 2 H264 main level + const.ENTYPE_H264_HIGH = 3 H264 high level + _format: output frame image format, use yuv420sp + _decod_complete_cnt: output decoded complete frames counter + _decode_cnt: input frames counter + _output_pic_size: output image data size + _frame_queue: output decoded frame image queue + """ + + def __init__(self, channel_id, width, height, entype, ctx, + output_format=const.PIXEL_FORMAT_YUV_SEMIPLANAR_420): + """Create dvpp vdec instance + Args: + channel_id: decode channel id, must be global unique + width: frame width + height: frame height + entype: video stream encode type + ctx: current thread acl context + output_format: output image format, support yuv420 nv12 and nv21 + """ + self._channel_id = channel_id + self._width = width + self._height = height + self._run_flag = True + self._callbak_tid = None + self._channel_desc = None + self._ctx = ctx + self._entype = entype + self._format = output_format + self._decode_complete_cnt = 0 + self._decode_cnt = 0 + self._output_pic_size = (self._width * self._height * 3) // 2 + self._frame_queue = queue.Queue(64) + self._frame_config = None + self._destory_channel_flag = False + print('dvpp init ') + + def _callback_thread_entry(self, args_list): + ret = acl.rt.set_context(self._ctx) + while self._run_flag is True: + ret = acl.rt.process_report(300) + + def _callback(self, input_stream_desc, output_pic_desc, user_data): + self._decode_complete_cnt += 1 + #print("callback ", self._decode_complete_cnt) + input_stream_data = acl.media.dvpp_get_stream_desc_data( + input_stream_desc) + input_stream_data_size = acl.media.dvpp_get_stream_desc_size( + input_stream_desc) + ret = acl.media.dvpp_destroy_stream_desc(input_stream_desc) + + self._get_pic_desc_data(output_pic_desc, user_data) + + def _get_pic_desc_data(self, pic_desc, user_data): + pic_data = acl.media.dvpp_get_pic_desc_data(pic_desc) + pic_data_size = acl.media.dvpp_get_pic_desc_size(pic_desc) + ret_code = acl.media.dvpp_get_pic_desc_ret_code(pic_desc) + if ret_code: + channel_id, frame_id = user_data + acl_log.log_error("Decode channel %d frame %d failed, error %d" + % (channel_id, 
frame_id, ret_code)) + acl.media.dvpp_free(pic_data) + else: + image = AclLiteImage(pic_data, self._width, self._height, 0, 0, + pic_data_size, const.MEMORY_DVPP) + self._frame_queue.put(image) + acl.media.dvpp_destroy_pic_desc(pic_desc) + + def init(self): + """Init dvpp vdec + Returns: + const.SUCCESS: init success + const.FAILED: init failed + """ + + print('--------111') + self._channel_desc = acl.media.vdec_create_channel_desc() + print('--------333') + self._callbak_tid, ret = acl.util.start_thread( + self._callback_thread_entry, []) + acl.media.vdec_set_channel_desc_channel_id(self._channel_desc, + self._channel_id) + acl.media.vdec_set_channel_desc_thread_id(self._channel_desc, + self._callbak_tid) + print('--------222') + acl.media.vdec_set_channel_desc_callback(self._channel_desc, + self._callback) + + acl.media.vdec_set_channel_desc_entype(self._channel_desc, + self._entype) + acl.media.vdec_set_channel_desc_out_pic_format(self._channel_desc, + self._format) + + out_mode = acl.media.vdec_get_channel_desc_out_mode(self._channel_desc) + if out_mode != 0: + acl_log.log_error("Dvpp vdec out mode(%d) is invalid" % (out_mode)) + return const.FAILED + + + acl.media.vdec_set_channel_desc_out_mode(self._channel_desc, + out_mode) + acl.media.vdec_create_channel(self._channel_desc) + + self._frame_config = acl.media.vdec_create_frame_config() + if self._frame_config is None: + acl_log.log_error("Create dvpp frame config failed") + return const.FAILED + + return const.SUCCESS + + def _thread_join(self): + if self._callbak_tid is not None: + self._run_flag = False + ret = acl.util.stop_thread(self._callbak_tid) + self._callbak_tid = None + + def process(self, input_data, input_size, user_data): + """Decode frame + Args: + input_data: input frame data + input_size: input frame data size + + Returns: + const.SUCCESS: process success + const.FAILED: process failed + """ + input_stream_desc = self._create_input_pic_stream_desc(input_data, + input_size) + if input_stream_desc is None: + acl_log.log_error("Dvpp vdec decode frame failed for " + "create input stream desc error") + return const.FAILED + + output_pic_desc = self._create_output_pic_desc() + if output_pic_desc is None: + acl_log.log_error("Dvpp vdec decode frame failed for create " + "output pic desc failed") + return const.FAILED + + ret = acl.media.vdec_send_frame(self._channel_desc, input_stream_desc, + output_pic_desc, self._frame_config, + user_data) + if ret: + acl_log.log_error("Dvpp vdec send frame failed, error ", ret) + return const.FAILED + + self._decode_cnt += 1 + #print("send frame ", self._decode_cnt) + + return const.SUCCESS + + def _create_input_pic_stream_desc(self, input_data, input_size): + stream_desc = acl.media.dvpp_create_stream_desc() + if stream_desc is None: + acl_log.log_error("Create dvpp vdec input pic stream desc failed") + return None + + acl.media.dvpp_set_stream_desc_size(stream_desc, input_size) + acl.media.dvpp_set_stream_desc_data(stream_desc, input_data) + + return stream_desc + + def _create_output_pic_desc(self): + output_buffer, ret = acl.media.dvpp_malloc(self._output_pic_size) + if (output_buffer is None) or ret: + acl_log.log_error( + "Dvpp vdec malloc output memory failed, " + "size %d, error %d" % + (self._output_pic_size, ret)) + return None + + pic_desc = acl.media.dvpp_create_pic_desc() + if pic_desc is None: + acl_log.log_error("Create dvpp vdec output pic desc failed") + return None + + acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer) + 
acl.media.dvpp_set_pic_desc_size(pic_desc, self._output_pic_size) + acl.media.dvpp_set_pic_desc_format(pic_desc, self._format) + + return pic_desc + + def destroy(self): + """Release dvpp vdec resource""" + #print("vdec destroy****************") + if self._channel_desc is not None: + ret = acl.media.vdec_destroy_channel(self._channel_desc) + self._channel_desc = None + + self._thread_join() + + if self._frame_config is not None: + acl.media.vdec_destroy_frame_config(self._frame_config) + self._frame_config = None + self._destory_channel_flag = True + + def is_finished(self): + """Video decode finished""" + return ((self._decode_cnt > 0) and + (self._decode_complete_cnt >= self._decode_cnt)) + + def read(self, no_wait=False): + """Read decoded frame + no_wait: Get image without wait. If set this arg True, and + return image is None, should call is_finished() method + to confirm decode finish or failed + + Returns: + 1. const.SUCCESS, not None: get image success + 2. const.SUCCESS, None: all frames decoded and be token off + 3. const.FAILED, None: Has frame not decoded, but no image decoded, + it means decode video failed + """ + image = None + ret = const.SUCCESS + # received eos frame and all received frame decode complete + if no_wait or self.is_finished(): + try: + image = self._frame_queue.get_nowait() + except queue.Empty: + acl_log.log_info("No decode frame in queue anymore") + else: + try: + image = self._frame_queue.get(timeout=READ_TIMEOUT) + except queue.Empty: + ret = const.FAILED + acl_log.log_error("Read channel id %d frame timeout, " + "receive frame %d, decoded %d" + % (self._channel_id, self._decode_cnt, + self._decode_complete_cnt)) + return ret, image + diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py b/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py new file mode 100644 index 0000000..c2fde93 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py @@ -0,0 +1,38 @@ +import threading +import ctypes +import os +import platform + +import acl + +from constants import ACL_HOST, ACL_DEVICE + + +def _load_lib_acllite(): + run_mode, ret = acl.rt.get_run_mode() + + lib = None + if run_mode == ACL_DEVICE: + cur_dir = os.path.dirname(os.path.abspath(__file__)) + so_path = os.path.join(cur_dir, 'atlas200dk/libpython_acllite.so') + lib=ctypes.CDLL(so_path) + + return lib + + +class _AclLiteLib(object): + _instance_lock=threading.Lock() + lib=_load_lib_acllite() + + def __init__(self): + pass + + def __new__(cls, *args, **kwargs): + if not hasattr(_AclLiteLib, "_instance"): + with _AclLiteLib._instance_lock: + if not hasattr(_AclLiteLib, "_instance"): + _AclLiteLib._instance=object.__new__( + cls, *args, **kwargs) + return _AclLiteLib._instance + +libacllite=_AclLiteLib.lib diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile b/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile new file mode 100644 index 0000000..9ba097a --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile @@ -0,0 +1,88 @@ +TOPDIR := $(patsubst %,%,$(CURDIR)) + +ifndef CPU_ARCH +$(error "Can not find CPU_ARCH env, please set it in environment!.") +endif + +ifndef INSTALL_DIR +$(error "Can not find INSTALL_DIR env, please set it in environment!.") +endif + +ifndef THIRDPART_PATH +$(error "Can not find THIRDPART_PATH env, please 
set it in environment!.") +endif + +media_mini_exist = $(shell if [ -f ${INSTALL_DIR}/driver/libmedia_mini.so ]; then echo "exist"; else echo "notexist"; fi;) + +ifeq ($(CPU_ARCH), aarch64) + CC := aarch64-linux-gnu-g++ + OUT_DIR = ../atlas200dk +else ifeq ($(CPU_ARCH), x86_64) + CC := g++ +else + $(error "Unsupported param: "$(CPU_ARCH)) +endif + +LOCAL_MODULE_NAME := libpython_acllite.so + +LOCAL_DIR := . +OBJ_DIR = $(OUT_DIR)/obj +DEPS_DIR = $(OUT_DIR)/deps +LOCAL_LIBRARY=$(OUT_DIR)/$(LOCAL_MODULE_NAME) +OUT_INC_DIR = $(OUT_DIR)/include + +INC_DIR = \ + -I./ \ + -I../include \ + -I$(INSTALL_DIR)/driver/ \ + -I$(INSTALL_DIR)/runtime/include/ \ + -I$(THIRDPART_PATH)/include/ \ + -I$(THIRDPART_PATH)/include/presenter/agent/ \ + #-I$(DDK_PATH)/compiler/include/protobuf + +CC_FLAGS := $(INC_DIR) -DENABLE_DVPP_INTERFACE -std=c++11 -fPIC -Wall -O2 +LNK_FLAGS := \ + -Wl,-rpath-link=$(INSTALL_DIR)/runtime/lib64/stub \ + -Wl,-rpath-link=$(THIRDPART_PATH)/lib \ + -L$(INSTALL_DIR)/runtime/lib64/stub \ + -L$(THIRDPART_PATH)/lib \ + -lascendcl \ + -lacl_dvpp \ + -lstdc++ \ + -lpthread \ + -shared + +ifeq ($(media_mini_exist),exist) +LNK_FLAGS += -L${INSTALL_DIR}/driver -lmedia_mini +endif + +SRCS_ALL := $(patsubst $(LOCAL_DIR)/%.cpp, %.cpp, $(shell find $(LOCAL_DIR) -name "*.cpp")) +ifeq ($(media_mini_exist),exist) +SRCS := $(SRCS_ALL) +else +SRCS := $(subst camera.cpp, ,$(SRCS_ALL)) +endif + +OBJS := $(addprefix $(OBJ_DIR)/, $(patsubst %.cpp, %.o,$(SRCS))) +ALL_OBJS := $(OBJS) + +all: do_pre_build do_build + +do_pre_build: + $(Q)echo - do [$@] + $(Q)mkdir -p $(OBJ_DIR) + +do_build: $(LOCAL_LIBRARY) | do_pre_build + $(Q)echo - do [$@] + +$(LOCAL_LIBRARY): $(ALL_OBJS) + $(Q)echo [LD] $@ + $(Q)$(CC) $(CC_FLAGS) -o $@ $^ -Wl,--whole-archive -Wl,--no-whole-archive -Wl,--start-group -Wl,--end-group -Wl,-rpath='/home/HwHiAiUser/HIAI_PROJECTS/ascend_lib' $(LNK_FLAGS) + +$(OBJS): $(OBJ_DIR)/%.o : %.cpp | do_pre_build + $(Q)echo [CC] $@ + $(Q)mkdir -p $(dir $@) + $(Q)$(CC) $(CC_FLAGS) $(INC_DIR) -c -fstack-protector-all $< -o $@ + +clean: + rm -rf $(OUT_DIR)/* diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h b/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h new file mode 100644 index 0000000..0e5851b --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h @@ -0,0 +1,67 @@ +/** +* Copyright 2020 Huawei Technologies Co., Ltd +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at + +* http://www.apache.org/licenses/LICENSE-2.0 + +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ +* File utils.h +* Description: handle file operations +*/ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include "acl/acl.h" + +extern "C" { + +/** + * @brief calculate YUVSP420 image size + * @param [in] width: image width + * @param [in] height: image height + * @return bytes size of image + */ +#define YUV420SP_SIZE(width, height) ((width) * (height) * 3 / 2) + +/** + * @brief Write acl error level log to host log + * @param [in] fmt: the input format string + * @return none + */ +#define ACLLITE_LOG_ERROR(fmt, ...) \ + do{aclAppLog(ACL_ERROR, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ + fprintf(stdout, "[ERROR] " fmt "\n", ##__VA_ARGS__);}while(0) + +/** + * @brief Write acl info level log to host log + * @param [in] fmt: the input format string + * @return none + */ +#define ACLLITE_LOG_INFO(fmt, ...) \ + do{aclAppLog(ACL_INFO, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ + fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) + +/** + * @brief Write acl debug level log to host log + * @param [in] fmt: the input format string + * @return none + */ +#define ACLLITE_LOG_DEBUG(fmt, ...) \ + do{aclAppLog(ACL_DEBUG, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ + fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) +} \ No newline at end of file diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp new file mode 100644 index 0000000..d280907 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp @@ -0,0 +1,167 @@ +/** + * ============================================================================ + * + * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1 Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2 Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3 Neither the names of the copyright holders nor the names of the + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * ============================================================================ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "acl/acl.h" +#include "acl/ops/acl_dvpp.h" +#include "acllite_utils.h" +#include "camera.h" + +using namespace std; + +extern "C" { +#include "peripheral_api.h" +#include "camera.h" +CameraManager g_CameraMgr; + +void HwInit() { + if (!g_CameraMgr.hwInited) { + MediaLibInit(); + g_CameraMgr.hwInited = 1; + } +} + +int CameraInit(int id, int fps, int width, int height) { + Camera& cap = CAMERA(id); + cap.frameSize = YUV420SP_SIZE(width, height); + cap.id = id; + cap.fps = fps; + cap.width = width; + cap.height = height; + cap.inited = true; + + return ACLLITE_OK; +} + +int ConfigCamera(int id, int fps, int width, int height) { + int ret = SetCameraProperty(id, CAMERA_PROP_FPS, &fps); + if (ret == LIBMEDIA_STATUS_FAILED) { + ACLLITE_LOG_ERROR("Set camera fps failed"); + return ACLLITE_ERROR; + } + + CameraResolution resolution; + resolution.width = width; + resolution.height = height; + ret = SetCameraProperty(id, CAMERA_PROP_RESOLUTION, &resolution); + if (ret == LIBMEDIA_STATUS_FAILED) { + ACLLITE_LOG_ERROR("Set camera resolution failed"); + return ACLLITE_ERROR; + } + + CameraCapMode mode = CAMERA_CAP_ACTIVE; + ret = SetCameraProperty(id, CAMERA_PROP_CAP_MODE, &mode); + if (ret == LIBMEDIA_STATUS_FAILED) { + ACLLITE_LOG_ERROR("Set camera mode:%d failed", mode); + return ACLLITE_ERROR; + } + + return ACLLITE_OK; +} + +int OpenCameraEx(int id, int fps, int width, int height) { + if ((id < 0) || (id >= CAMERA_NUM)) { + ACLLITE_LOG_ERROR("Open camera failed for invalid id %d", id); + return ACLLITE_ERROR; + } + + HwInit(); + + CameraStatus status = QueryCameraStatus(id); + if (status == CAMERA_STATUS_CLOSED){ + // Open Camera + if (LIBMEDIA_STATUS_FAILED == OpenCamera(id)) { + ACLLITE_LOG_ERROR("Camera%d closed, and open failed.", id); + return ACLLITE_ERROR; + } + } else if (status != CAMERA_STATUS_OPEN) { + //如果摄像头状态不是close状态也不是open状态,则认为摄像头异常 + ACLLITE_LOG_ERROR("Invalid camera%d status %d", id, status); + return ACLLITE_ERROR; + } + + //Set camera property + if (ACLLITE_OK != ConfigCamera(id, fps, width, height)) { + CloseCamera(id); + ACLLITE_LOG_ERROR("Set camera%d property failed", id); + return ACLLITE_ERROR; + } + + if (!CAMERA(id).inited) { + CameraInit(id, fps, width, height); + } + + ACLLITE_LOG_INFO("Open camera %d success", id); + + return ACLLITE_OK; +} + +int ReadCameraFrame(int id, CameraOutput& frame) { + int size = CAMERA(id).frameSize; + void* data = nullptr; + auto aclRet = acldvppMalloc(&data, size); + if (aclRet != ACL_SUCCESS) { + ACLLITE_LOG_ERROR("acl malloc dvpp data failed, dataSize %d, error %d", + size, aclRet); + return ACLLITE_ERROR; + } + + int ret = ReadFrameFromCamera(id, (void*)data, (int *)&size); + if ((ret == LIBMEDIA_STATUS_FAILED) || + (size != CAMERA(id).frameSize)) { + acldvppFree(data); + ACLLITE_LOG_ERROR("Get image from camera %d failed, size %d", id, size); + return ACLLITE_ERROR; + } + frame.size = size; + frame.data = (uint8_t*)data; + + return ACLLITE_OK; +} + +int CloseCameraEx(int cameraId) { + if (LIBMEDIA_STATUS_FAILED == CloseCamera(cameraId)) { + ACLLITE_LOG_ERROR("Close camera %d failed", cameraId); + return ACLLITE_ERROR; + } + + return ACLLITE_OK; +} + +} diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h new file mode 100644 index 0000000..ca40d61 --- /dev/null +++ 
b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h @@ -0,0 +1,61 @@ +/** + * ============================================================================ + * + * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1 Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2 Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3 Neither the names of the copyright holders nor the names of the + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * ============================================================================ + */ +#ifndef _CAMERA_H +#define _CAMERA_H + +#define CAMERA_NUM (2) +#define CAMERA(i) (g_CameraMgr.cap[i]) + +const int ACLLITE_OK = 0; +const int ACLLITE_ERROR = 1; + +struct CameraOutput { + int size; + uint8_t* data; +}; + +struct Camera { + bool inited = false; + int id = 255; + int fps = 0; + int width = 0; + int height = 0; + int frameSize = 0; +}; + +struct CameraManager { + bool hwInited = 0; + Camera cap[CAMERA_NUM]; +}; + +#endif diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py @@ -0,0 +1 @@ + diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py new file mode 100644 index 0000000..a69c1a8 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py @@ -0,0 +1,91 @@ +# !/usr/bin/env python +# -*- coding:utf-8 -*- +import time +from threading import Thread +import sys + +from acllite_logger import log_error, log_info +from presenteragent.socket_client import AgentSocket +import presenteragent.presenter_message as pm +import presenteragent.presenter_datatype as datatype + + +class PresenterAgent(object): + """Message proxy to presenter server""" + def __init__(self, server_ip, port): + self.socket = AgentSocket(server_ip, port) + self._closed = False + self.heart_beat_thread = None + + def connect_server(self): + """Connect presenter server""" + return self.socket.connect() + + 
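+    # Illustrative lifecycle of this proxy (a hedged sketch, not part of
+    # the sample; the server address and port below are hypothetical):
+    #     agent = PresenterAgent("192.168.1.2", 7006)
+    #     if agent.connect_server() == 0:
+    #         agent.start_heard_beat_thread()  # once the channel is opened
+    #         ...                              # send frames over the socket
+    #         agent.exit()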
def start_heard_beat_thread(self): + """Start thread that send heardbeat messages""" + self.heart_beat_thread = Thread(target=self._keep_alive) + self.heart_beat_thread.start() + + def _keep_alive(self): + msg = pm.heartbeat_message() + + while True: + if self._closed: + log_error("Heard beat thread exit") + break + + self.socket.send_msg(msg) + time.sleep(2) + + def exit(self): + """Proxy exit""" + self.socket.close() + self._closed = True + + +def StartPresenterAgent( + msg_queue, + server_ip, + port, + open_status, + data_respone_counter): + """Startup presenter agent""" + agent = PresenterAgent(server_ip, port) + ret = agent.connect_server() + if ret: + log_error("Connect server failed, ret =", ret) + return + + open_status.value = datatype.STATUS_CONNECTED + + while True: + data = msg_queue.get() + if data is None: + continue + + if isinstance(data, datatype.FinishMsg): + log_info("Receive presenter agent exit notification, queue size ", + msg_queue.qsize()) + time.sleep(0.1) + agent.exit() + break + + agent.socket.send_msg(data) + msg_name, msg_body = agent.socket.recv_msg() + if (msg_name is None) or (msg_body is None): + log_error("Recv invalid message, message name ", msg_name) + continue + + if ((open_status.value == datatype.STATUS_CONNECTED) and + pm.is_open_channel_response(msg_name)): + log_info("Received open channel respone") + open_status.value = datatype.STATUS_OPENED + agent.start_heard_beat_thread() + log_info( + "presenter agent change connect_status to ", + open_status.value) + + if ((open_status.value == datatype.STATUS_OPENED) and + pm.is_image_frame_response(msg_name)): + data_respone_counter.value += 1 + #log_info("send ok ", data_respone_counter.value) diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py new file mode 100644 index 0000000..c5557ea --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py @@ -0,0 +1,144 @@ +# !/usr/bin/env python +# -*- coding:utf-8 -*- +import time +import configparser +from multiprocessing import Process, Queue, Manager +import queue +import numpy as np +import sys +# sys.path.append("..") + +import acl +import constants as const +from acllite_logger import log_error, log_info +from acllite_image import AclLiteImage + + +import presenteragent.presenter_datatype as dtype +import presenteragent.presenter_agent as agent +import presenteragent.presenter_message as pm + + +class PresenterChannel(object): + """Communication channel between presenter agent and server""" + def __init__(self, server_ip, port, name='video', + content_type=dtype.CONTENT_TYPE_VIDEO): + """Create instance""" + self._server_ip = server_ip + self._port = port + self._type = content_type + self._name = name + self.agent_msg_queue = Queue() + self.open_status = Manager().Value('i', dtype.STATUS_DISCONNECT) + self.data_respone_counter = Manager().Value('i', 0) + self._send_counter = 0 + + def startup(self): + """Create channel and connect with presenter server + Returns: + 0 connect success + 1 connect failed + """ + agent_process = Process( + target=agent.StartPresenterAgent, + args=( + self.agent_msg_queue, + self._server_ip, + self._port, + self.open_status, + self.data_respone_counter)) + agent_process.start() + time.sleep(0.5) + self._send_open_channel_request(self._name, self._type) + return self._wait_open_status(dtype.STATUS_OPENED) + + def _wait_open_status(self, listen_status): + ret = 
dtype.STATUS_ERROR + for i in range(0, 100): + time.sleep(0.001) + if self.open_status.value == listen_status: + log_info("Open status is %d now" % (listen_status)) + ret = dtype.STATUS_OK + break + return ret + + def send_message(self, data): + """Send message to presenter server""" + self.agent_msg_queue.put(data) + self._send_counter += 1 + + def _send_open_channel_request(self, channel_name, content_type): + request_msg = pm.open_channel_request(channel_name, content_type) + self.send_message(request_msg) + + def send_detection_data(self, image_width, image_height, + image, detection_result): + """Send image frame request to presenter server""" + image_data = None + if isinstance(image, AclLiteImage): + image_data = image.byte_data_to_np_array() + elif isinstance(image, np.ndarray): + image_data = image + else: + log_error("Invalid data to send, ", image) + return False + + request_msg = pm.image_frame_request(image_width, image_height, + image_data.tobytes(), + detection_result) + self.send_message(request_msg) + + return True + + def send_image(self, image_width, image_height, image): + """Send image frame request that only has image to presenter server""" + detection_result = [] + return self.send_detection_data(image_width, image_height, + image, detection_result) + + def _send_heart_beat_message(self): + msg = pm.heartbeat_message() + self.send_message(msg) + + def close(self): + """Close channel""" + if self.open_status == dtype.STATUS_EXITTED: + return + + log_info("Presenter channel close...") + eos = dtype.FinishMsg("exit") + self.send_message(eos) + while self.agent_msg_queue.qsize() > 0: + time.sleep(0.001) + self.open_status = dtype.STATUS_EXITTED + + def __del__(self): + self.close() + + +def get_channel_config(config_file): + """Get connect parameters from config file""" + config = configparser.ConfigParser() + config.read(config_file) + presenter_server_ip = config['baseconf']['presenter_server_ip'] + port = int(config['baseconf']['presenter_server_port']) + channel_name = config['baseconf']['channel_name'] + content_type = int(config['baseconf']['content_type']) + + log_info( + "presenter server ip %s, port %d, channel name %s, " + "type %d" % + (presenter_server_ip, port, channel_name, content_type)) + return presenter_server_ip, port, channel_name, content_type + + +def open_channel(config_file): + """Connect with presenter server""" + server_ip, port, channel_name, content_type = get_channel_config( + config_file) + channel = PresenterChannel(server_ip, port, channel_name, content_type) + ret = channel.startup() + if ret: + log_error("ERROR:Open channel failed") + return None + return channel diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py new file mode 100644 index 0000000..b19415a --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py @@ -0,0 +1,70 @@ +STATUS_DISCONNECT = 0 +STATUS_CONNECTED = 1 +STATUS_OPEN_CH_REQUEST = 2 +STATUS_OPENED = 3 +STATUS_EXITING = 4 +STATUS_EXITTED = 5 + +CONTENT_TYPE_IMAGE = 0 +CONTENT_TYPE_VIDEO = 1 + +STATUS_OK = 0 +STATUS_ERROR = 1 + + +class Point(object): + """ + point coordinate + """ + + def __init__(self, x=0, y=0): + self.x = x + self.y = y + + +class Box(object): + """ + object rectangle area + """ + + def __init__(self, lt, rb): + self.lt = Point(lt) + self.rb = Point(rb) + + def box_valid(self): + """ + verify box coordinate is valid + """ + return 
((self.lt.x >= 0) + and (self.lt.y >= 0) + and (self.rb.x >= self.lt.x) + and (self.rb.y >= self.lt.y)) + + +class ObjectDetectionResult(object): + """ + object detection information, include object position, confidence and label + """ + + def __init__(self, ltx=0, lty=0, rbx=0, rby=0, text=None): + self.object_class = 0 + self.confidence = 0 + self.box = Box((ltx, lty), (rbx, rby)) + self.result_text = text + + def check_box_vaild(self, width, height): + """ + verify object position is valid + """ + return (self.box.box_valid() and + (self.box.rb.x <= width) and + (self.box.rb.y <= height)) + + +class FinishMsg(object): + """ + the message to notify presenter agent exit + """ + + def __init__(self, data): + self.data = data diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto new file mode 100644 index 0000000..879d557 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package ascend.presenter.proto; + +enum OpenChannelErrorCode { + kOpenChannelErrorNone = 0; + kOpenChannelErrorNoSuchChannel = 1; + kOpenChannelErrorChannelAlreadyOpened = 2; + kOpenChannelErrorOther = -1; +} + +enum ChannelContentType { + kChannelContentTypeImage = 0; + kChannelContentTypeVideo = 1; +} + +// By Protocol Buffer Style Guide, need to use underscore_separated_names +// for field names +message OpenChannelRequest { + string channel_name = 1; + ChannelContentType content_type = 2; +} + +message OpenChannelResponse { + OpenChannelErrorCode error_code = 1; + string error_message = 2; +} + +message HeartbeatMessage { + +} + +enum ImageFormat { + kImageFormatJpeg = 0; +} + +message Coordinate { + uint32 x = 1; + uint32 y = 2; +} + +message Rectangle_Attr { + Coordinate left_top = 1; + Coordinate right_bottom = 2; + string label_text = 3; +} + +message PresentImageRequest { + ImageFormat format = 1; + uint32 width = 2; + uint32 height = 3; + bytes data = 4; + repeated Rectangle_Attr rectangle_list = 5; +} + +enum PresentDataErrorCode { + kPresentDataErrorNone = 0; + kPresentDataErrorUnsupportedType = 1; + kPresentDataErrorUnsupportedFormat = 2; + kPresentDataErrorOther = -1; +} + +message PresentImageResponse { + PresentDataErrorCode error_code = 1; + string error_message = 2; +} + diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py new file mode 100644 index 0000000..383eff6 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py @@ -0,0 +1,70 @@ +# !/usr/bin/env python +# -*- coding:utf-8 -*- +import struct +import socket + +import presenteragent.presenter_message_pb2 as pb2 + + +def pack_message(msg_name, msg_data): + """Pack message name and data to byte stream""" + buf = msg_data.SerializeToString() + msg_body_len = len(buf) + msg_name_len = len(msg_name) + msg_total_len = msg_name_len + msg_body_len + 5 + data = b'' + msg_total_len = socket.htonl(msg_total_len) + pack_data = struct.pack('IB', msg_total_len, msg_name_len) + data += pack_data + data += msg_name.encode() + data += buf + + return data + + +def open_channel_request(channel_name, content_type): + """Create open channel request message""" + request = pb2.OpenChannelRequest() + request.channel_name = channel_name + request.content_type = content_type + + return 
pack_message(pb2._OPENCHANNELREQUEST.full_name, request) + + +def image_frame_request( + image_width, + image_height, + image_data, + detection_result): + """Create image frame request message""" + request = pb2.PresentImageRequest() + request.format = 0 + request.width = image_width + request.height = image_height + request.data = image_data + for i in range(0, len(detection_result)): + myadd = request.rectangle_list.add() + myadd.left_top.x = detection_result[i].box.lt.x + myadd.left_top.y = detection_result[i].box.lt.y + myadd.right_bottom.x = detection_result[i].box.rb.x + myadd.right_bottom.y = detection_result[i].box.rb.y + myadd.label_text = detection_result[i].result_text + + return pack_message(pb2._PRESENTIMAGEREQUEST.full_name, request) + + +def heartbeat_message(): + """Create headbeat message""" + return pack_message( + pb2._HEARTBEATMESSAGE.full_name, + pb2.HeartbeatMessage()) + + +def is_open_channel_response(msg_name): + """Confirm the message is open channel response or not""" + return (msg_name == pb2._OPENCHANNELRESPONSE.full_name) + + +def is_image_frame_response(msg_name): + """Confirm the message is image frame response or not""" + return (msg_name == pb2._PRESENTIMAGERESPONSE.full_name) diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py new file mode 100644 index 0000000..6c99f06 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py @@ -0,0 +1,493 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: presenter_message.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='presenter_message.proto', + package='ascend.presenter.proto', + syntax='proto3', + serialized_pb=_b('\n\x17presenter_message.proto\x12\x16\x61scend.presenter.proto\"l\n\x12OpenChannelRequest\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t\x12@\n\x0c\x63ontent_type\x18\x02 \x01(\x0e\x32*.ascend.presenter.proto.ChannelContentType\"n\n\x13OpenChannelResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.OpenChannelErrorCode\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\x12\n\x10HeartbeatMessage\"\"\n\nCoordinate\x12\t\n\x01x\x18\x01 \x01(\r\x12\t\n\x01y\x18\x02 \x01(\r\"\x94\x01\n\x0eRectangle_Attr\x12\x34\n\x08left_top\x18\x01 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x38\n\x0cright_bottom\x18\x02 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x12\n\nlabel_text\x18\x03 \x01(\t\"\xb7\x01\n\x13PresentImageRequest\x12\x33\n\x06\x66ormat\x18\x01 \x01(\x0e\x32#.ascend.presenter.proto.ImageFormat\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12>\n\x0erectangle_list\x18\x05 \x03(\x0b\x32&.ascend.presenter.proto.Rectangle_Attr\"o\n\x14PresentImageResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.PresentDataErrorCode\x12\x15\n\rerror_message\x18\x02 
\x01(\t*\xa5\x01\n\x14OpenChannelErrorCode\x12\x19\n\x15kOpenChannelErrorNone\x10\x00\x12\"\n\x1ekOpenChannelErrorNoSuchChannel\x10\x01\x12)\n%kOpenChannelErrorChannelAlreadyOpened\x10\x02\x12#\n\x16kOpenChannelErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01*P\n\x12\x43hannelContentType\x12\x1c\n\x18kChannelContentTypeImage\x10\x00\x12\x1c\n\x18kChannelContentTypeVideo\x10\x01*#\n\x0bImageFormat\x12\x14\n\x10kImageFormatJpeg\x10\x00*\xa4\x01\n\x14PresentDataErrorCode\x12\x19\n\x15kPresentDataErrorNone\x10\x00\x12$\n kPresentDataErrorUnsupportedType\x10\x01\x12&\n\"kPresentDataErrorUnsupportedFormat\x10\x02\x12#\n\x16kPresentDataErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x62\x06proto3') +) + +_OPENCHANNELERRORCODE = _descriptor.EnumDescriptor( + name='OpenChannelErrorCode', + full_name='ascend.presenter.proto.OpenChannelErrorCode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='kOpenChannelErrorNone', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kOpenChannelErrorNoSuchChannel', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kOpenChannelErrorChannelAlreadyOpened', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kOpenChannelErrorOther', index=3, number=-1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=780, + serialized_end=945, +) +_sym_db.RegisterEnumDescriptor(_OPENCHANNELERRORCODE) + +OpenChannelErrorCode = enum_type_wrapper.EnumTypeWrapper(_OPENCHANNELERRORCODE) +_CHANNELCONTENTTYPE = _descriptor.EnumDescriptor( + name='ChannelContentType', + full_name='ascend.presenter.proto.ChannelContentType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='kChannelContentTypeImage', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kChannelContentTypeVideo', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=947, + serialized_end=1027, +) +_sym_db.RegisterEnumDescriptor(_CHANNELCONTENTTYPE) + +ChannelContentType = enum_type_wrapper.EnumTypeWrapper(_CHANNELCONTENTTYPE) +_IMAGEFORMAT = _descriptor.EnumDescriptor( + name='ImageFormat', + full_name='ascend.presenter.proto.ImageFormat', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='kImageFormatJpeg', index=0, number=0, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1029, + serialized_end=1064, +) +_sym_db.RegisterEnumDescriptor(_IMAGEFORMAT) + +ImageFormat = enum_type_wrapper.EnumTypeWrapper(_IMAGEFORMAT) +_PRESENTDATAERRORCODE = _descriptor.EnumDescriptor( + name='PresentDataErrorCode', + full_name='ascend.presenter.proto.PresentDataErrorCode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='kPresentDataErrorNone', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kPresentDataErrorUnsupportedType', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kPresentDataErrorUnsupportedFormat', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kPresentDataErrorOther', index=3, number=-1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1067, + serialized_end=1231, +) 
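+# Usage sketch (illustrative only; this generated module is regenerated by
+# protoc, not edited by hand): once registered below, the enum wrappers
+# support reverse lookups such as
+#     PresentDataErrorCode.Name(0)  -> 'kPresentDataErrorNone'
+# and the matching module-level constants (e.g. kPresentDataErrorNone == 0)
+# are defined later in this file.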
+_sym_db.RegisterEnumDescriptor(_PRESENTDATAERRORCODE) + +PresentDataErrorCode = enum_type_wrapper.EnumTypeWrapper(_PRESENTDATAERRORCODE) +kOpenChannelErrorNone = 0 +kOpenChannelErrorNoSuchChannel = 1 +kOpenChannelErrorChannelAlreadyOpened = 2 +kOpenChannelErrorOther = -1 +kChannelContentTypeImage = 0 +kChannelContentTypeVideo = 1 +kImageFormatJpeg = 0 +kPresentDataErrorNone = 0 +kPresentDataErrorUnsupportedType = 1 +kPresentDataErrorUnsupportedFormat = 2 +kPresentDataErrorOther = -1 + + + +_OPENCHANNELREQUEST = _descriptor.Descriptor( + name='OpenChannelRequest', + full_name='ascend.presenter.proto.OpenChannelRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='channel_name', full_name='ascend.presenter.proto.OpenChannelRequest.channel_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='content_type', full_name='ascend.presenter.proto.OpenChannelRequest.content_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=51, + serialized_end=159, +) + + +_OPENCHANNELRESPONSE = _descriptor.Descriptor( + name='OpenChannelResponse', + full_name='ascend.presenter.proto.OpenChannelResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='error_code', full_name='ascend.presenter.proto.OpenChannelResponse.error_code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='error_message', full_name='ascend.presenter.proto.OpenChannelResponse.error_message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=161, + serialized_end=271, +) + + +_HEARTBEATMESSAGE = _descriptor.Descriptor( + name='HeartbeatMessage', + full_name='ascend.presenter.proto.HeartbeatMessage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=273, + serialized_end=291, +) + + +_COORDINATE = _descriptor.Descriptor( + name='Coordinate', + full_name='ascend.presenter.proto.Coordinate', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='ascend.presenter.proto.Coordinate.x', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='y', full_name='ascend.presenter.proto.Coordinate.y', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=293, + serialized_end=327, +) + + +_RECTANGLE_ATTR = _descriptor.Descriptor( + name='Rectangle_Attr', + full_name='ascend.presenter.proto.Rectangle_Attr', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='left_top', full_name='ascend.presenter.proto.Rectangle_Attr.left_top', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='right_bottom', full_name='ascend.presenter.proto.Rectangle_Attr.right_bottom', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='label_text', full_name='ascend.presenter.proto.Rectangle_Attr.label_text', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=330, + serialized_end=478, +) + + +_PRESENTIMAGEREQUEST = _descriptor.Descriptor( + name='PresentImageRequest', + full_name='ascend.presenter.proto.PresentImageRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='format', full_name='ascend.presenter.proto.PresentImageRequest.format', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='width', full_name='ascend.presenter.proto.PresentImageRequest.width', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='height', full_name='ascend.presenter.proto.PresentImageRequest.height', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data', full_name='ascend.presenter.proto.PresentImageRequest.data', index=3, + number=4, type=12, cpp_type=9, 
label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='rectangle_list', full_name='ascend.presenter.proto.PresentImageRequest.rectangle_list', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=481, + serialized_end=664, +) + + +_PRESENTIMAGERESPONSE = _descriptor.Descriptor( + name='PresentImageResponse', + full_name='ascend.presenter.proto.PresentImageResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='error_code', full_name='ascend.presenter.proto.PresentImageResponse.error_code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='error_message', full_name='ascend.presenter.proto.PresentImageResponse.error_message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=666, + serialized_end=777, +) + +_OPENCHANNELREQUEST.fields_by_name['content_type'].enum_type = _CHANNELCONTENTTYPE +_OPENCHANNELRESPONSE.fields_by_name['error_code'].enum_type = _OPENCHANNELERRORCODE +_RECTANGLE_ATTR.fields_by_name['left_top'].message_type = _COORDINATE +_RECTANGLE_ATTR.fields_by_name['right_bottom'].message_type = _COORDINATE +_PRESENTIMAGEREQUEST.fields_by_name['format'].enum_type = _IMAGEFORMAT +_PRESENTIMAGEREQUEST.fields_by_name['rectangle_list'].message_type = _RECTANGLE_ATTR +_PRESENTIMAGERESPONSE.fields_by_name['error_code'].enum_type = _PRESENTDATAERRORCODE +DESCRIPTOR.message_types_by_name['OpenChannelRequest'] = _OPENCHANNELREQUEST +DESCRIPTOR.message_types_by_name['OpenChannelResponse'] = _OPENCHANNELRESPONSE +DESCRIPTOR.message_types_by_name['HeartbeatMessage'] = _HEARTBEATMESSAGE +DESCRIPTOR.message_types_by_name['Coordinate'] = _COORDINATE +DESCRIPTOR.message_types_by_name['Rectangle_Attr'] = _RECTANGLE_ATTR +DESCRIPTOR.message_types_by_name['PresentImageRequest'] = _PRESENTIMAGEREQUEST +DESCRIPTOR.message_types_by_name['PresentImageResponse'] = _PRESENTIMAGERESPONSE +DESCRIPTOR.enum_types_by_name['OpenChannelErrorCode'] = _OPENCHANNELERRORCODE +DESCRIPTOR.enum_types_by_name['ChannelContentType'] = _CHANNELCONTENTTYPE +DESCRIPTOR.enum_types_by_name['ImageFormat'] = _IMAGEFORMAT +DESCRIPTOR.enum_types_by_name['PresentDataErrorCode'] = _PRESENTDATAERRORCODE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +OpenChannelRequest = _reflection.GeneratedProtocolMessageType('OpenChannelRequest', (_message.Message,), dict( + DESCRIPTOR = _OPENCHANNELREQUEST, + __module__ = 'presenter_message_pb2' + # 
@@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelRequest) + )) +_sym_db.RegisterMessage(OpenChannelRequest) + +OpenChannelResponse = _reflection.GeneratedProtocolMessageType('OpenChannelResponse', (_message.Message,), dict( + DESCRIPTOR = _OPENCHANNELRESPONSE, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelResponse) + )) +_sym_db.RegisterMessage(OpenChannelResponse) + +HeartbeatMessage = _reflection.GeneratedProtocolMessageType('HeartbeatMessage', (_message.Message,), dict( + DESCRIPTOR = _HEARTBEATMESSAGE, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.HeartbeatMessage) + )) +_sym_db.RegisterMessage(HeartbeatMessage) + +Coordinate = _reflection.GeneratedProtocolMessageType('Coordinate', (_message.Message,), dict( + DESCRIPTOR = _COORDINATE, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Coordinate) + )) +_sym_db.RegisterMessage(Coordinate) + +Rectangle_Attr = _reflection.GeneratedProtocolMessageType('Rectangle_Attr', (_message.Message,), dict( + DESCRIPTOR = _RECTANGLE_ATTR, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Rectangle_Attr) + )) +_sym_db.RegisterMessage(Rectangle_Attr) + +PresentImageRequest = _reflection.GeneratedProtocolMessageType('PresentImageRequest', (_message.Message,), dict( + DESCRIPTOR = _PRESENTIMAGEREQUEST, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageRequest) + )) +_sym_db.RegisterMessage(PresentImageRequest) + +PresentImageResponse = _reflection.GeneratedProtocolMessageType('PresentImageResponse', (_message.Message,), dict( + DESCRIPTOR = _PRESENTIMAGERESPONSE, + __module__ = 'presenter_message_pb2' + # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageResponse) + )) +_sym_db.RegisterMessage(PresentImageResponse) + + +# @@protoc_insertion_point(module_scope) diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py new file mode 100644 index 0000000..1e69b88 --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py @@ -0,0 +1,135 @@ +# !/usr/bin/env python +# -*- coding:utf-8 -*- +import sys +sys.path.append("..") + +import threading +import socket +import time +import struct + +from acllite_logger import log_error, log_info + +class AgentSocket(object): + """Create socket between app and presenter server""" + def __init__(self, server_ip, port): + """Create socket instance + Args: + server_ip: presenter server ip addr + port: connect port of presenter server + """ + self._server_address = (server_ip, port) + self._sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + def connect(self): + """Create connect with presenter server + Returns: + ret: connect error code, 0 is connect success, otherwise failed + """ + ret = 0 + for i in range(0, 5): + ret = self._sock_client.connect_ex(self._server_address) + if ret == 0: + break + time.sleep(0.2) + return ret + + def _read_socket(self, read_len): + has_read_len = 0 + read_buf = b'' + total_buf = b'' + + while has_read_len != read_len: + try: + read_buf = self._sock_client.recv(read_len - has_read_len) + except socket.error: + log_error("Read socket failed, error ", socket.error) + return False, None + if read_buf == b'': 
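+                # recv() returning b'' means the peer closed the connection
+                # cleanly; report failure so the caller can drop the channel.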
+ return False, None + total_buf += read_buf + has_read_len = len(total_buf) + + return True, total_buf + + def _read_msg_head(self, read_len): + ret, msg_head = self._read_socket(read_len) + if not ret: + log_error("socket receive msg head null") + return None, None + + # in Struct(), 'I' is unsigned int, 'B' is unsigned char + msg_head_data = struct.Struct('IB') + (msg_total_len, msg_name_len) = msg_head_data.unpack(msg_head) + msg_total_len = socket.ntohl(msg_total_len) + + return msg_total_len, msg_name_len + + def _read_msg_name(self, msg_name_len): + ret, msg_name = self._read_socket(msg_name_len) + + if not ret: + log_error("Socket receive msg but name is null") + return False, None + try: + msg_name = msg_name.decode("utf-8") + except: + log_error("Msg name decode to utf-8 error") + return False, None + + return True, msg_name + + def _read_msg_body(self, msg_body_len): + ret, msg_body = self._read_socket(msg_body_len) + if not ret: + log_error("Socket receive msg but body null") + return False, None + return True, msg_body + + def recv_msg(self): + """Receive message from presenter server + Returns: + msg_name: received message name + msg_body: received message data + """ + # Step1: read msg head + msg_total_len, msg_name_len = self._read_msg_head(5) + if msg_total_len is None: + log_error("msg total len is None.") + return None, None + + # Step2: read msg name + ret, msg_name = self._read_msg_name(msg_name_len) + if not ret: + return None, None + + # Step3: read msg body + msg_body_len = msg_total_len - 5 - msg_name_len + if msg_body_len < 0: + log_error("msg total len is 0") + return None, None + ret, msg_body = self._read_msg_body(msg_body_len) + if not ret: + return None, None + + return msg_name, msg_body + + def send_msg(self, data): + """Send message to presenter server + Args: + data: message data + Returns: + 0 send success + 1 send failed + """ + try: + self._sock_client.sendall(data) + except: + log_error("Send msg failed") + return 1 + return 0 + + def close(self): + """Close connect""" + self._sock_client.shutdown(socket.SHUT_RDWR) + self._sock_client.close() diff --git a/Samples/YOLOV5MultiInput/python/src/python/videocapture.py b/Samples/YOLOV5MultiInput/python/src/python/videocapture.py new file mode 100644 index 0000000..361bd4e --- /dev/null +++ b/Samples/YOLOV5MultiInput/python/src/python/videocapture.py @@ -0,0 +1,383 @@ +import av +import threading +import numpy as np +import acl +import time + +import constants as const +import acllite_utils as utils +import acllite_logger as acl_log +import dvpp_vdec as dvpp_vdec +from acllite_image import AclLiteImage + +WAIT_INTERVAL = 0.01 # 0.01 +WAIT_READY_MAX = 10 +WAIT_FIRST_DECODED_FRAME = 0.02 + +DECODE_STATUS_INIT = 0 +DECODE_STATUS_READY = 1 +DECODE_STATUS_RUNNING = 2 +DECODE_STATUS_PYAV_FINISH = 3 +DECODE_STATUS_ERROR = 4 +DECODE_STATUS_STOP = 5 +DECODE_STATUS_EXIT = 6 + + +class _ChannelIdGenerator(object): + """Generate global unique id number, single instance mode class""" + _instance_lock = threading.Lock() + channel_id = 0 + + def __init__(self): + pass + + def __new__(cls, *args, **kwargs): + if not hasattr(_ChannelIdGenerator, "_instance"): + with _ChannelIdGenerator._instance_lock: + if not hasattr(_ChannelIdGenerator, "_instance"): + _ChannelIdGenerator._instance = object.__new__( + cls, *args, **kwargs) + return _ChannelIdGenerator._instance + + def generator_channel_id(self): + """Generate global unique id number + The id number is increase + """ + curren_channel_id = 0 + with 
_ChannelIdGenerator._instance_lock: + curren_channel_id = _ChannelIdGenerator.channel_id + _ChannelIdGenerator.channel_id += 1 + + return curren_channel_id + + +def gen_unique_channel_id(): + """Interface of generate global unique id number""" + generator = _ChannelIdGenerator() + return generator.generator_channel_id() + + +class VideoCapture(object): + """Decode video by pyav and pyacl dvpp vdec + This class only support decode annex-b h264 file or rtsp ip camera. + You can use command: + ffmpeg -i aaa.mp4 -codec copy -bsf: h264_mp4toannexb -f h264 aaa.h264 + to transform mp4 file to h264 stream file. + If decode rtsp of ip camera or stream pull stream software, make sure + the stream format is annex-b + + Attributes: + _stream_name: video stream name + _input_buffer: dvpp vdec decode input data buffer + _ctx: decode thread acl context, use the same contxt with app + _entype: video stream encode type, dvpp vdec support: + const.ENTYPE_H265_MAIN = 0 H265 main level + const.ENTYPE_H264_BASE = 1 H264 baseline level + const.ENTYPE_H264_MAIN = 2 H264 main level + const.ENTYPE_H264_HIGH = 3 H264 high level + this attributes will read from video stream extradata + _channel_id: dvpp vdec decode channel id parameter, global unique + _vdec: pyacl dvpp vdec instance + _is_opened: the video stream wether open or not + _status: video decoder current status + _run_mode: the device mode + """ + + def __init__(self, strame_name): + self._stream_name = strame_name + self._input_buffer = None + self._vdec = None + self._is_opened = False + self._width = 0 + self._height = 0 + self._decode_thread_id = None + self._dextory_dvpp_flag = False + self._ctx, ret = acl.rt.get_context() + if ret: + acl_log.log_error("Get acl context failed when " + "instance AclVideo, error ", ret) + else: + self._entype = const.ENTYPE_H264_MAIN + self._channel_id = gen_unique_channel_id() + self._status = DECODE_STATUS_INIT + self._run_mode, ret = acl.rt.get_run_mode() + if ret: + acl_log.log_error("Get acl run mode failed when " + "instance AclVideo, error ", ret) + else: + self._open() + + def __del__(self): + self.destroy() + + def _open(self): + # Get frame width, height, encode type by pyav + if self._get_param(): + acl_log.log_error("Decode %s failed for get stream " + "parameters error" % (self._stream_name)) + return + print(000000000) + # Create decode thread and prepare to decode + self._decode_thread_id, ret = acl.util.start_thread( + self._decode_thread_entry, []) + if ret: + acl_log.log_error("Create %s decode thread failed, error %d" + % (self._stream_name, ret)) + return + print(1111111) + # Wait decode thread decode ready + for i in range(0, WAIT_READY_MAX): + print(f'====={i}') + if self._status == DECODE_STATUS_INIT: + print(f'-----{i}') + time.sleep(WAIT_INTERVAL) + print(222222222) + if self._status == DECODE_STATUS_READY: + self._is_opened = True + acl_log.log_info("Ready to decode %s..." 
% (self._stream_name)) + else: + acl_log.log_error("Open %s failed for wait ready timeout" + % (self._stream_name)) + return + + def _get_param(self): + container = av.open(self._stream_name) + stream = [s for s in container.streams if s.type == 'video'] + if len(stream) == 0: + # The stream is not video + acl_log.log_error("%s has no video stream" % (self._stream_name)) + return const.FAILED + + ret, profile = self._get_profile(stream) + if ret: + acl_log.log_error("%s is not annex-b format, decode failed" + % (self._stream_name)) + return const.FAILED + + video_context = container.streams.video[0].codec_context + codec_id_name = video_context.name + ret, self._entype = self._get_entype(codec_id_name, profile) + if ret: + return const.FAILED + + self._width = video_context.width + self._height = video_context.height + + acl_log.log_info( + "Get %s infomation: width %d, height %d, profile %d, " + "codec %s, entype %d" % + (self._stream_name, + self._width, + self._height, + profile, + codec_id_name, + self._entype)) + print('aaaaaa') + container.close() + print('bbbbbbbbb') + return const.SUCCESS + + def _get_profile(self, stream): + # Annex-b format h264 extradata is start with 0x000001 or 0x00000001 + extradata = np.frombuffer(stream[0].codec_context.extradata, np.ubyte) + if (extradata[0:3] == [0, 0, 1]).all(): + profile_id = extradata[4] + elif (extradata[0:4] == [0, 0, 0, 1]).all(): + profile_id = extradata[5] + else: + acl_log.log_error("The stream %s is not annex-b h264, " + "can not decode it" % (self._stream_name)) + return const.FAILED, None + + return const.SUCCESS, profile_id + + def _get_entype(self, codec_id_name, profile): + # Dvpp vdec support h264 baseline, main and high level + profile_entype_tbl = { + 'h264': {const.FF_PROFILE_H264_BASELINE: const.ENTYPE_H264_BASE, + const.FF_PROFILE_H264_MAIN: const.ENTYPE_H264_MAIN, + const.FF_PROFILE_H264_HIGH: const.ENTYPE_H264_HIGH}, + 'h265': {const.FF_PROFILE_HEVC_MAIN: const.ENTYPE_H265_MAIN}, + 'hevc': {const.FF_PROFILE_HEVC_MAIN: const.ENTYPE_H265_MAIN}} + entype = None + ret = const.SUCCESS + + if codec_id_name in profile_entype_tbl.keys(): + entype_tbl = profile_entype_tbl[codec_id_name] + if profile in entype_tbl.keys(): + entype = entype_tbl[profile] + elif codec_id_name == 'h264': + # if not support profile, try to decode as main + entype = const.ENTYPE_H264_MAIN + acl_log.log_error("Unsurpport h264 profile ", profile, + ", decode as main level") + else: + entype = const.ENTYPE_H265_MAIN + acl_log.log_error("Unsurpport h265 profile ", profile, + ", decode as main level") + else: + # Not h264 or h265 + ret = const.FAILED + acl_log.log_error("Unsupport codec type ", codec_id_name) + + return ret, entype + + def _pyav_vdec(self): + frame = 0 + video = av.open(self._stream_name) + stream = [s for s in video.streams if s.type == 'video'] + acl_log.log_info("Start decode %s frames" % (self._stream_name)) + for packet in video.demux([stream[0]]): + # Get frame data from packet and copy to dvpp + frame_data, data_size = self._prepare_frame_data(packet) + if data_size == 0: + # Last packet size is 0, no frame to decode anymore + break + + if self._vdec.process(frame_data, data_size, + [self._channel_id, frame]): + acl_log.log_error("Dvpp vdec deocde frame %d failed, " + "stop decode" % (frame)) + self._status = DECODE_STATUS_ERROR + break + frame += 1 + + # The status chang to stop when app stop decode + if self._status != DECODE_STATUS_RUNNING: + acl_log.log_info("Decode status change to %d, stop decode" + % (self._status)) + 
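+                # The app changed the status (stop) or a decode error was
+                # recorded above; leave the demux loop so the decode thread
+                # can release its resources and exit.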
+    def _pyav_vdec(self):
+        frame = 0
+        video = av.open(self._stream_name)
+        stream = [s for s in video.streams if s.type == 'video']
+        acl_log.log_info("Start decode %s frames" % (self._stream_name))
+        for packet in video.demux([stream[0]]):
+            # Get frame data from the packet and copy it to dvpp memory
+            frame_data, data_size = self._prepare_frame_data(packet)
+            if data_size == 0:
+                # The last packet size is 0, no frame to decode anymore
+                break
+
+            if self._vdec.process(frame_data, data_size,
+                                  [self._channel_id, frame]):
+                acl_log.log_error("Dvpp vdec decode frame %d failed, "
+                                  "stop decode" % (frame))
+                self._status = DECODE_STATUS_ERROR
+                break
+            frame += 1
+
+            # The status changes to stop when the app stops decoding
+            if self._status != DECODE_STATUS_RUNNING:
+                acl_log.log_info("Decode status change to %d, stop decode"
+                                 % (self._status))
+                break
+
+    def _prepare_frame_data(self, packet):
+        in_frame_np = np.frombuffer(packet.to_bytes(), np.byte)
+        size = in_frame_np.size
+        if size == 0:
+            # The last frame data is empty
+            acl_log.log_info("Pyav decode finish")
+            self._status = DECODE_STATUS_PYAV_FINISH
+            return None, 0
+
+        if "bytes_to_ptr" in dir(acl.util):
+            bytes_data = in_frame_np.tobytes()
+            in_frame_ptr = acl.util.bytes_to_ptr(bytes_data)
+        else:
+            in_frame_ptr = acl.util.numpy_to_ptr(in_frame_np)
+        policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE
+        if self._run_mode == const.ACL_HOST:
+            policy = const.ACL_MEMCPY_HOST_TO_DEVICE
+        ret = acl.rt.memcpy(self._input_buffer, size, in_frame_ptr, size,
+                            policy)
+        if ret:
+            acl_log.log_error("Copy data to dvpp failed, policy %d, error %d"
+                              % (policy, ret))
+            self._status = DECODE_STATUS_ERROR
+            return None, 0
+
+        return self._input_buffer, size
+
+    def _decode_thread_entry(self, arg_list):
+        # Set the acl context for the decode thread and init dvpp vdec
+        if self._decode_thread_init():
+            acl_log.log_error("Decode thread init failed")
+            return const.FAILED
+
+        self._status = DECODE_STATUS_READY
+        while self._status == DECODE_STATUS_READY:
+            time.sleep(WAIT_INTERVAL)
+
+        self._pyav_vdec()
+        self._decode_thread_join()
+
+        return const.SUCCESS
+
+    def _decode_thread_init(self):
+        # Set the acl context for the decode thread
+        ret = acl.rt.set_context(self._ctx)
+        if ret:
+            acl_log.log_error("%s decode thread set context failed, error %d"
+                              % (self._stream_name, ret))
+            return const.FAILED
+
+        # Instance dvpp vdec and init it
+        self._vdec = dvpp_vdec.DvppVdec(self._channel_id, self._width,
+                                        self._height, self._entype, self._ctx)
+        if self._vdec.init():
+            acl_log.log_error("%s decode thread init dvpp vdec failed"
+                              % (self._stream_name))
+            return const.FAILED
+
+        # Malloc dvpp memory for the vdec decode input
+        self._input_buffer, ret = acl.media.dvpp_malloc(
+            utils.rgbu8_size(self._width, self._height))
+        if ret:
+            acl_log.log_error("%s decode thread malloc input memory failed, "
+                              "error %d. frame width %d, height %d, size %d"
+                              % (self._stream_name, ret,
+                                 self._width, self._height,
+                                 utils.rgbu8_size(self._width, self._height)))
+            return const.FAILED
+
+        return const.SUCCESS
+
+    def _decode_thread_join(self):
+        self.destroy()
+        # Wait until all decoded frames are taken off by read()
+        while self._status < DECODE_STATUS_STOP:
+            time.sleep(WAIT_INTERVAL)
+        self._status = DECODE_STATUS_EXIT
+
+    def is_finished(self):
+        """Decode finished
+        Pyav and dvpp vdec decoded all frames, and all decoded frames were
+        taken off. When read() returns success but the image is None, use
+        this to confirm that decoding finished
+        """
+        return self._status == DECODE_STATUS_EXIT
+
+    def read(self, no_wait=False):
+        """Read a decoded frame
+        Args:
+            no_wait: Get the image without waiting. If this arg is True and
+                     the returned image is None, call the is_finished()
+                     method to confirm whether decode finished or failed
+
+        Returns:
+            1. const.SUCCESS, not None: get image success
+            2. const.SUCCESS, None: all frames were decoded and taken off
+            3. const.FAILED, None: there are frames not decoded yet, but no
+               image was decoded, which means decoding the video failed
+        """
+        # Pyav and dvpp vdec decoded all frames,
+        # and all decoded frames were taken off
+        if self._status == DECODE_STATUS_EXIT:
+            return const.SUCCESS, None
+
+        # When read() is called the first time, the decode thread is only
+        # ready to decode, but not decoding yet. 
Set status to DECODE_STATUS_RUNNING will + # cause pyav and dvpp vdec start decode actually + if self._status == DECODE_STATUS_READY: + self._status = DECODE_STATUS_RUNNING + # The decode just begin, need wait the first frame to be decoded + time.sleep(WAIT_FIRST_DECODED_FRAME) + + ret, image = self._vdec.read(no_wait) + + # Decode finish or stopped, and all decode frames were token off + if (image is None) and (self._status > DECODE_STATUS_RUNNING): + self._status = DECODE_STATUS_EXIT + + return ret, image + + def destroy(self): + """Release all decode resource""" + if self._vdec is not None: + self._vdec.destroy() + while self._vdec._destory_channel_flag == False: + time.sleep(0.001) + if self._input_buffer is not None: + acl.media.dvpp_free(self._input_buffer) + self._input_buffer = None + self._dextory_dvpp_flag = True -- Gitee From b11322c20e5339d70a46ec2abe891b29336895ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 08:57:54 +0000 Subject: [PATCH 02/38] update Samples/YOLOV5MultiInput/python/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5MultiInput/python/README.md | 236 +++++++++------------- 1 file changed, 92 insertions(+), 144 deletions(-) diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md index 83b2e2c..8ab8892 100644 --- a/Samples/YOLOV5MultiInput/python/README.md +++ b/Samples/YOLOV5MultiInput/python/README.md @@ -1,168 +1,116 @@ -## 目录 - - - [样例介绍](#样例介绍) - - [获取源码包](#获取源码包) - - [第三方依赖安装](#第三方依赖安装) - - [样例运行](#样例运行) - - [其他资源](#其他资源) - - [更新说明](#更新说明) - - [已知issue](#已知issue) - -## 样例介绍 - -以YOLOV7网络模型为例,使能Acllite对图片进行预处理,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对YOLOV7网络执行推理,对图片进行物体检测和分类,并给出标定框和类别置信度。 - -样例输入:图片。 -样例输出:图片物体检测,并且在图片上给出物体标注框,类别以及置信度。 - -## 获取源码包 - - 可以使用以下两种方式下载,请选择其中一种进行源码准备。 - - - 命令行方式下载(下载时间较长,但步骤简单)。 - - ``` - # 开发环境,非root用户命令行中执行以下命令下载源码仓。 - cd ${HOME} - git clone https://gitee.com/ascend/samples.git - ``` - **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** - ``` - git checkout v0.5.0 - ``` - - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 - **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** - ``` - # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 - # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 - # 3. 开发环境中,执行以下命令,解压zip包。 - cd ${HOME} - unzip ascend-samples-master.zip - ``` +# 目标检测(YoloV5s) -## 第三方依赖安装 +#### 样例介绍 +使用多路离线视频流(*.mp4)作为应用程序的输入,基于YoloV5s模型对输入视频中的物体做实时检测,将推理结果信息使用imshow方式显示。 +样例代码逻辑如下所示:![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/samples-pic/EdgeAndRobotics/%E5%A4%9A%E7%BA%BF%E7%A8%8B%E7%A4%BA%E4%BE%8B%E5%9B%BE%E7%89%87.png) -设置环境变量,配置程序编译依赖的头文件,库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 +#### 样例下载 - ``` - export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest - export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub - export THIRDPART_PATH=${DDK_PATH}/thirdpart - export LD_LIBRARY_PATH=${THIRDPART_PATH}/lib:$LD_LIBRARY_PATH - ``` - 创建THIRDPART_PATH路径 +可以使用以下两种方式下载,请选择其中一种进行源码准备。 + +- 命令行方式下载(**下载时间较长,但步骤简单**)。 + + ``` + # 登录开发板,HwHiAiUser用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/EdgeAndRobotics.git + # 切换到样例目录 + cd EdgeAndRobotics/Samples/YOLOV5MultiInput + ``` + +- 压缩包方式下载(**下载时间较短,但步骤稍微复杂**)。 + + ``` + # 1. 仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发板的普通用户家目录中,【例如:${HOME}/EdgeAndRobotics-master.zip】。 + # 3. 
开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + chmod +x EdgeAndRobotics-master.zip + unzip EdgeAndRobotics-master.zip + # 4. 切换到样例目录 + cd EdgeAndRobotics-master/Samples/YOLOV5MultiInput + ``` + +#### 准备环境 + +1. 以HwHiAiUser用户登录开发板。 + +2. 设置环境变量。 ``` - mkdir -p ${THIRDPART_PATH} + # 配置程序编译依赖的头文件与库文件路径 + export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest + export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub ``` -- acllite - - 注:源码安装ffmpeg主要是为了acllite库的安装 - 执行以下命令安装x264 - - ``` - # 下载x264 - cd ${HOME} - git clone https://code.videolan.org/videolan/x264.git - cd x264 - # 安装x264 - ./configure --enable-shared --disable-asm - make - sudo make install - sudo cp /usr/local/lib/libx264.so.164 /lib - ``` - 执行以下命令安装ffmpeg - - ``` - # 下载ffmpeg - cd ${HOME} - wget http://www.ffmpeg.org/releases/ffmpeg-4.1.3.tar.gz --no-check-certificate - tar -zxvf ffmpeg-4.1.3.tar.gz - cd ffmpeg-4.1.3 - # 安装ffmpeg - ./configure --enable-shared --enable-pic --enable-static --disable-x86asm --enable-libx264 --enable-gpl --prefix=${THIRDPART_PATH} - make -j8 - make install - ``` - 执行以下命令安装acllite - - ``` - cd ${HOME}/samples/inference/acllite/cplusplus - make - make install - ``` - - -- opencv - - 执行以下命令安装opencv(注:确保是3.x版本) - ``` - sudo apt-get install libopencv-dev - ``` -## 样例运行 +3. 安装ACLLite库。 + + 参考[ACLLite仓](https://gitee.com/ascend/ACLLite)安装ACLLite库。 - - 数据准备 - 请从以下链接获取该样例的输入图片,放在data目录下。 - - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/data - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg - ``` +#### 运行样例 - - ATC模型转换 +1. 以HwHiAiUser用户登录开发板,切换到当前样例目录。 - 将YOLOV7原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 +2. 获取PyTorch框架的YoloV5s模型(\*.onnx),并转换为昇腾AI处理器能识别的模型(\*.om)。 + - 当设备内存**小于8G**时,可设置如下两个环境变量减少atc模型转换过程中使用的进程数,减小内存占用。 + ``` + export TE_PARALLEL_COMPILER=1 + export MAX_COMPILE_CORE_NUMBER=1 + ``` + - 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 + ``` + cd model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp.cfg --no-check-certificate + atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_nms --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp.cfg + ``` - ``` - # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 - cd $HOME/samples/inference/modelInference/sampleYOLOV7/model - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg - atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg - ``` + atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 - - 样例编译 + - --model:YoloV5s网络的模型文件的路径。 + - --framework:原始框架类型。5表示ONNX。 + - --output:om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 + - --input\_shape:模型输入数据的shape。 + - --soc\_version:昇腾AI处理器的版本。 - 执行以下命令,执行编译脚本,开始样例编译。 - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/scripts - bash sample_build.sh - ``` - - 样例运行 +3. 
准备测试视频。 - 执行运行脚本,开始样例运行。 - ``` - bash sample_run.sh - ``` - - 样例结果展示 - - 运行完成后,会在样例工程的out目录下生成推理后的图片,显示对比结果如下所示。 - ![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/out_dog.jpg "image-20211028101534905.png") + 请从以下链接获取该样例的测试视频,放在data目录下。 -## 其他资源 + ``` + cd ../data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.mp4 --no-check-certificate + ``` + + **注:**若需更换测试视频,则需自行准备测试视频,并将测试视频放到data目录下。 -以下资源提供了对ONNX项目和YOLOV7模型的更深入理解: +4. 编译样例源码。 -**ONNX** -- [GitHub: ONNX](https://github.com/onnx/onnx) + 执行以下命令编译样例源码。 + + ``` + cd ../scripts + bash sample_build.sh + ``` -**Models** -- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch) +5. 运行样例。 -**Documentation** -- [AscendCL Samples介绍](../README_CN.md) -- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) -- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + - 在HDMI连接屏幕场景,执行以下脚本运行样例。此时会以画面的形式呈现推理效果。 + ``` + bash sample_run.sh imshow + ``` + - 在直连电脑场景,执行以下脚本运行样例。此时会以结果打屏的形式呈现推理效果。 + ``` + bash sample_run.sh stdout + ``` -## 更新说明 - | 时间 | 更新事项 | -|----|------| -| 2023/03/07 | 新增sampleYOLOV7/README.md | - +#### 相关操作 -## 已知issue +- 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 +- 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 +- 获取学习文档,请单击[AscendCL C&C++](https://hiascend.com/document/redirect/CannCommunityCppAclQuick),查看最新版本的AscendCL推理应用开发指南。 +- 查模型的输入输出 - 暂无 + 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From 903bd6e14fa8be8d55fb0c5efca2268748637ede Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 09:02:07 +0000 Subject: [PATCH 03/38] update Samples/YOLOV5MultiInput/python/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5MultiInput/python/README.md | 30 +++++------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md index 8ab8892..357935e 100644 --- a/Samples/YOLOV5MultiInput/python/README.md +++ b/Samples/YOLOV5MultiInput/python/README.md @@ -29,7 +29,7 @@ chmod +x EdgeAndRobotics-master.zip unzip EdgeAndRobotics-master.zip # 4. 切换到样例目录 - cd EdgeAndRobotics-master/Samples/YOLOV5MultiInput + cd EdgeAndRobotics-master/Samples/YOLOV5MultiInput/python ``` #### 准备环境 @@ -42,13 +42,9 @@ # 配置程序编译依赖的头文件与库文件路径 export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub + export PYTHONPATH=`pwd`/python:$PYTHONPATH ``` -3. 安装ACLLite库。 - - 参考[ACLLite仓](https://gitee.com/ascend/ACLLite)安装ACLLite库。 - - #### 运行样例 1. 
以HwHiAiUser用户登录开发板,切换到当前样例目录。 @@ -63,8 +59,8 @@ ``` cd model wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp.cfg --no-check-certificate - atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_nms --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp.cfg + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp_rgb.cfg --no-check-certificate + atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_nms --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp_rgb.cfg ``` atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 @@ -86,24 +82,10 @@ **注:**若需更换测试视频,则需自行准备测试视频,并将测试视频放到data目录下。 -4. 编译样例源码。 - - 执行以下命令编译样例源码。 - - ``` - cd ../scripts - bash sample_build.sh - ``` - -5. 运行样例。 +4. 运行样例。 - - 在HDMI连接屏幕场景,执行以下脚本运行样例。此时会以画面的形式呈现推理效果。 - ``` - bash sample_run.sh imshow - ``` - - 在直连电脑场景,执行以下脚本运行样例。此时会以结果打屏的形式呈现推理效果。 ``` - bash sample_run.sh stdout + bash sample_run.sh ``` #### 相关操作 -- Gitee From 3e466980508186c9f2228fd266f611fb66a953b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 09:02:29 +0000 Subject: [PATCH 04/38] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Sa?= =?UTF-8?q?mples/YOLOV5MultiInput/python/scripts/sample=5Fbuild.sh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../python/scripts/sample_build.sh | 48 ------------------- 1 file changed, 48 deletions(-) delete mode 100644 Samples/YOLOV5MultiInput/python/scripts/sample_build.sh diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh deleted file mode 100644 index a88536b..0000000 --- a/Samples/YOLOV5MultiInput/python/scripts/sample_build.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" -ModelPath="${ScriptPath}/../model" - -function build() -{ - if [ -d ${ScriptPath}/../build/intermediates/host ];then - rm -rf ${ScriptPath}/../build/intermediates/host - fi - - mkdir -p ${ScriptPath}/../build/intermediates/host - cd ${ScriptPath}/../build/intermediates/host - - cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE - if [ $? -ne 0 ];then - echo "[ERROR] cmake error, Please check your environment!" - return 1 - fi - make - if [ $? -ne 0 ];then - echo "[ERROR] build failed, Please check your environment!" - return 1 - fi - cd - > /dev/null -} -function main() -{ - echo "[INFO] Sample preparation" - - ret=`find ${ModelPath} -maxdepth 1 -name yolov7x.om 2> /dev/null` - - if [[ ${ret} ]];then - echo "[INFO] The yolov7x.om already exists.start buiding" - else - echo "[ERROR] yolov7x.om does not exist, please follow the readme to convert the model and place it in the correct position!" - return 1 - fi - - - build - if [ $? -ne 0 ];then - return 1 - fi - - echo "[INFO] Sample preparation is complete" -} -main - -- Gitee From 2832f99b98c6328fece164d583c63374794f1816 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 09:03:32 +0000 Subject: [PATCH 05/38] update Samples/YOLOV5MultiInput/python/scripts/sample_run.sh. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5MultiInput/python/scripts/sample_run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh index a141e29..12a7e0b 100644 --- a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh +++ b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh @@ -2,8 +2,8 @@ ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" echo "[INFO] The sample starts to run" -running_command="./main" -cd ${ScriptPath}/../out +running_command="python3 multi_process_yolo_nms.py" +cd ${ScriptPath}/../src ${running_command} if [ $? -ne 0 ];then echo "[INFO] The program runs failed" -- Gitee From 6ce04f37eb0c41092f50268e6897d0c9b28835b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 09:07:21 +0000 Subject: [PATCH 06/38] update Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- .../YOLOV5MultiInput/python/src/multi_process_yolo_nms.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py b/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py index 2a67a94..5847c61 100644 --- a/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py +++ b/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py @@ -43,10 +43,6 @@ labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] -def init_resource(): - resource = AclLiteResource - resource.init() - def preprocess(path, q_pre, model_width, model_height, channel): print(f'sub process preprocess{i} start') width = 1920 @@ -163,7 +159,6 @@ if __name__ == '__main__': processes = [] for i in range(pnums): processes.append(Process(target=preprocess, args=(stream_path, q_pre[i], model_width, model_height, i))) - # processes.append(Process(target=preprocess, args=(q_data, q_pre, dvpps[i], model_width, model_height))) processes.append(Process(target=infer, args=(model_path, q_pre, q_out, pnums))) for i in range(pnums): processes.append(Process(target=postprocess, args=(q_out[i],model_width, model_width))) -- Gitee From fe0c61f57a5be452276569795da5bdc51a5d2628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Mon, 11 Mar 2024 10:46:10 +0000 Subject: [PATCH 07/38] update Samples/YOLOV5MultiInput/python/README.md. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5MultiInput/python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md index 357935e..1426e84 100644 --- a/Samples/YOLOV5MultiInput/python/README.md +++ b/Samples/YOLOV5MultiInput/python/README.md @@ -92,7 +92,7 @@ - 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 - 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 -- 获取学习文档,请单击[AscendCL C&C++](https://hiascend.com/document/redirect/CannCommunityCppAclQuick),查看最新版本的AscendCL推理应用开发指南。 +- 获取学习文档,请单击[AscendCL python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha002/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html),查看最新版本的AscendCL推理应用开发指南。 - 查模型的输入输出 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From 914be76d3b18b71989b85a3a5ee0c7c88aeefd12 Mon Sep 17 00:00:00 2001 From: cajcak <1039183305@qq.com> Date: Thu, 14 Mar 2024 16:43:45 +0800 Subject: [PATCH 08/38] update --- Samples/ResnetPicture/pyACLLite/README.md | 142 +++ Samples/ResnetPicture/pyACLLite/data/.keep | 0 Samples/ResnetPicture/pyACLLite/model/.keep | 0 .../pyACLLite/scripts/sample_run.sh | 13 + Samples/ResnetPicture/pyACLLite/src/label.py | 1006 +++++++++++++++++ .../pyACLLite/src/sampleResnetDVPP.py | 111 ++ 6 files changed, 1272 insertions(+) create mode 100644 Samples/ResnetPicture/pyACLLite/README.md create mode 100644 Samples/ResnetPicture/pyACLLite/data/.keep create mode 100644 Samples/ResnetPicture/pyACLLite/model/.keep create mode 100644 Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh create mode 100644 Samples/ResnetPicture/pyACLLite/src/label.py create mode 100644 Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py diff --git a/Samples/ResnetPicture/pyACLLite/README.md b/Samples/ResnetPicture/pyACLLite/README.md new file mode 100644 index 0000000..e2fc939 --- /dev/null +++ b/Samples/ResnetPicture/pyACLLite/README.md @@ -0,0 +1,142 @@ +## 目录 + + - [样例介绍](#样例介绍) + - [获取源码包](#获取源码包) + - [第三方依赖安装](#第三方依赖安装) + - [样例运行](#样例运行) + - [其他资源](#其他资源) + - [更新说明](#更新说明) + - [已知issue](#已知issue) + +## 样例介绍 + +使用DVPP加速预处理网络输入,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对ResNet50网络执行推理,最终对输入的图片进行分类并且给出TOP5类别置信度和相应的类别信息。 + +样例输入:图片。 +样例输出:打屏显示置信度TOP5的类别标识、置信度信息和相应的类别信息。 + + +## 获取源码包 + + 可以使用以下两种方式下载,请选择其中一种进行源码准备。 + + - 命令行方式下载(下载时间较长,但步骤简单)。 + + ``` + # 开发环境,非root用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/samples.git + ``` + **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** + ``` + git checkout v0.5.0 + ``` + - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 + **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** + ``` + # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 + # 3. 
开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + unzip ascend-samples-master.zip + ``` + +## 第三方依赖安装 + 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 + ``` + export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest + export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub + export THIRDPART_PATH=${DDK_PATH}/thirdpart + export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH + ``` + + 执行以下命令,创建THIRDPART_PATH和PYTHONPATH路径 + + ``` + mkdir -p ${PYTHONPATH} + ``` +- python-acllite + + python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 + + ``` + cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} + + ``` + +- numpy + + 执行以下命令安装numpy库。 + ``` + pip3 install numpy + ``` + + +## 样例运行 + + - 数据准备 + + 请从以下链接获取该样例的输入图片,放在data目录下。 + + ``` + cd $HOME/samples/inference/modelInference/sampleResnetDVPP/pyACLLite/data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg + ``` + + - ATC模型转换 + + 将ResNet-50原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 + + ``` + # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 + cd $HOME/samples/inference/modelInference/sampleResnetDVPP/pyACLLite/model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50_DVPP/aipp.cfg + atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape="actual_input_1:1,3,224,224" --soc_version=Ascend310 --insert_op_conf=aipp.cfg + ``` + + - 样例运行 + + 执行运行脚本,开始样例运行。 + ``` + bash sample_run.sh + ``` + - 样例结果展示 + + 执行成功后,在屏幕上显示置信度top5的相关信息如下,提示信息中的label表示类别标识、confidence表示该类别的置信度,class表示对应的类别,这些值可能会根据版本、环境有所不同,请以实际情况为准: + + + ``` + ======== top5 inference results: ============= + label:162 confidence:0.913663 class:beaglebeagle, + label:161 confidence:0.078597 class:basset houndbasset hound, + label:166 confidence:0.003647 class:Walker foxhoundWalker foxhound, + label:167 confidence:0.003535 class:English foxhoundEnglish foxhound, + label:163 confidence:0.000268 class:sleuthhoundsleuthhound, + *****run finish****** + ``` + +## 其他资源 + +以下资源提供了对ONNX项目和Renet50模型的更深入理解: + +**ONNX** +- [GitHub: ONNX](https://github.com/onnx/onnx) + +**Models** +- [Resnet50 - image classification](https://gitee.com/ascend/ModelZoo-PyTorch/tree/master/ACL_PyTorch/built-in/cv/Resnet50_Pytorch_Infer) + +**Documentation** +- [AscendCL Samples介绍](../README_CN.md) +- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) +- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + +## 更新说明 + | 时间 | 更新事项 | +|----|------| +| 2023/02/17 | 新增sampleResnetDVPP/pyACLLite/README.md | + + +## 已知issue + + 暂无 diff --git a/Samples/ResnetPicture/pyACLLite/data/.keep b/Samples/ResnetPicture/pyACLLite/data/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/ResnetPicture/pyACLLite/model/.keep b/Samples/ResnetPicture/pyACLLite/model/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh b/Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh new file mode 100644 index 0000000..9c210a4 --- /dev/null +++ b/Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh @@ -0,0 +1,13 @@ +#!/bin/bash +ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" +Src=${ScriptPath}/../src + +cd ${Src} +echo "[INFO] The sample starts to run" +running_command="python3 sampleResnetDVPP.py" +${running_command} +if [ $? 
-ne 0 ];then + echo "[INFO] The program runs failed" +else + echo "[INFO] The program runs successfully" +fi diff --git a/Samples/ResnetPicture/pyACLLite/src/label.py b/Samples/ResnetPicture/pyACLLite/src/label.py new file mode 100644 index 0000000..76dd0c6 --- /dev/null +++ b/Samples/ResnetPicture/pyACLLite/src/label.py @@ -0,0 +1,1006 @@ +label = { + "0": ["tench", "Tinca tinca"], + "1": ["goldfish", "Carassius auratus"], + "2": ["great white shark", "white shark", "man-eater", "man-eating shark", "Carcharodon carcharias"], + "3": ["tiger shark", "Galeocerdo cuvieri"], + "4": ["hammerhead", "hammerhead shark"], + "5": ["electric ray", "crampfish", "numbfish", "torpedo"], + "6": ["stingray"], + "7": ["cock"], + "8": ["hen"], + "9": ["ostrich", "Struthio camelus"], + "10": ["brambling", "Fringilla montifringilla"], + "11": ["goldfinch", "Carduelis carduelis"], + "12": ["house finch", "linnet", "Carpodacus mexicanus"], + "13": ["junco", "snowbird"], + "14": ["indigo bunting", "indigo finch", "indigo bird", "Passerina cyanea"], + "15": ["robin", "American robin", "Turdus migratorius"], + "16": ["bulbul"], + "17": ["jay"], + "18": ["magpie"], + "19": ["chickadee"], + "20": ["water ouzel", "dipper"], + "21": ["kite"], + "22": ["bald eagle", "American eagle", "Haliaeetus leucocephalus"], + "23": ["vulture"], + "24": ["great grey owl", "great gray owl", "Strix nebulosa"], + "25": ["European fire salamander", "Salamandra salamandra"], + "26": ["common newt", "Triturus vulgaris"], + "27": ["eft"], + "28": ["spotted salamander", "Ambystoma maculatum"], + "29": ["axolotl", "mud puppy", "Ambystoma mexicanum"], + "30": ["bullfrog", "Rana catesbeiana"], + "31": ["tree frog", "tree-frog"], + "32": ["tailed frog", "bell toad", "ribbed toad", "tailed toad", "Ascaphus trui"], + "33": ["loggerhead", "loggerhead turtle", "Caretta caretta"], + "34": ["leatherback turtle", "leatherback", "leathery turtle", "Dermochelys coriacea"], + "35": ["mud turtle"], + "36": ["terrapin"], + "37": ["box turtle", "box tortoise"], + "38": ["banded gecko"], + "39": ["common iguana", "iguana", "Iguana iguana"], + "40": ["American chameleon", "anole", "Anolis carolinensis"], + "41": ["whiptail", "whiptail lizard"], + "42": ["agama"], + "43": ["frilled lizard", "Chlamydosaurus kingi"], + "44": ["alligator lizard"], + "45": ["Gila monster", "Heloderma suspectum"], + "46": ["green lizard", "Lacerta viridis"], + "47": ["African chameleon", "Chamaeleo chamaeleon"], + "48": ["Komodo dragon", "Komodo lizard", "dragon lizard", "giant lizard", "Varanus komodoensis"], + "49": ["African crocodile", "Nile crocodile", "Crocodylus niloticus"], + "50": ["American alligator", "Alligator mississipiensis"], + "51": ["triceratops"], + "52": ["thunder snake", "worm snake", "Carphophis amoenus"], + "53": ["ringneck snake", "ring-necked snake", "ring snake"], + "54": ["hognose snake", "puff adder", "sand viper"], + "55": ["green snake", "grass snake"], + "56": ["king snake", "kingsnake"], + "57": ["garter snake", "grass snake"], + "58": ["water snake"], + "59": ["vine snake"], + "60": ["night snake", "Hypsiglena torquata"], + "61": ["boa constrictor", "Constrictor constrictor"], + "62": ["rock python", "rock snake", "Python sebae"], + "63": ["Indian cobra", "Naja naja"], + "64": ["green mamba"], + "65": ["sea snake"], + "66": ["horned viper", "cerastes", "sand viper", "horned asp", "Cerastes cornutus"], + "67": ["diamondback", "diamondback rattlesnake", "Crotalus adamanteus"], + "68": ["sidewinder", "horned rattlesnake", "Crotalus cerastes"], + "69": 
["trilobite"], + "70": ["harvestman", "daddy longlegs", "Phalangium opilio"], + "71": ["scorpion"], + "72": ["black and gold garden spider", "Argiope aurantia"], + "73": ["barn spider", "Araneus cavaticus"], + "74": ["garden spider", "Aranea diademata"], + "75": ["black widow", "Latrodectus mactans"], + "76": ["tarantula"], + "77": ["wolf spider", "hunting spider"], + "78": ["tick"], + "79": ["centipede"], + "80": ["black grouse"], + "81": ["ptarmigan"], + "82": ["ruffed grouse", "partridge", "Bonasa umbellus"], + "83": ["prairie chicken", "prairie grouse", "prairie fowl"], + "84": ["peacock"], + "85": ["quail"], + "86": ["partridge"], + "87": ["African grey", "African gray", "Psittacus erithacus"], + "88": ["macaw"], + "89": ["sulphur-crested cockatoo", "Kakatoe galerita", "Cacatua galerita"], + "90": ["lorikeet"], + "91": ["coucal"], + "92": ["bee eater"], + "93": ["hornbill"], + "94": ["hummingbird"], + "95": ["jacamar"], + "96": ["toucan"], + "97": ["drake"], + "98": ["red-breasted merganser", "Mergus serrator"], + "99": ["goose"], + "100": ["black swan", "Cygnus atratus"], + "101": ["tusker"], + "102": ["echidna", "spiny anteater", "anteater"], + "103": ["platypus", "duckbill", "duckbilled platypus", "duck-billed platypus", "Ornithorhynchus anatinus"], + "104": ["wallaby", "brush kangaroo"], + "105": ["koala", "koala bear", "kangaroo bear", "native bear", "Phascolarctos cinereus"], + "106": ["wombat"], + "107": ["jellyfish"], + "108": ["sea anemone", "anemone"], + "109": ["brain coral"], + "110": ["flatworm", "platyhelminth"], + "111": ["nematode", "nematode worm", "roundworm"], + "112": ["conch"], + "113": ["snail"], + "114": ["slug"], + "115": ["sea slug", "nudibranch"], + "116": ["chiton", "coat-of-mail shell", "sea cradle", "polyplacophore"], + "117": ["chambered nautilus", "pearly nautilus", "nautilus"], + "118": ["Dungeness crab", "Cancer magister"], + "119": ["rock crab", "Cancer irroratus"], + "120": ["fiddler crab"], + "121": ["king crab", "Alaska crab", "Alaskan king crab", "Alaska king crab", "Paralithodes camtschatica"], + "122": ["American lobster", "Northern lobster", "Maine lobster", "Homarus americanus"], + "123": ["spiny lobster", "langouste", "rock lobster", "crawfish", "crayfish", "sea crawfish"], + "124": ["crayfish", "crawfish", "crawdad", "crawdaddy"], + "125": ["hermit crab"], + "126": ["isopod"], + "127": ["white stork", "Ciconia ciconia"], + "128": ["black stork", "Ciconia nigra"], + "129": ["spoonbill"], + "130": ["flamingo"], + "131": ["little blue heron", "Egretta caerulea"], + "132": ["American egret", "great white heron", "Egretta albus"], + "133": ["bittern"], + "134": ["crane"], + "135": ["limpkin", "Aramus pictus"], + "136": ["European gallinule", "Porphyrio porphyrio"], + "137": ["American coot", "marsh hen", "mud hen", "water hen", "Fulica americana"], + "138": ["bustard"], + "139": ["ruddy turnstone", "Arenaria interpres"], + "140": ["red-backed sandpiper", "dunlin", "Erolia alpina"], + "141": ["redshank", "Tringa totanus"], + "142": ["dowitcher"], + "143": ["oystercatcher", "oyster catcher"], + "144": ["pelican"], + "145": ["king penguin", "Aptenodytes patagonica"], + "146": ["albatross", "mollymawk"], + "147": ["grey whale", "gray whale", "devilfish", "Eschrichtius gibbosus", "Eschrichtius robustus"], + "148": ["killer whale", "killer", "orca", "grampus", "sea wolf", "Orcinus orca"], + "149": ["dugong", "Dugong dugon"], + "150": ["sea lion"], + "151": ["Chihuahua"], + "152": ["Japanese spaniel"], + "153": ["Maltese dog", "Maltese terrier", 
"Maltese"], + "154": ["Pekinese", "Pekingese", "Peke"], + "155": ["Shih-Tzu"], + "156": ["Blenheim spaniel"], + "157": ["papillon"], + "158": ["toy terrier"], + "159": ["Rhodesian ridgeback"], + "160": ["Afghan hound", "Afghan"], + "161": ["basset", "basset hound"], + "162": ["beagle"], + "163": ["bloodhound", "sleuthhound"], + "164": ["bluetick"], + "165": ["black-and-tan coonhound"], + "166": ["Walker hound", "Walker foxhound"], + "167": ["English foxhound"], + "168": ["redbone"], + "169": ["borzoi", "Russian wolfhound"], + "170": ["Irish wolfhound"], + "171": ["Italian greyhound"], + "172": ["whippet"], + "173": ["Ibizan hound", "Ibizan Podenco"], + "174": ["Norwegian elkhound", "elkhound"], + "175": ["otterhound", "otter hound"], + "176": ["Saluki", "gazelle hound"], + "177": ["Scottish deerhound", "deerhound"], + "178": ["Weimaraner"], + "179": ["Staffordshire bullterrier", "Staffordshire bull terrier"], + "180": ["American Staffordshire terrier", "Staffordshire terrier", "American pit bull terrier", "pit bull terrier"], + "181": ["Bedlington terrier"], + "182": ["Border terrier"], + "183": ["Kerry blue terrier"], + "184": ["Irish terrier"], + "185": ["Norfolk terrier"], + "186": ["Norwich terrier"], + "187": ["Yorkshire terrier"], + "188": ["wire-haired fox terrier"], + "189": ["Lakeland terrier"], + "190": ["Sealyham terrier", "Sealyham"], + "191": ["Airedale", "Airedale terrier"], + "192": ["cairn", "cairn terrier"], + "193": ["Australian terrier"], + "194": ["Dandie Dinmont", "Dandie Dinmont terrier"], + "195": ["Boston bull", "Boston terrier"], + "196": ["miniature schnauzer"], + "197": ["giant schnauzer"], + "198": ["standard schnauzer"], + "199": ["Scotch terrier", "Scottish terrier", "Scottie"], + "200": ["Tibetan terrier", "chrysanthemum dog"], + "201": ["silky terrier", "Sydney silky"], + "202": ["soft-coated wheaten terrier"], + "203": ["West Highland white terrier"], + "204": ["Lhasa", "Lhasa apso"], + "205": ["flat-coated retriever"], + "206": ["curly-coated retriever"], + "207": ["golden retriever"], + "208": ["Labrador retriever"], + "209": ["Chesapeake Bay retriever"], + "210": ["German short-haired pointer"], + "211": ["vizsla", "Hungarian pointer"], + "212": ["English setter"], + "213": ["Irish setter", "red setter"], + "214": ["Gordon setter"], + "215": ["Brittany spaniel"], + "216": ["clumber", "clumber spaniel"], + "217": ["English springer", "English springer spaniel"], + "218": ["Welsh springer spaniel"], + "219": ["cocker spaniel", "English cocker spaniel", "cocker"], + "220": ["Sussex spaniel"], + "221": ["Irish water spaniel"], + "222": ["kuvasz"], + "223": ["schipperke"], + "224": ["groenendael"], + "225": ["malinois"], + "226": ["briard"], + "227": ["kelpie"], + "228": ["komondor"], + "229": ["Old English sheepdog", "bobtail"], + "230": ["Shetland sheepdog", "Shetland sheep dog", "Shetland"], + "231": ["collie"], + "232": ["Border collie"], + "233": ["Bouvier des Flandres", "Bouviers des Flandres"], + "234": ["Rottweiler"], + "235": ["German shepherd", "German shepherd dog", "German police dog", "alsatian"], + "236": ["Doberman", "Doberman pinscher"], + "237": ["miniature pinscher"], + "238": ["Greater Swiss Mountain dog"], + "239": ["Bernese mountain dog"], + "240": ["Appenzeller"], + "241": ["EntleBucher"], + "242": ["boxer"], + "243": ["bull mastiff"], + "244": ["Tibetan mastiff"], + "245": ["French bulldog"], + "246": ["Great Dane"], + "247": ["Saint Bernard", "St Bernard"], + "248": ["Eskimo dog", "husky"], + "249": ["malamute", "malemute", "Alaskan 
malamute"], + "250": ["Siberian husky"], + "251": ["dalmatian", "coach dog", "carriage dog"], + "252": ["affenpinscher", "monkey pinscher", "monkey dog"], + "253": ["basenji"], + "254": ["pug", "pug-dog"], + "255": ["Leonberg"], + "256": ["Newfoundland", "Newfoundland dog"], + "257": ["Great Pyrenees"], + "258": ["Samoyed", "Samoyede"], + "259": ["Pomeranian"], + "260": ["chow", "chow chow"], + "261": ["keeshond"], + "262": ["Brabancon griffon"], + "263": ["Pembroke", "Pembroke Welsh corgi"], + "264": ["Cardigan", "Cardigan Welsh corgi"], + "265": ["toy poodle"], + "266": ["miniature poodle"], + "267": ["standard poodle"], + "268": ["Mexican hairless"], + "269": ["timber wolf", "grey wolf", "gray wolf", "Canis lupus"], + "270": ["white wolf", "Arctic wolf", "Canis lupus tundrarum"], + "271": ["red wolf", "maned wolf", "Canis rufus", "Canis niger"], + "272": ["coyote", "prairie wolf", "brush wolf", "Canis latrans"], + "273": ["dingo", "warrigal", "warragal", "Canis dingo"], + "274": ["dhole", "Cuon alpinus"], + "275": ["African hunting dog", "hyena dog", "Cape hunting dog", "Lycaon pictus"], + "276": ["hyena", "hyaena"], + "277": ["red fox", "Vulpes vulpes"], + "278": ["kit fox", "Vulpes macrotis"], + "279": ["Arctic fox", "white fox", "Alopex lagopus"], + "280": ["grey fox", "gray fox", "Urocyon cinereoargenteus"], + "281": ["tabby", "tabby cat"], + "282": ["tiger cat"], + "283": ["Persian cat"], + "284": ["Siamese cat", "Siamese"], + "285": ["Egyptian cat"], + "286": ["cougar", "puma", "catamount", "mountain lion", "painter", "panther", "Felis concolor"], + "287": ["lynx", "catamount"], + "288": ["leopard", "Panthera pardus"], + "289": ["snow leopard", "ounce", "Panthera uncia"], + "290": ["jaguar", "panther", "Panthera onca", "Felis onca"], + "291": ["lion", "king of beasts", "Panthera leo"], + "292": ["tiger", "Panthera tigris"], + "293": ["cheetah", "chetah", "Acinonyx jubatus"], + "294": ["brown bear", "bruin", "Ursus arctos"], + "295": ["American black bear", "black bear", "Ursus americanus", "Euarctos americanus"], + "296": ["ice bear", "polar bear", "Ursus Maritimus", "Thalarctos maritimus"], + "297": ["sloth bear", "Melursus ursinus", "Ursus ursinus"], + "298": ["mongoose"], + "299": ["meerkat", "mierkat"], + "300": ["tiger beetle"], + "301": ["ladybug", "ladybeetle", "lady beetle", "ladybird", "ladybird beetle"], + "302": ["ground beetle", "carabid beetle"], + "303": ["long-horned beetle", "longicorn", "longicorn beetle"], + "304": ["leaf beetle", "chrysomelid"], + "305": ["dung beetle"], + "306": ["rhinoceros beetle"], + "307": ["weevil"], + "308": ["fly"], + "309": ["bee"], + "310": ["ant", "emmet", "pismire"], + "311": ["grasshopper", "hopper"], + "312": ["cricket"], + "313": ["walking stick", "walkingstick", "stick insect"], + "314": ["cockroach", "roach"], + "315": ["mantis", "mantid"], + "316": ["cicada", "cicala"], + "317": ["leafhopper"], + "318": ["lacewing", "lacewing fly"], + "319": ["dragonfly", "darning needle", "devil's darning needle", "sewing needle", \ + "snake feeder", "snake doctor", "mosquito hawk", "skeeter hawk"], + "320": ["damselfly"], + "321": ["admiral"], + "322": ["ringlet", "ringlet butterfly"], + "323": ["monarch", "monarch butterfly", "milkweed butterfly", "Danaus plexippus"], + "324": ["cabbage butterfly"], + "325": ["sulphur butterfly", "sulfur butterfly"], + "326": ["lycaenid", "lycaenid butterfly"], + "327": ["starfish", "sea star"], + "328": ["sea urchin"], + "329": ["sea cucumber", "holothurian"], + "330": ["wood rabbit", "cottontail", 
"cottontail rabbit"], + "331": ["hare"], + "332": ["Angora", "Angora rabbit"], + "333": ["hamster"], + "334": ["porcupine", "hedgehog"], + "335": ["fox squirrel", "eastern fox squirrel", "Sciurus niger"], + "336": ["marmot"], + "337": ["beaver"], + "338": ["guinea pig", "Cavia cobaya"], + "339": ["sorrel"], + "340": ["zebra"], + "341": ["hog", "pig", "grunter", "squealer", "Sus scrofa"], + "342": ["wild boar", "boar", "Sus scrofa"], + "343": ["warthog"], + "344": ["hippopotamus", "hippo", "river horse", "Hippopotamus amphibius"], + "345": ["ox"], + "346": ["water buffalo", "water ox", "Asiatic buffalo", "Bubalus bubalis"], + "347": ["bison"], + "348": ["ram", "tup"], + "349": ["bighorn", "bighorn sheep", "cimarron", "Rocky Mountain bighorn", "Rocky Mountain sheep", \ + "Ovis canadensis"], + "350": ["ibex", "Capra ibex"], + "351": ["hartebeest"], + "352": ["impala", "Aepyceros melampus"], + "353": ["gazelle"], + "354": ["Arabian camel", "dromedary", "Camelus dromedarius"], + "355": ["llama"], + "356": ["weasel"], + "357": ["mink"], + "358": ["polecat", "fitch", "foulmart", "foumart", "Mustela putorius"], + "359": ["black-footed ferret", "ferret", "Mustela nigripes"], + "360": ["otter"], + "361": ["skunk", "polecat", "wood pussy"], + "362": ["badger"], + "363": ["armadillo"], + "364": ["three-toed sloth", "ai", "Bradypus tridactylus"], + "365": ["orangutan", "orang", "orangutang", "Pongo pygmaeus"], + "366": ["gorilla", "Gorilla gorilla"], + "367": ["chimpanzee", "chimp", "Pan troglodytes"], + "368": ["gibbon", "Hylobates lar"], + "369": ["siamang", "Hylobates syndactylus", "Symphalangus syndactylus"], + "370": ["guenon", "guenon monkey"], + "371": ["patas", "hussar monkey", "Erythrocebus patas"], + "372": ["baboon"], + "373": ["macaque"], + "374": ["langur"], + "375": ["colobus", "colobus monkey"], + "376": ["proboscis monkey", "Nasalis larvatus"], + "377": ["marmoset"], + "378": ["capuchin", "ringtail", "Cebus capucinus"], + "379": ["howler monkey", "howler"], + "380": ["titi", "titi monkey"], + "381": ["spider monkey", "Ateles geoffroyi"], + "382": ["squirrel monkey", "Saimiri sciureus"], + "383": ["Madagascar cat", "ring-tailed lemur", "Lemur catta"], + "384": ["indri", "indris", "Indri indri", "Indri brevicaudatus"], + "385": ["Indian elephant", "Elephas maximus"], + "386": ["African elephant", "Loxodonta africana"], + "387": ["lesser panda", "red panda", "panda", "bear cat", "cat bear", "Ailurus fulgens"], + "388": ["giant panda", "panda", "panda bear", "coon bear", "Ailuropoda melanoleuca"], + "389": ["barracouta", "snoek"], + "390": ["eel"], + "391": ["coho", "cohoe", "coho salmon", "blue jack", "silver salmon", "Oncorhynchus kisutch"], + "392": ["rock beauty", "Holocanthus tricolor"], + "393": ["anemone fish"], + "394": ["sturgeon"], + "395": ["gar", "garfish", "garpike", "billfish", "Lepisosteus osseus"], + "396": ["lionfish"], + "397": ["puffer", "pufferfish", "blowfish", "globefish"], + "398": ["abacus"], + "399": ["abaya"], + "400": ["academic gown", "academic robe", "judge's robe"], + "401": ["accordion", "piano accordion", "squeeze box"], + "402": ["acoustic guitar"], + "403": ["aircraft carrier", "carrier", "flattop", "attack aircraft carrier"], + "404": ["airliner"], + "405": ["airship", "dirigible"], + "406": ["altar"], + "407": ["ambulance"], + "408": ["amphibian", "amphibious vehicle"], + "409": ["analog clock"], + "410": ["apiary", "bee house"], + "411": ["apron"], + "412": ["ashcan", "trash can", "garbage can", "wastebin", "ash bin", "ash-bin", "ashbin",\ + "dustbin", 
"trash barrel", "trash bin"], + "413": ["assault rifle", "assault gun"], + "414": ["backpack", "back pack", "knapsack", "packsack", "rucksack", "haversack"], + "415": ["bakery", "bakeshop", "bakehouse"], + "416": ["balance beam", "beam"], + "417": ["balloon"], + "418": ["ballpoint", "ballpoint pen", "ballpen", "Biro"], + "419": ["Band Aid"], + "420": ["banjo"], + "421": ["bannister", "banister", "balustrade", "balusters", "handrail"], + "422": ["barbell"], + "423": ["barber chair"], + "424": ["barbershop"], + "425": ["barn"], + "426": ["barometer"], + "427": ["barrel", "cask"], + "428": ["barrow", "garden cart", "lawn cart", "wheelbarrow"], + "429": ["baseball"], + "430": ["basketball"], + "431": ["bassinet"], + "432": ["bassoon"], + "433": ["bathing cap", "swimming cap"], + "434": ["bath towel"], + "435": ["bathtub", "bathing tub", "bath", "tub"], + "436": ["beach wagon", "station wagon", "wagon", "estate car", "beach waggon", "station waggon", "waggon"], + "437": ["beacon", "lighthouse", "beacon light", "pharos"], + "438": ["beaker"], + "439": ["bearskin", "busby", "shako"], + "440": ["beer bottle"], + "441": ["beer glass"], + "442": ["bell cote", "bell cot"], + "443": ["bib"], + "444": ["bicycle-built-for-two", "tandem bicycle", "tandem"], + "445": ["bikini", "two-piece"], + "446": ["binder", "ring-binder"], + "447": ["binoculars", "field glasses", "opera glasses"], + "448": ["birdhouse"], + "449": ["boathouse"], + "450": ["bobsled", "bobsleigh", "bob"], + "451": ["bolo tie", "bolo", "bola tie", "bola"], + "452": ["bonnet", "poke bonnet"], + "453": ["bookcase"], + "454": ["bookshop", "bookstore", "bookstall"], + "455": ["bottlecap"], + "456": ["bow"], + "457": ["bow tie", "bow-tie", "bowtie"], + "458": ["brass", "memorial tablet", "plaque"], + "459": ["brassiere", "bra", "bandeau"], + "460": ["breakwater", "groin", "groyne", "mole", "bulwark", "seawall", "jetty"], + "461": ["breastplate", "aegis", "egis"], + "462": ["broom"], + "463": ["bucket", "pail"], + "464": ["buckle"], + "465": ["bulletproof vest"], + "466": ["bullet train", "bullet"], + "467": ["butcher shop", "meat market"], + "468": ["cab", "hack", "taxi", "taxicab"], + "469": ["caldron", "cauldron"], + "470": ["candle", "taper", "wax light"], + "471": ["cannon"], + "472": ["canoe"], + "473": ["can opener", "tin opener"], + "474": ["cardigan"], + "475": ["car mirror"], + "476": ["carousel", "carrousel", "merry-go-round", "roundabout", "whirligig"], + "477": ["carpenter's kit", "tool kit"], + "478": ["carton"], + "479": ["car wheel"], + "480": ["cash machine", "cash dispenser", "automated teller machine", "automatic teller machine",\ + "automated teller", "automatic teller", "ATM"], + "481": ["cassette"], + "482": ["cassette player"], + "483": ["castle"], + "484": ["catamaran"], + "485": ["CD player"], + "486": ["cello", "violoncello"], + "487": ["cellular telephone", "cellular phone", "cellphone", "cell", "mobile phone"], + "488": ["chain"], + "489": ["chainlink fence"], + "490": ["chain mail", "ring mail", "mail", "chain armor", "chain armour", "ring armor", "ring armour"], + "491": ["chain saw", "chainsaw"], + "492": ["chest"], + "493": ["chiffonier", "commode"], + "494": ["chime", "bell", "gong"], + "495": ["china cabinet", "china closet"], + "496": ["Christmas stocking"], + "497": ["church", "church building"], + "498": ["cinema", "movie theater", "movie theatre", "movie house", "picture palace"], + "499": ["cleaver", "meat cleaver", "chopper"], + "500": ["cliff dwelling"], + "501": ["cloak"], + "502": ["clog", "geta", 
"patten", "sabot"], + "503": ["cocktail shaker"], + "504": ["coffee mug"], + "505": ["coffeepot"], + "506": ["coil", "spiral", "volute", "whorl", "helix"], + "507": ["combination lock"], + "508": ["computer keyboard", "keypad"], + "509": ["confectionery", "confectionary", "candy store"], + "510": ["container ship", "containership", "container vessel"], + "511": ["convertible"], + "512": ["corkscrew", "bottle screw"], + "513": ["cornet", "horn", "trumpet", "trump"], + "514": ["cowboy boot"], + "515": ["cowboy hat", "ten-gallon hat"], + "516": ["cradle"], + "517": ["crane"], + "518": ["crash helmet"], + "519": ["crate"], + "520": ["crib", "cot"], + "521": ["Crock Pot"], + "522": ["croquet ball"], + "523": ["crutch"], + "524": ["cuirass"], + "525": ["dam", "dike", "dyke"], + "526": ["desk"], + "527": ["desktop computer"], + "528": ["dial telephone", "dial phone"], + "529": ["diaper", "nappy", "napkin"], + "530": ["digital clock"], + "531": ["digital watch"], + "532": ["dining table", "board"], + "533": ["dishrag", "dishcloth"], + "534": ["dishwasher", "dish washer", "dishwashing machine"], + "535": ["disk brake", "disc brake"], + "536": ["dock", "dockage", "docking facility"], + "537": ["dogsled", "dog sled", "dog sleigh"], + "538": ["dome"], + "539": ["doormat", "welcome mat"], + "540": ["drilling platform", "offshore rig"], + "541": ["drum", "membranophone", "tympan"], + "542": ["drumstick"], + "543": ["dumbbell"], + "544": ["Dutch oven"], + "545": ["electric fan", "blower"], + "546": ["electric guitar"], + "547": ["electric locomotive"], + "548": ["entertainment center"], + "549": ["envelope"], + "550": ["espresso maker"], + "551": ["face powder"], + "552": ["feather boa", "boa"], + "553": ["file", "file cabinet", "filing cabinet"], + "554": ["fireboat"], + "555": ["fire engine", "fire truck"], + "556": ["fire screen", "fireguard"], + "557": ["flagpole", "flagstaff"], + "558": ["flute", "transverse flute"], + "559": ["folding chair"], + "560": ["football helmet"], + "561": ["forklift"], + "562": ["fountain"], + "563": ["fountain pen"], + "564": ["four-poster"], + "565": ["freight car"], + "566": ["French horn", "horn"], + "567": ["frying pan", "frypan", "skillet"], + "568": ["fur coat"], + "569": ["garbage truck", "dustcart"], + "570": ["gasmask", "respirator", "gas helmet"], + "571": ["gas pump", "gasoline pump", "petrol pump", "island dispenser"], + "572": ["goblet"], + "573": ["go-kart"], + "574": ["golf ball"], + "575": ["golfcart", "golf cart"], + "576": ["gondola"], + "577": ["gong", "tam-tam"], + "578": ["gown"], + "579": ["grand piano", "grand"], + "580": ["greenhouse", "nursery", "glasshouse"], + "581": ["grille", "radiator grille"], + "582": ["grocery store", "grocery", "food market", "market"], + "583": ["guillotine"], + "584": ["hair slide"], + "585": ["hair spray"], + "586": ["half track"], + "587": ["hammer"], + "588": ["hamper"], + "589": ["hand blower", "blow dryer", "blow drier", "hair dryer", "hair drier"], + "590": ["hand-held computer", "hand-held microcomputer"], + "591": ["handkerchief", "hankie", "hanky", "hankey"], + "592": ["hard disc", "hard disk", "fixed disk"], + "593": ["harmonica", "mouth organ", "harp", "mouth harp"], + "594": ["harp"], + "595": ["harvester", "reaper"], + "596": ["hatchet"], + "597": ["holster"], + "598": ["home theater", "home theatre"], + "599": ["honeycomb"], + "600": ["hook", "claw"], + "601": ["hoopskirt", "crinoline"], + "602": ["horizontal bar", "high bar"], + "603": ["horse cart", "horse-cart"], + "604": ["hourglass"], + "605": 
["iPod"], + "606": ["iron", "smoothing iron"], + "607": ["jack-o'-lantern"], + "608": ["jean", "blue jean", "denim"], + "609": ["jeep", "landrover"], + "610": ["jersey", "T-shirt", "tee shirt"], + "611": ["jigsaw puzzle"], + "612": ["jinrikisha", "ricksha", "rickshaw"], + "613": ["joystick"], + "614": ["kimono"], + "615": ["knee pad"], + "616": ["knot"], + "617": ["lab coat", "laboratory coat"], + "618": ["ladle"], + "619": ["lampshade", "lamp shade"], + "620": ["laptop", "laptop computer"], + "621": ["lawn mower", "mower"], + "622": ["lens cap", "lens cover"], + "623": ["letter opener", "paper knife", "paperknife"], + "624": ["library"], + "625": ["lifeboat"], + "626": ["lighter", "light", "igniter", "ignitor"], + "627": ["limousine", "limo"], + "628": ["liner", "ocean liner"], + "629": ["lipstick", "lip rouge"], + "630": ["Loafer"], + "631": ["lotion"], + "632": ["loudspeaker", "speaker", "speaker unit", "loudspeaker system", "speaker system"], + "633": ["loupe", "jeweler's loupe"], + "634": ["lumbermill", "sawmill"], + "635": ["magnetic compass"], + "636": ["mailbag", "postbag"], + "637": ["mailbox", "letter box"], + "638": ["maillot"], + "639": ["maillot", "tank suit"], + "640": ["manhole cover"], + "641": ["maraca"], + "642": ["marimba", "xylophone"], + "643": ["mask"], + "644": ["matchstick"], + "645": ["maypole"], + "646": ["maze", "labyrinth"], + "647": ["measuring cup"], + "648": ["medicine chest", "medicine cabinet"], + "649": ["megalith", "megalithic structure"], + "650": ["microphone", "mike"], + "651": ["microwave", "microwave oven"], + "652": ["military uniform"], + "653": ["milk can"], + "654": ["minibus"], + "655": ["miniskirt", "mini"], + "656": ["minivan"], + "657": ["missile"], + "658": ["mitten"], + "659": ["mixing bowl"], + "660": ["mobile home", "manufactured home"], + "661": ["Model T"], + "662": ["modem"], + "663": ["monastery"], + "664": ["monitor"], + "665": ["moped"], + "666": ["mortar"], + "667": ["mortarboard"], + "668": ["mosque"], + "669": ["mosquito net"], + "670": ["motor scooter", "scooter"], + "671": ["mountain bike", "all-terrain bike", "off-roader"], + "672": ["mountain tent"], + "673": ["mouse", "computer mouse"], + "674": ["mousetrap"], + "675": ["moving van"], + "676": ["muzzle"], + "677": ["nail"], + "678": ["neck brace"], + "679": ["necklace"], + "680": ["nipple"], + "681": ["notebook", "notebook computer"], + "682": ["obelisk"], + "683": ["oboe", "hautboy", "hautbois"], + "684": ["ocarina", "sweet potato"], + "685": ["odometer", "hodometer", "mileometer", "milometer"], + "686": ["oil filter"], + "687": ["organ", "pipe organ"], + "688": ["oscilloscope", "scope", "cathode-ray oscilloscope", "CRO"], + "689": ["overskirt"], + "690": ["oxcart"], + "691": ["oxygen mask"], + "692": ["packet"], + "693": ["paddle", "boat paddle"], + "694": ["paddlewheel", "paddle wheel"], + "695": ["padlock"], + "696": ["paintbrush"], + "697": ["pajama", "pyjama", "pj's", "jammies"], + "698": ["palace"], + "699": ["panpipe", "pandean pipe", "syrinx"], + "700": ["paper towel"], + "701": ["parachute", "chute"], + "702": ["parallel bars", "bars"], + "703": ["park bench"], + "704": ["parking meter"], + "705": ["passenger car", "coach", "carriage"], + "706": ["patio", "terrace"], + "707": ["pay-phone", "pay-station"], + "708": ["pedestal", "plinth", "footstall"], + "709": ["pencil box", "pencil case"], + "710": ["pencil sharpener"], + "711": ["perfume", "essence"], + "712": ["Petri dish"], + "713": ["photocopier"], + "714": ["pick", "plectrum", "plectron"], + "715": 
["pickelhaube"], + "716": ["picket fence", "paling"], + "717": ["pickup", "pickup truck"], + "718": ["pier"], + "719": ["piggy bank", "penny bank"], + "720": ["pill bottle"], + "721": ["pillow"], + "722": ["ping-pong ball"], + "723": ["pinwheel"], + "724": ["pirate", "pirate ship"], + "725": ["pitcher", "ewer"], + "726": ["plane", "carpenter's plane", "woodworking plane"], + "727": ["planetarium"], + "728": ["plastic bag"], + "729": ["plate rack"], + "730": ["plow", "plough"], + "731": ["plunger", "plumber's helper"], + "732": ["Polaroid camera", "Polaroid Land camera"], + "733": ["pole"], + "734": ["police van", "police wagon", "paddy wagon", "patrol wagon", "wagon", "black Maria"], + "735": ["poncho"], + "736": ["pool table", "billiard table", "snooker table"], + "737": ["pop bottle", "soda bottle"], + "738": ["pot", "flowerpot"], + "739": ["potter's wheel"], + "740": ["power drill"], + "741": ["prayer rug", "prayer mat"], + "742": ["printer"], + "743": ["prison", "prison house"], + "744": ["projectile", "missile"], + "745": ["projector"], + "746": ["puck", "hockey puck"], + "747": ["punching bag", "punch bag", "punching ball", "punchball"], + "748": ["purse"], + "749": ["quill", "quill pen"], + "750": ["quilt", "comforter", "comfort", "puff"], + "751": ["racer", "race car", "racing car"], + "752": ["racket", "racquet"], + "753": ["radiator"], + "754": ["radio", "wireless"], + "755": ["radio telescope", "radio reflector"], + "756": ["rain barrel"], + "757": ["recreational vehicle", "RV", "R.V."], + "758": ["reel"], + "759": ["reflex camera"], + "760": ["refrigerator", "icebox"], + "761": ["remote control", "remote"], + "762": ["restaurant", "eating house", "eating place", "eatery"], + "763": ["revolver", "six-gun", "six-shooter"], + "764": ["rifle"], + "765": ["rocking chair", "rocker"], + "766": ["rotisserie"], + "767": ["rubber eraser", "rubber", "pencil eraser"], + "768": ["rugby ball"], + "769": ["rule", "ruler"], + "770": ["running shoe"], + "771": ["safe"], + "772": ["safety pin"], + "773": ["saltshaker", "salt shaker"], + "774": ["sandal"], + "775": ["sarong"], + "776": ["sax", "saxophone"], + "777": ["scabbard"], + "778": ["scale", "weighing machine"], + "779": ["school bus"], + "780": ["schooner"], + "781": ["scoreboard"], + "782": ["screen", "CRT screen"], + "783": ["screw"], + "784": ["screwdriver"], + "785": ["seat belt", "seatbelt"], + "786": ["sewing machine"], + "787": ["shield", "buckler"], + "788": ["shoe shop", "shoe-shop", "shoe store"], + "789": ["shoji"], + "790": ["shopping basket"], + "791": ["shopping cart"], + "792": ["shovel"], + "793": ["shower cap"], + "794": ["shower curtain"], + "795": ["ski"], + "796": ["ski mask"], + "797": ["sleeping bag"], + "798": ["slide rule", "slipstick"], + "799": ["sliding door"], + "800": ["slot", "one-armed bandit"], + "801": ["snorkel"], + "802": ["snowmobile"], + "803": ["snowplow", "snowplough"], + "804": ["soap dispenser"], + "805": ["soccer ball"], + "806": ["sock"], + "807": ["solar dish", "solar collector", "solar furnace"], + "808": ["sombrero"], + "809": ["soup bowl"], + "810": ["space bar"], + "811": ["space heater"], + "812": ["space shuttle"], + "813": ["spatula"], + "814": ["speedboat"], + "815": ["spider web", "spider's web"], + "816": ["spindle"], + "817": ["sports car", "sport car"], + "818": ["spotlight", "spot"], + "819": ["stage"], + "820": ["steam locomotive"], + "821": ["steel arch bridge"], + "822": ["steel drum"], + "823": ["stethoscope"], + "824": ["stole"], + "825": ["stone wall"], + "826": ["stopwatch", 
"stop watch"], + "827": ["stove"], + "828": ["strainer"], + "829": ["streetcar", "tram", "tramcar", "trolley", "trolley car"], + "830": ["stretcher"], + "831": ["studio couch", "day bed"], + "832": ["stupa", "tope"], + "833": ["submarine", "pigboat", "sub", "U-boat"], + "834": ["suit", "suit of clothes"], + "835": ["sundial"], + "836": ["sunglass"], + "837": ["sunglasses", "dark glasses", "shades"], + "838": ["sunscreen", "sunblock", "sun blocker"], + "839": ["suspension bridge"], + "840": ["swab", "swob", "mop"], + "841": ["sweatshirt"], + "842": ["swimming trunks", "bathing trunks"], + "843": ["swing"], + "844": ["switch", "electric switch", "electrical switch"], + "845": ["syringe"], + "846": ["table lamp"], + "847": ["tank", "army tank", "armored combat vehicle", "armoured combat vehicle"], + "848": ["tape player"], + "849": ["teapot"], + "850": ["teddy", "teddy bear"], + "851": ["television", "television system"], + "852": ["tennis ball"], + "853": ["thatch", "thatched roof"], + "854": ["theater curtain", "theatre curtain"], + "855": ["thimble"], + "856": ["thresher", "thrasher", "threshing machine"], + "857": ["throne"], + "858": ["tile roof"], + "859": ["toaster"], + "860": ["tobacco shop", "tobacconist shop", "tobacconist"], + "861": ["toilet seat"], + "862": ["torch"], + "863": ["totem pole"], + "864": ["tow truck", "tow car", "wrecker"], + "865": ["toyshop"], + "866": ["tractor"], + "867": ["trailer truck", "tractor trailer", "trucking rig", "rig", "articulated lorry", "semi"], + "868": ["tray"], + "869": ["trench coat"], + "870": ["tricycle", "trike", "velocipede"], + "871": ["trimaran"], + "872": ["tripod"], + "873": ["triumphal arch"], + "874": ["trolleybus", "trolley coach", "trackless trolley"], + "875": ["trombone"], + "876": ["tub", "vat"], + "877": ["turnstile"], + "878": ["typewriter keyboard"], + "879": ["umbrella"], + "880": ["unicycle", "monocycle"], + "881": ["upright", "upright piano"], + "882": ["vacuum", "vacuum cleaner"], + "883": ["vase"], + "884": ["vault"], + "885": ["velvet"], + "886": ["vending machine"], + "887": ["vestment"], + "888": ["viaduct"], + "889": ["violin", "fiddle"], + "890": ["volleyball"], + "891": ["waffle iron"], + "892": ["wall clock"], + "893": ["wallet", "billfold", "notecase", "pocketbook"], + "894": ["wardrobe", "closet", "press"], + "895": ["warplane", "military plane"], + "896": ["washbasin", "handbasin", "washbowl", "lavabo", "wash-hand basin"], + "897": ["washer", "automatic washer", "washing machine"], + "898": ["water bottle"], + "899": ["water jug"], + "900": ["water tower"], + "901": ["whiskey jug"], + "902": ["whistle"], + "903": ["wig"], + "904": ["window screen"], + "905": ["window shade"], + "906": ["Windsor tie"], + "907": ["wine bottle"], + "908": ["wing"], + "909": ["wok"], + "910": ["wooden spoon"], + "911": ["wool", "woolen", "woollen"], + "912": ["worm fence", "snake fence", "snake-rail fence", "Virginia fence"], + "913": ["wreck"], + "914": ["yawl"], + "915": ["yurt"], + "916": ["web site", "website", "internet site", "site"], + "917": ["comic book"], + "918": ["crossword puzzle", "crossword"], + "919": ["street sign"], + "920": ["traffic light", "traffic signal", "stoplight"], + "921": ["book jacket", "dust cover", "dust jacket", "dust wrapper"], + "922": ["menu"], + "923": ["plate"], + "924": ["guacamole"], + "925": ["consomme"], + "926": ["hot pot", "hotpot"], + "927": ["trifle"], + "928": ["ice cream", "icecream"], + "929": ["ice lolly", "lolly", "lollipop", "popsicle"], + "930": ["French loaf"], + "931": 
["bagel", "beigel"], + "932": ["pretzel"], + "933": ["cheeseburger"], + "934": ["hotdog", "hot dog", "red hot"], + "935": ["mashed potato"], + "936": ["head cabbage"], + "937": ["broccoli"], + "938": ["cauliflower"], + "939": ["zucchini", "courgette"], + "940": ["spaghetti squash"], + "941": ["acorn squash"], + "942": ["butternut squash"], + "943": ["cucumber", "cuke"], + "944": ["artichoke", "globe artichoke"], + "945": ["bell pepper"], + "946": ["cardoon"], + "947": ["mushroom"], + "948": ["Granny Smith"], + "949": ["strawberry"], + "950": ["orange"], + "951": ["lemon"], + "952": ["fig"], + "953": ["pineapple", "ananas"], + "954": ["banana"], + "955": ["jackfruit", "jak", "jack"], + "956": ["custard apple"], + "957": ["pomegranate"], + "958": ["hay"], + "959": ["carbonara"], + "960": ["chocolate sauce", "chocolate syrup"], + "961": ["dough"], + "962": ["meat loaf", "meatloaf"], + "963": ["pizza", "pizza pie"], + "964": ["potpie"], + "965": ["burrito"], + "966": ["red wine"], + "967": ["espresso"], + "968": ["cup"], + "969": ["eggnog"], + "970": ["alp"], + "971": ["bubble"], + "972": ["cliff", "drop", "drop-off"], + "973": ["coral reef"], + "974": ["geyser"], + "975": ["lakeside", "lakeshore"], + "976": ["promontory", "headland", "head", "foreland"], + "977": ["sandbar", "sand bar"], + "978": ["seashore", "coast", "seacoast", "sea-coast"], + "979": ["valley", "vale"], + "980": ["volcano"], + "981": ["ballplayer", "baseball player"], + "982": ["groom", "bridegroom"], + "983": ["scuba diver"], + "984": ["rapeseed"], + "985": ["daisy"], + "986": ["yellow lady's slipper", "yellow lady-slipper", "Cypripedium calceolus", "Cypripedium parviflorum"], + "987": ["corn"], + "988": ["acorn"], + "989": ["hip", "rose hip", "rosehip"], + "990": ["buckeye", "horse chestnut", "conker"], + "991": ["coral fungus"], + "992": ["agaric"], + "993": ["gyromitra"], + "994": ["stinkhorn", "carrion fungus"], + "995": ["earthstar"], + "996": ["hen-of-the-woods", "hen of the woods", "Polyporus frondosus", "Grifola frondosa"], + "997": ["bolete"], + "998": ["ear", "spike", "capitulum"], + "999": ["toilet tissue", "toilet paper", "bathroom tissue"] +} diff --git a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py new file mode 100644 index 0000000..b2507e5 --- /dev/null +++ b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py @@ -0,0 +1,111 @@ +import os +import numpy as np +from acllite_imageproc import AclLiteImage +from acllite_imageproc import AclLiteImageProc +from acllite_model import AclLiteModel +from acllite_resource import AclLiteResource +from label import label + +class sample_resnet_dvpp(object): + def __init__(self, model_path): + self.model_path = model_path # string + self.resource = None + self.dvpp = None + self.model = None + self.resized_image = None + self.result = None + + def init_resource(self): + # init acl resource + self.resource = AclLiteResource() + self.resource.init() + + # init dvpp resource + self.dvpp = AclLiteImageProc(self.resource) + + # load model from file + self.model = AclLiteModel(self.model_path) + + def process_input(self, input_path, model_width, model_height): + # read image from file + self.image = AclLiteImage(input_path) + + # memory copy from host to dvpp + image_input = self.image.copy_to_dvpp() + + # decode image from JPEGD format to YUV + yuv_image = self.dvpp.jpegd(image_input) + + # execute resize + self.resized_image = self.dvpp.resize(yuv_image, model_width, model_height) + + def 
inference(self): + # inference + self.result = self.model.execute([self.resized_image]) + + def get_result(self): + # do data processing with softmax + res = np.array(self.result).flatten() + res = np.exp(res) + + # print top 5 classes + top_k = res.argsort()[-1:-6:-1] + total = np.sum(res) + print("======== top5 inference results: =============") + for j in top_k: + label_class = "" + label_string = label.get(str(j)) + if label_string: + list_iterator = iter(range(len(label_string))) + for i in list_iterator: + if i == len(label_string) - 1: + label_class += label_string[i] + else: + label_class += label_string[i] + "," + "\t" + print("label:%d confidence:%f class:%s" % (j, res[j]/total, label_class)) + else: + raise Exception("the key of label is not exist") + + def release_resource(self): + # release resource includes acl resource, data set and unload model + # self.dvpp.__del__() + # self.model.__del__() + # self.resource.__del__() + # AclLiteResource.__del__ = lambda x: 0 + # AclLiteImage.__del__ = lambda x: 0 + # AclLiteImageProc.__del__ = lambda x: 0 + # AclLiteModel.__del__ = lambda x: 0 + + del self.dvpp + del self.model + del self.resource + +if __name__ == '__main__': + model_width = 224 + model_height = 224 + current_dir = os.path.dirname(os.path.abspath(__file__)) + model_path = os.path.join(current_dir, "../model/resnet50.om") + if not os.path.exists(model_path): + raise Exception("the model does not exist") + + # read all image file paths + images_path = os.path.join(current_dir, "../data") + if not os.path.exists(images_path): + raise Exception("the directory does not exist") + all_path = [] + for path in os.listdir(images_path): + if path != '.keep': + total_path = os.path.join(images_path, path) + all_path.append(total_path) + if len(all_path) == 0: + raise Exception("the directory is empty, please download image") + + # inference + net = sample_resnet_dvpp(model_path) + net.init_resource() + for image in all_path: + net.process_input(image, model_width, model_height) + net.inference() + net.get_result() + print("*****run finish******") + net.release_resource() -- Gitee
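The scoring step in get_result() above is a softmax over the raw logits followed by a top-5 lookup. Below is a minimal standalone sketch of the same computation, illustrative only: `logits` stands in for the flattened model output, and unlike the sample it subtracts the maximum logit before exponentiating, a common guard against overflow on large logit values.

```python
import numpy as np

# Standalone equivalent of get_result()'s scoring (sketch, not part of the
# sample). `logits` plays the role of the flattened AclLiteModel output.
def top5_softmax(logits):
    exp = np.exp(logits - np.max(logits))  # max-subtraction for numerical stability
    probs = exp / exp.sum()
    top_k = probs.argsort()[-5:][::-1]     # indices of the 5 largest probabilities
    return [(int(j), float(probs[j])) for j in top_k]

# quick self-check with dummy logits for a 1000-class classifier
print(top5_softmax(np.random.randn(1000).astype(np.float32)))
```

From 593f09d73ce8adae01b134528afb6f841df34639 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Thu, 14 Mar 2024 08:50:56 +0000 Subject: [PATCH 09/38] update Samples/ResnetPicture/pyACLLite/README.md.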
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/pyACLLite/README.md | 218 ++++++++++------------ 1 file changed, 95 insertions(+), 123 deletions(-) diff --git a/Samples/ResnetPicture/pyACLLite/README.md b/Samples/ResnetPicture/pyACLLite/README.md index e2fc939..c9ed6c2 100644 --- a/Samples/ResnetPicture/pyACLLite/README.md +++ b/Samples/ResnetPicture/pyACLLite/README.md @@ -1,142 +1,114 @@ -## 目录 - - - [样例介绍](#样例介绍) - - [获取源码包](#获取源码包) - - [第三方依赖安装](#第三方依赖安装) - - [样例运行](#样例运行) - - [其他资源](#其他资源) - - [更新说明](#更新说明) - - [已知issue](#已知issue) - -## 样例介绍 - -使用DVPP加速预处理网络输入,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对ResNet50网络执行推理,最终对输入的图片进行分类并且给出TOP5类别置信度和相应的类别信息。 - -样例输入:图片。 -样例输出:打屏显示置信度TOP5的类别标识、置信度信息和相应的类别信息。 - - -## 获取源码包 - - 可以使用以下两种方式下载,请选择其中一种进行源码准备。 - - - 命令行方式下载(下载时间较长,但步骤简单)。 - - ``` - # 开发环境,非root用户命令行中执行以下命令下载源码仓。 - cd ${HOME} - git clone https://gitee.com/ascend/samples.git - ``` - **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** - ``` - git checkout v0.5.0 - ``` - - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 - **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** - ``` - # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 - # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 - # 3. 开发环境中,执行以下命令,解压zip包。 - cd ${HOME} - unzip ascend-samples-master.zip - ``` +# 图片分类(ResNet50) -## 第三方依赖安装 - 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 - ``` - export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest - export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub - export THIRDPART_PATH=${DDK_PATH}/thirdpart - export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH - ``` +#### 样例介绍 + +基于PyTorch框架的ResNet50模型,对*.jpg图片分类,输出各图片Top5置信度的分类ID、分类名称。 + +#### 样例下载 - 执行以下命令,创建THIRDPART_PATH和PYTHONPATH路径 +可以使用以下两种方式下载,请选择其中一种进行源码准备。 + +- 命令行方式下载(**下载时间较长,但步骤简单**)。 ``` - mkdir -p ${PYTHONPATH} + # 登录开发板,HwHiAiUser用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/EdgeAndRobotics.git + # 切换到样例目录 + cd EdgeAndRobotics/Samples/ResnetPicture/pyACLLite/ ``` -- python-acllite - python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 +- 压缩包方式下载(**下载时间较短,但步骤稍微复杂**)。 ``` - cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} + # 1. 仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发板的普通用户家目录中,【例如:${HOME}/EdgeAndRobotics-master.zip】。 + # 3. 开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + chmod +x EdgeAndRobotics-master.zip + unzip EdgeAndRobotics-master.zip + # 4. 切换到样例目录 + cd EdgeAndRobotics-master/Samples/ResnetPicture/pyACLLite/ + ``` - ``` +#### 执行准备 -- numpy +1. 
以HwHiAiUser用户登录开发板。 - 执行以下命令安装numpy库。 - ``` - pip3 install numpy - ``` - - -## 样例运行 - - - 数据准备 - - 请从以下链接获取该样例的输入图片,放在data目录下。 - - ``` - cd $HOME/samples/inference/modelInference/sampleResnetDVPP/pyACLLite/data - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg - ``` - - - ATC模型转换 - - 将ResNet-50原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 - - ``` - # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 - cd $HOME/samples/inference/modelInference/sampleResnetDVPP/pyACLLite/model - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50_DVPP/aipp.cfg - atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape="actual_input_1:1,3,224,224" --soc_version=Ascend310 --insert_op_conf=aipp.cfg - ``` - - - 样例运行 - - 执行运行脚本,开始样例运行。 - ``` - bash sample_run.sh - ``` - - 样例结果展示 - - 执行成功后,在屏幕上显示置信度top5的相关信息如下,提示信息中的label表示类别标识、confidence表示该类别的置信度,class表示对应的类别,这些值可能会根据版本、环境有所不同,请以实际情况为准: - - - ``` - ======== top5 inference results: ============= - label:162 confidence:0.913663 class:beaglebeagle, - label:161 confidence:0.078597 class:basset houndbasset hound, - label:166 confidence:0.003647 class:Walker foxhoundWalker foxhound, - label:167 confidence:0.003535 class:English foxhoundEnglish foxhound, - label:163 confidence:0.000268 class:sleuthhoundsleuthhound, - *****run finish****** +2. 设置环境变量。 + + ``` + # 配置程序编译依赖的头文件与库文件路径 + export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest + export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub + ``` + +3. 安装ACLLite库。 + + 参考[ACLLite仓](https://gitee.com/ascend/ACLLite)安装ACLLite库python版。 + + +#### 运行样例 + +1. 以HwHiAiUser用户登录开发板,切换到当前样例目录。 +2. 获取PyTorch框架的ResNet50模型(\*.onnx),并转换为昇腾AI处理器能识别的模型(\*.om)。 + - 当设备内存**小于8G**时,可设置如下两个环境变量减少atc模型转换过程中使用的进程数,减小内存占用。 + ``` + export TE_PARALLEL_COMPILER=1 + export MAX_COMPILE_CORE_NUMBER=1 + ``` + - 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 ``` - -## 其他资源 + cd model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50_DVPP/aipp.cfg + atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape="actual_input_1:1,3,224,224" --soc_version=Ascend310B4 --insert_op_conf=aipp.cfg + ``` + + atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 -以下资源提供了对ONNX项目和Renet50模型的更深入理解: + - --model:ResNet-50网络的模型文件的路径。 + - --framework:原始框架类型。5表示ONNX。 + - --output:resnet50.om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 + - --input\_shape:模型输入数据的shape。 + - --soc\_version:昇腾AI处理器的版本。 -**ONNX** -- [GitHub: ONNX](https://github.com/onnx/onnx) -**Models** -- [Resnet50 - image classification](https://gitee.com/ascend/ModelZoo-PyTorch/tree/master/ACL_PyTorch/built-in/cv/Resnet50_Pytorch_Infer) +3. 获取测试图片数据。 + + 请从以下链接获取该样例的测试图片dog1\_1024\_683.jpg,放在data目录下。 + + ``` + cd ../data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg + ``` + + **注:**若需更换测试图片,则需自行准备测试图片,并将测试图片放到data目录下。 -**Documentation** -- [AscendCL Samples介绍](../README_CN.md) -- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) -- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) +4. 
运行样例。 + + 执行以下脚本运行样例: + + ``` + bash sample_run.sh + ``` + + 执行成功后,在屏幕上的关键提示信息示例如下,提示信息中的top5表示置信度最高的5个类别,label表示类别标识,confidence表示该类别的置信度,class表示所属类别。这些值可能会根据版本、环境有所不同,请以实际情况为准: + + ``` +======== top5 inference results: ============= +label:162 confidence:0.849168 class:beagle +label:161 confidence:0.147563 class:basset, basset hound +label:167 confidence:0.001627 class:English foxhound +label:166 confidence:0.001228 class:Walker hound, Walker foxhound +label:163 confidence:0.000184 class:bloodhound, sleuthhound + + ``` -## 更新说明 - | 时间 | 更新事项 | -|----|------| -| 2023/02/17 | 新增sampleResnetDVPP/pyACLLite/README.md | - +#### 相关操作 -## 已知issue +- 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 +- 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 +- 查模型的输入输出 - 暂无 + 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From 1c6852f1e7a79c574d158528304b096a6db4bf63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Thu, 14 Mar 2024 08:51:16 +0000 Subject: [PATCH 10/38] update Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py index b2507e5..8a10403 100644 --- a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py +++ b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py @@ -60,8 +60,8 @@ class sample_resnet_dvpp(object): for i in list_iterator: if i == len(label_string) - 1: label_class += label_string[i] - else: - label_class += label_string[i] + "," + "\t" + else: + label_class += label_string[i] + "," + "\t" print("label:%d confidence:%f class:%s" % (j, res[j]/total, label_class)) else: raise Exception("the key of label is not exist") -- Gitee
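Patch 10 above only re-indents the alias-joining loop in get_result(); the loop itself can also be written more compactly. A hedged equivalent sketch, assuming `label` is the dict from label.py mapping str(id) to a list of class names:

```python
# Compact equivalent of the alias-joining loop patch 10 re-indents (sketch).
# `label` is assumed to be the dict defined in label.py.
def format_label_class(label, class_id):
    names = label.get(str(class_id))
    if names is None:
        raise KeyError("label id %d does not exist" % class_id)
    # same "," + "\t" separator the sample inserts between aliases
    return ",\t".join(names)
```

From 32763ddc635d26d1591e31dce6c0e47e5337e5e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Thu, 14 Mar 2024 08:53:00 +0000 Subject: [PATCH 11/38] update Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py.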
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py index 8a10403..1d4ea6b 100644 --- a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py +++ b/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py @@ -67,15 +67,6 @@ class sample_resnet_dvpp(object): raise Exception("the key of label is not exist") def release_resource(self): - # release resource includes acl resource, data set and unload model - # self.dvpp.__del__() - # self.model.__del__() - # self.resource.__del__() - # AclLiteResource.__del__ = lambda x: 0 - # AclLiteImage.__del__ = lambda x: 0 - # AclLiteImageProc.__del__ = lambda x: 0 - # AclLiteModel.__del__ = lambda x: 0 - del self.dvpp del self.model del self.resource -- Gitee From fcfb5567dbcf9bfdda815a2f4d3b69949ac86a95 Mon Sep 17 00:00:00 2001 From: cajcak <1039183305@qq.com> Date: Tue, 26 Mar 2024 09:45:21 +0800 Subject: [PATCH 12/38] update --- Samples/YOLOV5USBCamera/python/README.md | 130 ++++++++++++++++ Samples/YOLOV5USBCamera/python/data/.keep | 0 Samples/YOLOV5USBCamera/python/model/.keep | 0 .../python/scripts/sample_run.sh | 12 ++ .../python/src/YOLOV7USBCamera.py | 112 ++++++++++++++ Samples/YOLOV5USBCamera/python/src/label.py | 20 +++ Samples/YOLOV5Video/README.md | 130 ++++++++++++++++ Samples/YOLOV5Video/data/.keep | 0 Samples/YOLOV5Video/model/.keep | 0 Samples/YOLOV5Video/scripts/sample_run.sh | 12 ++ Samples/YOLOV5Video/src/YOLOV5Video.py | 139 ++++++++++++++++++ Samples/YOLOV5Video/src/label.py | 20 +++ 12 files changed, 575 insertions(+) create mode 100644 Samples/YOLOV5USBCamera/python/README.md create mode 100644 Samples/YOLOV5USBCamera/python/data/.keep create mode 100644 Samples/YOLOV5USBCamera/python/model/.keep create mode 100644 Samples/YOLOV5USBCamera/python/scripts/sample_run.sh create mode 100644 Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py create mode 100644 Samples/YOLOV5USBCamera/python/src/label.py create mode 100644 Samples/YOLOV5Video/README.md create mode 100644 Samples/YOLOV5Video/data/.keep create mode 100644 Samples/YOLOV5Video/model/.keep create mode 100644 Samples/YOLOV5Video/scripts/sample_run.sh create mode 100644 Samples/YOLOV5Video/src/YOLOV5Video.py create mode 100644 Samples/YOLOV5Video/src/label.py diff --git a/Samples/YOLOV5USBCamera/python/README.md b/Samples/YOLOV5USBCamera/python/README.md new file mode 100644 index 0000000..94a9e79 --- /dev/null +++ b/Samples/YOLOV5USBCamera/python/README.md @@ -0,0 +1,130 @@ +## 目录 + + - [样例介绍](#样例介绍) + - [获取源码包](#获取源码包) + - [第三方依赖安装](#第三方依赖安装) + - [样例运行](#样例运行) + - [其他资源](#其他资源) + - [更新说明](#更新说明) + - [已知issue](#已知issue) + +## 样例介绍 + +以YOLOV7网络模型为例,使能Acllite对图片进行预处理,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对YOLOV7网络执行推理,对图片进行物体检测和分类,并给出标定框和类别置信度。 + +样例输入:图片。 +样例输出:图片物体检测,并且在图片上给出物体标注框,类别以及置信度。 + +## 获取源码包 + + 可以使用以下两种方式下载,请选择其中一种进行源码准备。 + + - 命令行方式下载(下载时间较长,但步骤简单)。 + + ``` + # 开发环境,非root用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/samples.git + ``` + **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** + ``` + git checkout v0.5.0 + ``` + - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 + **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** + ``` + # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 
将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 + # 3. 开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + unzip ascend-samples-master.zip + ``` + +## 第三方依赖安装 + 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 + ``` + echo 'export THIRDPART_PATH=$HOME/Ascend/ascend-toolkit/latest/thirdpart'>> ~/.bashrc + echo 'export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH'>> ~/.bashrc + ``` + + 执行以下命令使环境变量生效并创建文件夹 + + ``` + source ~/.bashrc + mkdir -p ${THIRDPART_PATH} + ``` + + python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 + + ``` + cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} + + ``` + +- python依赖 + + 执行以下命令安装numpy,opencv-python库。 + ``` + pip3 install numpy opencv-python + ``` + +## 样例运行 + + - 数据准备 + + 请从以下链接获取该样例的输入图片,放在data目录下。 + + ``` + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg + ``` + + - ATC模型转换 + + 将yolov7.onnx原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 + + ``` + # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。act指令中的--soc_version需填写对应的推理卡型号。 + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg + atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg + ``` + + - 样例运行 + + 执行运行脚本,开始样例运行。 + ``` + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/scripts + bash sample_run.sh + ``` + - 样例结果展示 + + 运行完成后,会在样例工程的out目录下生成推理后的图片,显示结果如下所示。 + + ![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/result.jpg "result.jpg") + +## 其他资源 + +以下资源提供了对ONNX项目和YOLOV7模型的更深入理解: + +**ONNX** +- [GitHub: ONNX](https://github.com/onnx/onnx) + +**Models** +- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch) + +**Documentation** +- [AscendCL Samples介绍](../README_CN.md) +- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) +- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + +## 更新说明 + | 时间 | 更新事项 | +|----|------| +| 2023/09/26 | 新增sampleYOLOV7/README.md | + + +## 已知issue + + 暂无 diff --git a/Samples/YOLOV5USBCamera/python/data/.keep b/Samples/YOLOV5USBCamera/python/data/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/YOLOV5USBCamera/python/model/.keep b/Samples/YOLOV5USBCamera/python/model/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh b/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh new file mode 100644 index 0000000..524e08a --- /dev/null +++ b/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" + +echo "[INFO] The sample starts to run" + +cd ${ScriptPath}/../src +python3 YOLOV7USBCamera.py +if [ $? 
-ne 0 ];then + echo "[INFO] The program runs failed" +else + echo "[INFO] The program runs successfully" +fi diff --git a/Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py b/Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py new file mode 100644 index 0000000..b7a8889 --- /dev/null +++ b/Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py @@ -0,0 +1,112 @@ +import videocapture as video +import numpy as np +import cv2 + +import time + +from acllite_resource import AclLiteResource +from acllite_model import AclLiteModel +from acllite_imageproc import AclLiteImageProc +from acllite_image import AclLiteImage +from label import labels +from acllite_logger import log_error, log_info + + +class sampleYOLOV7(object): + '''load the model, and do preprocess, infer, postprocess''' + def __init__(self, model_path, model_width, model_height): + self.model_path = model_path + self.model_width = model_width + self.model_height = model_height + + def init_resource(self): + # initial acl resource, create image processor, create model + self._resource = AclLiteResource() + self._resource.init() + + self._dvpp = AclLiteImageProc(self._resource) + self._model = AclLiteModel(self.model_path) + + def preprocess(self, frame): + # resize frame, keep original image + self.src_image = frame + self.resized_image = cv2.resize(frame, (self.model_width, self.model_height)) + + def infer(self): + # infer frame + image_info = np.array([640, 640, + 640, 640], + dtype=np.float32) + self.result = self._model.execute([self.resized_image, image_info]) + + def postprocess(self): + box_num = self.result[1][0, 0] + box_info = self.result[0].flatten() + + height, width, _ = self.src_image.shape + scale_x = width / self.model_width + scale_y = height / self.model_height + + colors = [0, 0, 255] + text = "" + # draw the boxes in original image + for n in range(int(box_num)): + ids = int(box_info[5 * int(box_num) + n]) + score = box_info[4 * int(box_num) + n] + label = labels[ids] + ":" + str("%.2f" % score) + top_left_x = box_info[0 * int(box_num) + n] * scale_x + top_left_y = box_info[1 * int(box_num) + n] * scale_y + bottom_right_x = box_info[2 * int(box_num) + n] * scale_x + bottom_right_y = box_info[3 * int(box_num) + n] * scale_y + cv2.rectangle(self.src_image, (int(top_left_x), int(top_left_y)), + (int(bottom_right_x), int(bottom_right_y)), colors) + p3 = (max(int(top_left_x), 15), max(int(top_left_y), 15)) + position = [int(top_left_x), int(top_left_y), int(bottom_right_x), int(bottom_right_y)] + cv2.putText(self.src_image, label, p3, cv2.FONT_ITALIC, 0.6, colors, 1) + text += f'label:{label} {position} ' + log_info(text) + cv2.imshow('out', self.src_image) + + def release_resource(self): + # release resource includes acl resource, data set and unload model + del self._resource + del self._dvpp + del self._model + del self.resized_image + +def find_camera_index(): + max_index_to_check = 10 # Maximum index to check for camera + for index in range(max_index_to_check): + cap = cv2.VideoCapture(index) + if cap.read()[0]: + cap.release() + return index + # If no camera is found + raise ValueError("No camera found.") + + +if __name__ == '__main__': + model_path = '../model/yolov5s_rgb.om' + model_width = 640 + model_height = 640 + model = sampleYOLOV7(model_path, model_width, model_height) + model.init_resource() + + camera_index = find_camera_index() + cap = cv2.VideoCapture(camera_index) + cv2.namedWindow('out', cv2.WINDOW_NORMAL) + while True: + ret, frame = cap.read() + if not ret: + print("Can't receive frame (stream 
end?). Exiting ...") + break + model.preprocess(frame) + model.infer() + model.postprocess() + # cv2.imshow('Frame', frame) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + cap.release() + cv2.destroyAllWindows() + + model.release_resource() diff --git a/Samples/YOLOV5USBCamera/python/src/label.py b/Samples/YOLOV5USBCamera/python/src/label.py new file mode 100644 index 0000000..cc23459 --- /dev/null +++ b/Samples/YOLOV5USBCamera/python/src/label.py @@ -0,0 +1,20 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2023-05-25 09:12:13 +MODIFIED: 2023-05-25 10:10:55 +""" +labels = ["person", "bicycle", "car", "motorbike", "aeroplane", + "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", "bird", + "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", + "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", + "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", + "wine glass", "cup", "fork", "knife", "spoon", + "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", + "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table", + "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", + "scissors", "teddy bear", "hair drier", "toothbrush"] \ No newline at end of file diff --git a/Samples/YOLOV5Video/README.md b/Samples/YOLOV5Video/README.md new file mode 100644 index 0000000..94a9e79 --- /dev/null +++ b/Samples/YOLOV5Video/README.md @@ -0,0 +1,130 @@ +## 目录 + + - [样例介绍](#样例介绍) + - [获取源码包](#获取源码包) + - [第三方依赖安装](#第三方依赖安装) + - [样例运行](#样例运行) + - [其他资源](#其他资源) + - [更新说明](#更新说明) + - [已知issue](#已知issue) + +## 样例介绍 + +以YOLOV7网络模型为例,使能Acllite对图片进行预处理,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对YOLOV7网络执行推理,对图片进行物体检测和分类,并给出标定框和类别置信度。 + +样例输入:图片。 +样例输出:图片物体检测,并且在图片上给出物体标注框,类别以及置信度。 + +## 获取源码包 + + 可以使用以下两种方式下载,请选择其中一种进行源码准备。 + + - 命令行方式下载(下载时间较长,但步骤简单)。 + + ``` + # 开发环境,非root用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/samples.git + ``` + **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** + ``` + git checkout v0.5.0 + ``` + - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 + **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** + ``` + # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 + # 3. 
开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + unzip ascend-samples-master.zip + ``` + +## 第三方依赖安装 + 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 + ``` + echo 'export THIRDPART_PATH=$HOME/Ascend/ascend-toolkit/latest/thirdpart'>> ~/.bashrc + echo 'export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH'>> ~/.bashrc + ``` + + 执行以下命令使环境变量生效并创建文件夹 + + ``` + source ~/.bashrc + mkdir -p ${THIRDPART_PATH} + ``` + + python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 + + ``` + cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} + + ``` + +- python依赖 + + 执行以下命令安装numpy,opencv-python库。 + ``` + pip3 install numpy opencv-python + ``` + +## 样例运行 + + - 数据准备 + + 请从以下链接获取该样例的输入图片,放在data目录下。 + + ``` + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg + ``` + + - ATC模型转换 + + 将yolov7.onnx原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 + + ``` + # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。act指令中的--soc_version需填写对应的推理卡型号。 + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg + atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg + ``` + + - 样例运行 + + 执行运行脚本,开始样例运行。 + ``` + cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/scripts + bash sample_run.sh + ``` + - 样例结果展示 + + 运行完成后,会在样例工程的out目录下生成推理后的图片,显示结果如下所示。 + + ![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/result.jpg "result.jpg") + +## 其他资源 + +以下资源提供了对ONNX项目和YOLOV7模型的更深入理解: + +**ONNX** +- [GitHub: ONNX](https://github.com/onnx/onnx) + +**Models** +- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch) + +**Documentation** +- [AscendCL Samples介绍](../README_CN.md) +- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) +- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + +## 更新说明 + | 时间 | 更新事项 | +|----|------| +| 2023/09/26 | 新增sampleYOLOV7/README.md | + + +## 已知issue + + 暂无 diff --git a/Samples/YOLOV5Video/data/.keep b/Samples/YOLOV5Video/data/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/YOLOV5Video/model/.keep b/Samples/YOLOV5Video/model/.keep new file mode 100644 index 0000000..e69de29 diff --git a/Samples/YOLOV5Video/scripts/sample_run.sh b/Samples/YOLOV5Video/scripts/sample_run.sh new file mode 100644 index 0000000..abc9185 --- /dev/null +++ b/Samples/YOLOV5Video/scripts/sample_run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" + +echo "[INFO] The sample starts to run" + +cd ${ScriptPath}/../src +python3 YOLOV5Video.py +if [ $? 
-ne 0 ];then + echo "[INFO] The program runs failed" +else + echo "[INFO] The program runs successfully" +fi diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py new file mode 100644 index 0000000..d401a26 --- /dev/null +++ b/Samples/YOLOV5Video/src/YOLOV5Video.py @@ -0,0 +1,139 @@ +import videocapture as video +import numpy as np +import cv2 + +import time + +from acllite_resource import AclLiteResource +from acllite_model import AclLiteModel +from acllite_imageproc import AclLiteImageProc +from acllite_image import AclLiteImage +from label import labels +from acllite_logger import log_error, log_info + + +class sampleYOLOV7(object): + '''load the model, and do preprocess, infer, postprocess''' + def __init__(self, model_path, model_width, model_height): + self.model_path = model_path + self.model_width = model_width + self.model_height = model_height + self.conf_thres = 0.25 + self.nms_thres = 0.45 + self.src_image = None + + def init_resource(self): + # initial acl resource, create image processor, create model + self._resource = AclLiteResource() + self._resource.init() + + self._dvpp = AclLiteImageProc(self._resource) + self._model = AclLiteModel(self.model_path) + + def preprocess_image(self, frame): + # resize frame by dvpp + yuv_image = self._dvpp.jpegd(frame) + self.resized_image = self._dvpp.resize(yuv_image, self.model_width, self.model_height) + + def preprocess_vis(self, frame): + # resize frame, keep original image + src_image = frame.byte_data_to_np_array().astype(np.uint8) + self.src_image = cv2.cvtColor(src_image.reshape((frame.height*3//2, frame.width)), cv2.COLOR_YUV2RGB_NV21) + + self.resized_image = self._dvpp.resize(frame, self.model_width, self.model_height) + + def infer(self): + # infer frame + image_info = np.array([640, 640, + 640, 640], + dtype=np.float32) + self.result = self._model.execute([self.resized_image, image_info]) + + def postprocess(self,path): + box_num = self.result[1][0, 0] + box_info = self.result[0].flatten() + + if self.src_image is None: + src_image = cv2.imread(path) + else: + src_image = self.src_image + height, width, _ = src_image.shape + scale_x = width / self.model_width + scale_y = height / self.model_height + + + colors = [0, 0, 255] + text = "" + # draw the boxes in original image + for n in range(int(box_num)): + ids = int(box_info[5 * int(box_num) + n]) + score = box_info[4 * int(box_num) + n] + label = labels[ids] + ":" + str("%.2f" % score) + top_left_x = box_info[0 * int(box_num) + n] * scale_x + top_left_y = box_info[1 * int(box_num) + n] * scale_y + bottom_right_x = box_info[2 * int(box_num) + n] * scale_x + bottom_right_y = box_info[3 * int(box_num) + n] * scale_y + cv2.rectangle(src_image, (int(top_left_x), int(top_left_y)), + (int(bottom_right_x), int(bottom_right_y)), colors) + p3 = (max(int(top_left_x), 15), max(int(top_left_y), 15)) + position = [int(top_left_x), int(top_left_y), int(bottom_right_x), int(bottom_right_y)] + cv2.putText(src_image, label, p3, cv2.FONT_ITALIC, 0.6, colors, 1) + text += f'label:{label} {position} ' + log_info(text) + cv2.imshow('out', src_image) + + def release_resource(self): + # release resource includes acl resource, data set and unload model + del self._resource + del self._dvpp + del self._model + del self.resized_image + +def video_infer(video_path, model): + cap = video.VideoCapture(video_path) + cnt = 0 + while True: + ret, frame = cap.read() + if ret: + print('cap read end! 
close subprocess cap read') + break + if frame is not None: + model.preprocess_vis(frame) + model.infer() + model.postprocess(video_path) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + else: + log_info("read frame finish") + break + cap.release() + +def image_infer(image_path, model): + frame = AclLiteImage(image_path) + model.preprocess_image(frame) + model.infer() + model.postprocess(image_path) + cv2.waitKey(0) + + +if __name__ == '__main__': + model_path = '../model/yolov5s_nms.om' + model_width = 640 + model_height = 640 + + model = sampleYOLOV7(model_path, model_width, model_height) + model.init_resource() + cv2.namedWindow('out', cv2.WINDOW_NORMAL) + + mode = "camera" + if mode == "image": + path = "../data/dog1_1024_683.jpg" + image_infer(path, model) + elif mode == "video": + path = "../data/output.h264" + video_infer(path, model) + else: + print('input mode is incorrect.') + + model.release_resource() + cv2.destroyAllWindows() diff --git a/Samples/YOLOV5Video/src/label.py b/Samples/YOLOV5Video/src/label.py new file mode 100644 index 0000000..cc23459 --- /dev/null +++ b/Samples/YOLOV5Video/src/label.py @@ -0,0 +1,20 @@ +""" +Copyright (R) @huawei.com, all rights reserved +-*- coding:utf-8 -*- +CREATED: 2023-05-25 09:12:13 +MODIFIED: 2023-05-25 10:10:55 +""" +labels = ["person", "bicycle", "car", "motorbike", "aeroplane", + "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", "bird", + "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", + "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", + "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", + "wine glass", "cup", "fork", "knife", "spoon", + "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", + "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table", + "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", + "scissors", "teddy bear", "hair drier", "toothbrush"] \ No newline at end of file -- Gitee From a183d25c642e4ab54b0d5c2a7e4416e4672fa831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 01:49:18 +0000 Subject: [PATCH 13/38] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20Samples/Res?= =?UTF-8?q?netPicture/pyACLLite=20=E4=B8=BA=20Samples/ResnetPicture/python?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Samples/ResnetPicture/{pyACLLite => python}/README.md | 0 Samples/ResnetPicture/{pyACLLite => python}/data/.keep | 0 Samples/ResnetPicture/{pyACLLite => python}/model/.keep | 0 Samples/ResnetPicture/{pyACLLite => python}/scripts/sample_run.sh | 0 Samples/ResnetPicture/{pyACLLite => python}/src/label.py | 0 .../ResnetPicture/{pyACLLite => python}/src/sampleResnetDVPP.py | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename Samples/ResnetPicture/{pyACLLite => python}/README.md (100%) rename Samples/ResnetPicture/{pyACLLite => python}/data/.keep (100%) rename Samples/ResnetPicture/{pyACLLite => python}/model/.keep (100%) rename Samples/ResnetPicture/{pyACLLite => python}/scripts/sample_run.sh (100%) rename Samples/ResnetPicture/{pyACLLite => python}/src/label.py (100%) rename Samples/ResnetPicture/{pyACLLite => python}/src/sampleResnetDVPP.py (100%) diff --git 
a/Samples/ResnetPicture/pyACLLite/README.md b/Samples/ResnetPicture/python/README.md similarity index 100% rename from Samples/ResnetPicture/pyACLLite/README.md rename to Samples/ResnetPicture/python/README.md diff --git a/Samples/ResnetPicture/pyACLLite/data/.keep b/Samples/ResnetPicture/python/data/.keep similarity index 100% rename from Samples/ResnetPicture/pyACLLite/data/.keep rename to Samples/ResnetPicture/python/data/.keep diff --git a/Samples/ResnetPicture/pyACLLite/model/.keep b/Samples/ResnetPicture/python/model/.keep similarity index 100% rename from Samples/ResnetPicture/pyACLLite/model/.keep rename to Samples/ResnetPicture/python/model/.keep diff --git a/Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh b/Samples/ResnetPicture/python/scripts/sample_run.sh similarity index 100% rename from Samples/ResnetPicture/pyACLLite/scripts/sample_run.sh rename to Samples/ResnetPicture/python/scripts/sample_run.sh diff --git a/Samples/ResnetPicture/pyACLLite/src/label.py b/Samples/ResnetPicture/python/src/label.py similarity index 100% rename from Samples/ResnetPicture/pyACLLite/src/label.py rename to Samples/ResnetPicture/python/src/label.py diff --git a/Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py b/Samples/ResnetPicture/python/src/sampleResnetDVPP.py similarity index 100% rename from Samples/ResnetPicture/pyACLLite/src/sampleResnetDVPP.py rename to Samples/ResnetPicture/python/src/sampleResnetDVPP.py -- Gitee From 7154a4bd2621d218e61625e3e6ebaa620bc8c3cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:41:07 +0000 Subject: [PATCH 14/38] update Samples/YOLOV5USBCamera/python/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5USBCamera/python/README.md | 175 +++++++++-------------- 1 file changed, 69 insertions(+), 106 deletions(-) diff --git a/Samples/YOLOV5USBCamera/python/README.md b/Samples/YOLOV5USBCamera/python/README.md index 94a9e79..3dd1a5c 100644 --- a/Samples/YOLOV5USBCamera/python/README.md +++ b/Samples/YOLOV5USBCamera/python/README.md @@ -1,130 +1,93 @@ -## 目录 - - - [样例介绍](#样例介绍) - - [获取源码包](#获取源码包) - - [第三方依赖安装](#第三方依赖安装) - - [样例运行](#样例运行) - - [其他资源](#其他资源) - - [更新说明](#更新说明) - - [已知issue](#已知issue) - -## 样例介绍 - -以YOLOV7网络模型为例,使能Acllite对图片进行预处理,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对YOLOV7网络执行推理,对图片进行物体检测和分类,并给出标定框和类别置信度。 - -样例输入:图片。 -样例输出:图片物体检测,并且在图片上给出物体标注框,类别以及置信度。 - -## 获取源码包 - - 可以使用以下两种方式下载,请选择其中一种进行源码准备。 - - - 命令行方式下载(下载时间较长,但步骤简单)。 - - ``` - # 开发环境,非root用户命令行中执行以下命令下载源码仓。 - cd ${HOME} - git clone https://gitee.com/ascend/samples.git - ``` - **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** - ``` - git checkout v0.5.0 - ``` - - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 - **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** - ``` - # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 - # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 - # 3. 
开发环境中,执行以下命令,解压zip包。 - cd ${HOME} - unzip ascend-samples-master.zip - ``` +# 目标检测(YoloV5s) -## 第三方依赖安装 - 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 - ``` - echo 'export THIRDPART_PATH=$HOME/Ascend/ascend-toolkit/latest/thirdpart'>> ~/.bashrc - echo 'export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH'>> ~/.bashrc - ``` +#### 样例介绍 - 执行以下命令使环境变量生效并创建文件夹 - - ``` - source ~/.bashrc - mkdir -p ${THIRDPART_PATH} - ``` +通过USB接口连接Camera与开发板,从Camera获取视频,基于yolov5s模型对输入视频中的物体做实时检测,将推理结果信息使用imshow方式显示。 - python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 +#### 样例下载 - ``` - cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} +可以使用以下两种方式下载,请选择其中一种进行源码准备。 - ``` +- 命令行方式下载(**下载时间较长,但步骤简单**)。 + + ``` + # 登录开发板,HwHiAiUser用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/EdgeAndRobotics.git + # 切换到样例目录 + cd EdgeAndRobotics/Samples/YOLOV5USBCamera/python + ``` -- python依赖 +- 压缩包方式下载(**下载时间较短,但步骤稍微复杂**)。 - 执行以下命令安装numpy,opencv-python库。 ``` - pip3 install numpy opencv-python - ``` - -## 样例运行 + # 1. 仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发板的普通用户家目录中,【例如:${HOME}/EdgeAndRobotics-master.zip】。 + # 3. 开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + chmod +x EdgeAndRobotics-master.zip + unzip EdgeAndRobotics-master.zip + # 4. 切换到样例目录 + cd EdgeAndRobotics-master/Samples/YOLOV5USBCamera/python + ``` + +#### 执行准备 + +1. 确认已安装带桌面的镜像且HDMI连接的屏幕正常显示 - - 数据准备 +2. 以HwHiAiUser用户登录开发板。 - 请从以下链接获取该样例的输入图片,放在data目录下。 - - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/data - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg - ``` +3. 设置环境变量。 - - ATC模型转换 + ``` + # 配置程序编译依赖的头文件与库文件路径 + export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest + export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub + ``` - 将yolov7.onnx原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 +4. 安装ACLLite库。 - ``` - # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。act指令中的--soc_version需填写对应的推理卡型号。 - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/model - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg - atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg - ``` + 参考[ACLLite仓](https://gitee.com/ascend/ACLLite)安装PyACLLite库。 - - 样例运行 +#### 运行样例 - 执行运行脚本,开始样例运行。 - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/scripts - bash sample_run.sh - ``` - - 样例结果展示 - - 运行完成后,会在样例工程的out目录下生成推理后的图片,显示结果如下所示。 +1. 以HwHiAiUser用户登录开发板,切换到当前样例目录。 - ![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/result.jpg "result.jpg") +2. 
获取PyTorch框架的Yolov5模型(\*.onnx),并转换为昇腾AI处理器能识别的模型(\*.om)。 + - 当设备内存**小于8G**时,可设置如下两个环境变量减少atc模型转换过程中使用的进程数,减小内存占用。 + ``` + export TE_PARALLEL_COMPILER=1 + export MAX_COMPILE_CORE_NUMBER=1 + ``` + - 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 + ``` + cd model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp_rgb.cfg --no-check-certificate + atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_rgb --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp_rgb.cfg + ``` -## 其他资源 + atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 -以下资源提供了对ONNX项目和YOLOV7模型的更深入理解: + - --model:yolov5s网络的模型文件的路径。 + - --framework:原始框架类型。5表示ONNX。 + - --output:yolov5s_rgb.om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 + - --input\_shape:模型输入数据的shape。 + - --soc\_version:昇腾AI处理器的版本。 -**ONNX** -- [GitHub: ONNX](https://github.com/onnx/onnx) +3. 运行样例。 -**Models** -- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch) + 执行以下脚本运行样例: -**Documentation** -- [AscendCL Samples介绍](../README_CN.md) -- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) -- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + ``` + bash sample_run.sh + ``` -## 更新说明 - | 时间 | 更新事项 | -|----|------| -| 2023/09/26 | 新增sampleYOLOV7/README.md | - +#### 相关操作 -## 已知issue +- 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 +- 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 +- 获取学习文档,请单击[AscendCL Python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha003/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html),查看最新版本的AscendCL推理应用开发指南。 +- 查模型的输入输出 - 暂无 + 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From 63909f410d4774ba2c7d59242a48a47fb7740a06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:44:40 +0000 Subject: [PATCH 15/38] update Samples/YOLOV5USBCamera/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5USBCamera/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Samples/YOLOV5USBCamera/README.md b/Samples/YOLOV5USBCamera/README.md index 98141b5..eb8337f 100644 --- a/Samples/YOLOV5USBCamera/README.md +++ b/Samples/YOLOV5USBCamera/README.md @@ -72,7 +72,7 @@ atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 - - --model:ResNet-50网络的模型文件的路径。 + - --model:Yolov5网络的模型文件的路径。 - --framework:原始框架类型。5表示ONNX。 - --output:yolov5s.om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 - --input\_shape:模型输入数据的shape。 -- Gitee From 2fef2f2f927b822c84d7d1f0b29ad28c309cdd43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:44:56 +0000 Subject: [PATCH 16/38] update Samples/YOLOV5USBCamera/python/README.md. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5USBCamera/python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Samples/YOLOV5USBCamera/python/README.md b/Samples/YOLOV5USBCamera/python/README.md index 3dd1a5c..58cd70a 100644 --- a/Samples/YOLOV5USBCamera/python/README.md +++ b/Samples/YOLOV5USBCamera/python/README.md @@ -69,7 +69,7 @@ atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 - - --model:yolov5s网络的模型文件的路径。 + - --model:yolov5网络的模型文件的路径。 - --framework:原始框架类型。5表示ONNX。 - --output:yolov5s_rgb.om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 - --input\_shape:模型输入数据的shape。 -- Gitee From 6864a7507c0ef839439ae514ffc300e8b2e2fb49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:48:14 +0000 Subject: [PATCH 17/38] update Samples/YOLOV5Video/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5Video/README.md | 175 ++++++++++++++-------------------- 1 file changed, 69 insertions(+), 106 deletions(-) diff --git a/Samples/YOLOV5Video/README.md b/Samples/YOLOV5Video/README.md index 94a9e79..de8a0d6 100644 --- a/Samples/YOLOV5Video/README.md +++ b/Samples/YOLOV5Video/README.md @@ -1,130 +1,93 @@ -## 目录 - - - [样例介绍](#样例介绍) - - [获取源码包](#获取源码包) - - [第三方依赖安装](#第三方依赖安装) - - [样例运行](#样例运行) - - [其他资源](#其他资源) - - [更新说明](#更新说明) - - [已知issue](#已知issue) - -## 样例介绍 - -以YOLOV7网络模型为例,使能Acllite对图片进行预处理,并通过模型转换使能静态AIPP功能,使能AIPP功能后,YUV420SP_U8格式图片转化为RGB,然后减均值和归一化操作,并将该信息固化到转换后的离线模型中,对YOLOV7网络执行推理,对图片进行物体检测和分类,并给出标定框和类别置信度。 - -样例输入:图片。 -样例输出:图片物体检测,并且在图片上给出物体标注框,类别以及置信度。 - -## 获取源码包 - - 可以使用以下两种方式下载,请选择其中一种进行源码准备。 - - - 命令行方式下载(下载时间较长,但步骤简单)。 - - ``` - # 开发环境,非root用户命令行中执行以下命令下载源码仓。 - cd ${HOME} - git clone https://gitee.com/ascend/samples.git - ``` - **注:如果需要切换到其它tag版本,以v0.5.0为例,可执行以下命令。** - ``` - git checkout v0.5.0 - ``` - - 压缩包方式下载(下载时间较短,但步骤稍微复杂)。 - **注:如果需要下载其它版本代码,请先请根据前置条件说明进行samples仓分支切换。** - ``` - # 1. samples仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 - # 2. 将ZIP包上传到开发环境中的普通用户家目录中,【例如:${HOME}/ascend-samples-master.zip】。 - # 3. 开发环境中,执行以下命令,解压zip包。 - cd ${HOME} - unzip ascend-samples-master.zip - ``` +# 目标检测(YoloV5s) -## 第三方依赖安装 - 设置环境变量,配置程序编译依赖的头文件与库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 - ``` - echo 'export THIRDPART_PATH=$HOME/Ascend/ascend-toolkit/latest/thirdpart'>> ~/.bashrc - echo 'export PYTHONPATH=${THIRDPART_PATH}/python:$PYTHONPATH'>> ~/.bashrc - ``` +#### 样例介绍 - 执行以下命令使环境变量生效并创建文件夹 - - ``` - source ~/.bashrc - mkdir -p ${THIRDPART_PATH} - ``` +通过USB接口连接Camera与开发板,从Camera获取视频,基于yolov5s模型对输入视频中的物体做实时检测,将推理结果信息使用imshow方式显示。 - python-acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 +#### 样例下载 - ``` - cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} +可以使用以下两种方式下载,请选择其中一种进行源码准备。 - ``` +- 命令行方式下载(**下载时间较长,但步骤简单**)。 + + ``` + # 登录开发板,HwHiAiUser用户命令行中执行以下命令下载源码仓。 + cd ${HOME} + git clone https://gitee.com/ascend/EdgeAndRobotics.git + # 切换到样例目录 + cd EdgeAndRobotics/Samples/YOLOV5Video + ``` -- python依赖 +- 压缩包方式下载(**下载时间较短,但步骤稍微复杂**)。 - 执行以下命令安装numpy,opencv-python库。 ``` - pip3 install numpy opencv-python - ``` - -## 样例运行 + # 1. 仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 + # 2. 将ZIP包上传到开发板的普通用户家目录中,【例如:${HOME}/EdgeAndRobotics-master.zip】。 + # 3. 开发环境中,执行以下命令,解压zip包。 + cd ${HOME} + chmod +x EdgeAndRobotics-master.zip + unzip EdgeAndRobotics-master.zip + # 4. 
切换到样例目录 + cd EdgeAndRobotics-master/Samples/YOLOV5Video + ``` + +#### 执行准备 + +1. 确认已安装带桌面的镜像且HDMI连接的屏幕正常显示 - - 数据准备 +2. 以HwHiAiUser用户登录开发板。 - 请从以下链接获取该样例的输入图片,放在data目录下。 - - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/data - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg - ``` +3. 设置环境变量。 - - ATC模型转换 + ``` + # 配置程序编译依赖的头文件与库文件路径 + export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest + export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub + ``` - 将yolov7.onnx原始模型转换为适配昇腾310处理器的离线模型(\*.om文件),放在model路径下。 +4. 安装ACLLite库。 - ``` - # 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。act指令中的--soc_version需填写对应的推理卡型号。 - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/model - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/yolov7x.onnx - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/aipp.cfg - atc --model=yolov7x.onnx --framework=5 --output=yolov7x --input_shape="images:1,3,640,640" --soc_version=Ascend310 --insert_op_conf=aipp.cfg - ``` + 参考[ACLLite仓](https://gitee.com/ascend/ACLLite)安装PyACLLite库。 - - 样例运行 +#### 运行样例 - 执行运行脚本,开始样例运行。 - ``` - cd $HOME/samples/inference/modelInference/sampleYOLOV7/python/scripts - bash sample_run.sh - ``` - - 样例结果展示 - - 运行完成后,会在样例工程的out目录下生成推理后的图片,显示结果如下所示。 +1. 以HwHiAiUser用户登录开发板,切换到当前样例目录。 - ![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov7/result.jpg "result.jpg") +2. 获取PyTorch框架的Yolov5模型(\*.onnx),并转换为昇腾AI处理器能识别的模型(\*.om)。 + - 当设备内存**小于8G**时,可设置如下两个环境变量减少atc模型转换过程中使用的进程数,减小内存占用。 + ``` + export TE_PARALLEL_COMPILER=1 + export MAX_COMPILE_CORE_NUMBER=1 + ``` + - 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 + ``` + cd model + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp.cfg --no-check-certificate + atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_nms --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp.cfg + ``` -## 其他资源 + atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 -以下资源提供了对ONNX项目和YOLOV7模型的更深入理解: + - --model:yolov5网络的模型文件的路径。 + - --framework:原始框架类型。5表示ONNX。 + - --output:yolov5s_nms.om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 + - --input\_shape:模型输入数据的shape。 + - --soc\_version:昇腾AI处理器的版本。 -**ONNX** -- [GitHub: ONNX](https://github.com/onnx/onnx) +3. 
运行样例。 -**Models** -- [YOLOV7 - object detect](https://gitee.com/ascend/modelzoo-GPL/tree/master/built-in/ACL_Pytorch/Yolov7_for_Pytorch) + 执行以下脚本运行样例: -**Documentation** -- [AscendCL Samples介绍](../README_CN.md) -- [使用AscendCLC API库开发深度神经网络应用](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha006/infacldevg/aclcppdevg/aclcppdevg_000000.html) -- [昇腾文档](https://www.hiascend.com/document?tag=community-developer) + ``` + bash sample_run.sh + ``` -## 更新说明 - | 时间 | 更新事项 | -|----|------| -| 2023/09/26 | 新增sampleYOLOV7/README.md | - +#### 相关操作 -## 已知issue +- 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 +- 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 +- 获取学习文档,请单击[AscendCL Python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha003/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html),查看最新版本的AscendCL推理应用开发指南。 +- 查模型的输入输出 - 暂无 + 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From 24018571e53a032309226a115fdee1b469172857 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:50:59 +0000 Subject: [PATCH 18/38] update Samples/YOLOV5Video/src/YOLOV5Video.py. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5Video/src/YOLOV5Video.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py index d401a26..b7b58f5 100644 --- a/Samples/YOLOV5Video/src/YOLOV5Video.py +++ b/Samples/YOLOV5Video/src/YOLOV5Video.py @@ -91,7 +91,6 @@ class sampleYOLOV7(object): def video_infer(video_path, model): cap = video.VideoCapture(video_path) - cnt = 0 while True: ret, frame = cap.read() if ret: -- Gitee From 93906021f8b859f450f035d84e4e5b6c705885de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:56:01 +0000 Subject: [PATCH 19/38] update Samples/YOLOV5Video/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5Video/README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Samples/YOLOV5Video/README.md b/Samples/YOLOV5Video/README.md index de8a0d6..72d93fd 100644 --- a/Samples/YOLOV5Video/README.md +++ b/Samples/YOLOV5Video/README.md @@ -75,13 +75,26 @@ - --input\_shape:模型输入数据的shape。 - --soc\_version:昇腾AI处理器的版本。 -3. 运行样例。 +3. 准备测试视频和图片。 + + 请从以下链接获取该样例的测试视频和测试图片,放在data目录下。 + + ``` + cd ../data + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.mp4 --no-check-certificate + wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg --no-check-certificate + ``` + + **注:**若需更换测试视频,则需自行准备测试视频,并将测试视频放到data目录下。 + +4. 运行样例。 执行以下脚本运行样例: ``` bash sample_run.sh ``` + 可以通过修改YOLOV5Video.py中的变量mode选择处理图片或视频,结果会以imshow的方式呈现。 #### 相关操作 -- Gitee
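The README change in patch 19 tells users to switch between image and video input by editing the `mode` variable in YOLOV5Video.py. As a sketch only (not part of the sample), the same switch could be driven from the command line; `image_infer`, `video_infer`, and an initialized `model` are assumed to exist as defined in the sample:

```python
import argparse

# Hypothetical command-line front end for YOLOV5Video.py's hardcoded `mode`.
parser = argparse.ArgumentParser(description="YOLOv5s image/video inference")
parser.add_argument("--mode", choices=("image", "video"), default="video")
parser.add_argument("--path", default="../data/test.mp4",
                    help="input image or video file")
args = parser.parse_args()

if args.mode == "image":
    image_infer(args.path, model)
else:
    video_infer(args.path, model)
```

From 95e1e6fa7d092c9765611b7777b3978e8a6670f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:56:51 +0000 Subject: [PATCH 20/38] update Samples/YOLOV5Video/src/YOLOV5Video.py.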
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/YOLOV5Video/src/YOLOV5Video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py index b7b58f5..e71b341 100644 --- a/Samples/YOLOV5Video/src/YOLOV5Video.py +++ b/Samples/YOLOV5Video/src/YOLOV5Video.py @@ -124,7 +124,7 @@ if __name__ == '__main__': model.init_resource() cv2.namedWindow('out', cv2.WINDOW_NORMAL) - mode = "camera" + mode = "video" if mode == "image": path = "../data/dog1_1024_683.jpg" image_infer(path, model) -- Gitee From 33aa7e8b767454f8b1fd5052c7d7b88bae87bfb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 02:59:45 +0000 Subject: [PATCH 21/38] update Samples/ResnetPicture/python/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/python/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Samples/ResnetPicture/python/README.md b/Samples/ResnetPicture/python/README.md index c9ed6c2..ce85297 100644 --- a/Samples/ResnetPicture/python/README.md +++ b/Samples/ResnetPicture/python/README.md @@ -96,12 +96,12 @@ 执行成功后,在屏幕上的关键提示信息示例如下,提示信息中的top1-5表示图片置信度的前5种类别、index表示类别标识、value表示该分类的最大置信度,class表示所属类别。这些值可能会根据版本、环境有所不同,请以实际情况为准: ``` -======== top5 inference results: ============= -label:162 confidence:0.849168 class:beagle -label:161 confidence:0.147563 class:basset, basset hound -label:167 confidence:0.001627 class:English foxhound -label:166 confidence:0.001228 class:Walker hound, Walker foxhound -label:163 confidence:0.000184 class:bloodhound, sleuthhound + ======== top5 inference results: ============= + label:162 confidence:0.849168 class:beagle + label:161 confidence:0.147563 class:basset, basset hound + label:167 confidence:0.001627 class:English foxhound + label:166 confidence:0.001228 class:Walker hound, Walker foxhound + label:163 confidence:0.000184 class:bloodhound, sleuthhound ``` -- Gitee From 23ac9cfa203ce1523ba7407364c888ebf9b0dfbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 03:00:40 +0000 Subject: [PATCH 22/38] update Samples/ResnetPicture/python/README.md. 
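[Note on the top-5 output shown in the ResnetPicture README above: the label/confidence listing is the usual argsort-over-softmax pattern. The sketch below is illustrative only and not part of the patch series; random logits stand in for the model's real output, and mapping an index to a class name would use the sample's own label table.]

```
# Minimal sketch: build a top-5 listing like the README's from raw logits.
import numpy as np

logits = np.random.randn(1000).astype(np.float32)  # stand-in for model output
exp = np.exp(logits - logits.max())
probs = exp / exp.sum()                            # softmax -> confidence values
for idx in probs.argsort()[-5:][::-1]:             # indices of the 5 largest
    print(f"label:{idx} confidence:{probs[idx]:.6f}")
```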
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/python/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/Samples/ResnetPicture/python/README.md b/Samples/ResnetPicture/python/README.md index ce85297..076ba96 100644 --- a/Samples/ResnetPicture/python/README.md +++ b/Samples/ResnetPicture/python/README.md @@ -109,6 +109,7 @@ - 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 - 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 +- 获取学习文档,请单击[AscendCL python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha002/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html),查看最新版本的AscendCL推理应用开发指南。 - 查模型的输入输出 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 -- Gitee From e0a2396928d6954e2d7b32e50f4c40afb7c8a40f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 03:01:17 +0000 Subject: [PATCH 23/38] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20Samples/Res?= =?UTF-8?q?netPicture/python/src/sampleResnetDVPP.py=20=E4=B8=BA=20Samples?= =?UTF-8?q?/ResnetPicture/python/src/ResnetDVPP.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../python/src/{sampleResnetDVPP.py => ResnetDVPP.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename Samples/ResnetPicture/python/src/{sampleResnetDVPP.py => ResnetDVPP.py} (100%) diff --git a/Samples/ResnetPicture/python/src/sampleResnetDVPP.py b/Samples/ResnetPicture/python/src/ResnetDVPP.py similarity index 100% rename from Samples/ResnetPicture/python/src/sampleResnetDVPP.py rename to Samples/ResnetPicture/python/src/ResnetDVPP.py -- Gitee From 43d3cf85bbc52cffef27171b2e81a1957bd43af4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com> Date: Tue, 26 Mar 2024 03:01:43 +0000 Subject: [PATCH 24/38] update Samples/ResnetPicture/python/scripts/sample_run.sh. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 唐璞 <1039183305@qq.com> --- Samples/ResnetPicture/python/scripts/sample_run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Samples/ResnetPicture/python/scripts/sample_run.sh b/Samples/ResnetPicture/python/scripts/sample_run.sh index 9c210a4..16dd55b 100644 --- a/Samples/ResnetPicture/python/scripts/sample_run.sh +++ b/Samples/ResnetPicture/python/scripts/sample_run.sh @@ -4,7 +4,7 @@ Src=${ScriptPath}/../src cd ${Src} echo "[INFO] The sample starts to run" -running_command="python3 sampleResnetDVPP.py" +running_command="python3 ResnetDVPP.py" ${running_command} if [ $? 
-ne 0 ];then echo "[INFO] The program runs failed" -- Gitee From 67b7bb9b04ae1ff2fbfea43d34474b51a13292b0 Mon Sep 17 00:00:00 2001 From: cajcak <1039183305@qq.com> Date: Tue, 26 Mar 2024 11:04:35 +0800 Subject: [PATCH 25/38] update --- Samples/YOLOV5MultiInput/python/README.md | 98 ---- Samples/YOLOV5MultiInput/python/data/.keep | 0 .../python/scripts/sample_run.sh | 12 - .../python/src/multi_process_yolo_nms.py | 169 ------ .../YOLOV5MultiInput/python/src/python/API.md | 317 ----------- .../python/src/python/README.md | 41 -- .../python/src/python/__init__.py | 0 .../python/src/python/acllite_image.py | 221 -------- .../python/src/python/acllite_imageproc.py | 443 ---------------- .../python/src/python/acllite_logger.py | 91 ---- .../python/src/python/acllite_model.py | 434 --------------- .../python/src/python/acllite_model_2.py | 477 ----------------- .../python/src/python/acllite_model_bak.py | 455 ---------------- .../python/src/python/acllite_resource.py | 110 ---- .../python/src/python/acllite_utils.py | 261 ---------- .../python/src/python/cameracapture.py | 94 ---- .../python/src/python/constants.py | 217 -------- .../python/src/python/dvpp_vdec.py | 259 --------- .../python/src/python/lib/__init__.py | 0 .../python/src/python/lib/acllite_so.py | 38 -- .../python/src/python/lib/src/Makefile | 88 ---- .../python/src/python/lib/src/acllite_utils.h | 67 --- .../python/src/python/lib/src/camera.cpp | 167 ------ .../python/src/python/lib/src/camera.h | 61 --- .../src/python/presenteragent/__init__.py | 1 - .../python/presenteragent/presenter_agent.py | 91 ---- .../presenteragent/presenter_channel.py | 144 ----- .../presenteragent/presenter_datatype.py | 70 --- .../presenteragent/presenter_message.proto | 67 --- .../presenteragent/presenter_message.py | 70 --- .../presenteragent/presenter_message_pb2.py | 493 ------------------ .../python/presenteragent/socket_client.py | 135 ----- .../python/src/python/videocapture.py | 383 -------------- 33 files changed, 5574 deletions(-) delete mode 100644 Samples/YOLOV5MultiInput/python/README.md delete mode 100644 Samples/YOLOV5MultiInput/python/data/.keep delete mode 100644 Samples/YOLOV5MultiInput/python/scripts/sample_run.sh delete mode 100644 Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/API.md delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/README.md delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/__init__.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_image.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/cameracapture.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/constants.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py delete mode 
100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py delete mode 100644 Samples/YOLOV5MultiInput/python/src/python/videocapture.py diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md deleted file mode 100644 index 1426e84..0000000 --- a/Samples/YOLOV5MultiInput/python/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# 目标检测(YoloV5s) - -#### 样例介绍 - -使用多路离线视频流(*.mp4)作为应用程序的输入,基于YoloV5s模型对输入视频中的物体做实时检测,将推理结果信息使用imshow方式显示。 -样例代码逻辑如下所示:![输入图片说明](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/samples-pic/EdgeAndRobotics/%E5%A4%9A%E7%BA%BF%E7%A8%8B%E7%A4%BA%E4%BE%8B%E5%9B%BE%E7%89%87.png) - -#### 样例下载 - -可以使用以下两种方式下载,请选择其中一种进行源码准备。 - -- 命令行方式下载(**下载时间较长,但步骤简单**)。 - - ``` - # 登录开发板,HwHiAiUser用户命令行中执行以下命令下载源码仓。 - cd ${HOME} - git clone https://gitee.com/ascend/EdgeAndRobotics.git - # 切换到样例目录 - cd EdgeAndRobotics/Samples/YOLOV5MultiInput - ``` - -- 压缩包方式下载(**下载时间较短,但步骤稍微复杂**)。 - - ``` - # 1. 仓右上角选择 【克隆/下载】 下拉框并选择 【下载ZIP】。 - # 2. 将ZIP包上传到开发板的普通用户家目录中,【例如:${HOME}/EdgeAndRobotics-master.zip】。 - # 3. 开发环境中,执行以下命令,解压zip包。 - cd ${HOME} - chmod +x EdgeAndRobotics-master.zip - unzip EdgeAndRobotics-master.zip - # 4. 切换到样例目录 - cd EdgeAndRobotics-master/Samples/YOLOV5MultiInput/python - ``` - -#### 准备环境 - -1. 以HwHiAiUser用户登录开发板。 - -2. 设置环境变量。 - - ``` - # 配置程序编译依赖的头文件与库文件路径 - export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest - export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub - export PYTHONPATH=`pwd`/python:$PYTHONPATH - ``` - -#### 运行样例 - -1. 以HwHiAiUser用户登录开发板,切换到当前样例目录。 - -2. 获取PyTorch框架的YoloV5s模型(\*.onnx),并转换为昇腾AI处理器能识别的模型(\*.om)。 - - 当设备内存**小于8G**时,可设置如下两个环境变量减少atc模型转换过程中使用的进程数,减小内存占用。 - ``` - export TE_PARALLEL_COMPILER=1 - export MAX_COMPILE_CORE_NUMBER=1 - ``` - - 为了方便下载,在这里直接给出原始模型下载及模型转换命令,可以直接拷贝执行。 - ``` - cd model - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp_rgb.cfg --no-check-certificate - atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_nms --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp_rgb.cfg - ``` - - atc命令中各参数的解释如下,详细约束说明请参见[《ATC模型转换指南》](https://hiascend.com/document/redirect/CannCommunityAtc)。 - - - --model:YoloV5s网络的模型文件的路径。 - - --framework:原始框架类型。5表示ONNX。 - - --output:om模型文件的路径。请注意,记录保存该om模型文件的路径,后续开发应用时需要使用。 - - --input\_shape:模型输入数据的shape。 - - --soc\_version:昇腾AI处理器的版本。 - -3. 
准备测试视频。 - - 请从以下链接获取该样例的测试视频,放在data目录下。 - - ``` - cd ../data - wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.mp4 --no-check-certificate - ``` - - **注:**若需更换测试视频,则需自行准备测试视频,并将测试视频放到data目录下。 - -4. 运行样例。 - - ``` - bash sample_run.sh - ``` - -#### 相关操作 - -- 获取更多样例,请单击[Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference)。 -- 获取在线视频课程,请单击[Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91)。 -- 获取学习文档,请单击[AscendCL python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha002/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html),查看最新版本的AscendCL推理应用开发指南。 -- 查模型的输入输出 - - 可使用第三方工具Netron打开网络模型,查看模型输入或输出的数据类型、Shape,便于在分析应用开发场景时使用。 diff --git a/Samples/YOLOV5MultiInput/python/data/.keep b/Samples/YOLOV5MultiInput/python/data/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh deleted file mode 100644 index 12a7e0b..0000000 --- a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )" - -echo "[INFO] The sample starts to run" -running_command="python3 multi_process_yolo_nms.py" -cd ${ScriptPath}/../src -${running_command} -if [ $? -ne 0 ];then - echo "[INFO] The program runs failed" -else - echo "[INFO] The program runs successfully" -fi diff --git a/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py b/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py deleted file mode 100644 index 5847c61..0000000 --- a/Samples/YOLOV5MultiInput/python/src/multi_process_yolo_nms.py +++ /dev/null @@ -1,169 +0,0 @@ -import numpy as np -import videocapture as video -import acl -import acllite_utils as utils -import time -import cv2 -import constants as const - -from acllite_resource import AclLiteResource -from acllite_model import AclLiteModel -from acllite_imageproc import AclLiteImageProc -from acllite_image import AclLiteImage -from acllite_logger import log_error, log_info - - -from multiprocessing import Process, Queue, Pool, Value - -Q_PRE_SIZE = 32 -Q_OUT_SIZE = 32 -WAIT_TIME = 0.000003 - -VIDEO_READ_FLAG = Value('d', 10) -COUNT_PRE = Value('d', 0) -# COUNT = Value('d', 0) - -class PrePair: - def __init__(self, data, ori): - self.data = data - self.ori = ori - -labels = ["person", "bicycle", "car", "motorbike", "aeroplane", - "bus", "train", "truck", "boat", "traffic light", - "fire hydrant", "stop sign", "parking meter", "bench", "bird", - "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", - "umbrella", "handbag", "tie", "suitcase", "frisbee", - "skis", "snowboard", "sports ball", "kite", "baseball bat", - "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", - "wine glass", "cup", "fork", "knife", "spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", - "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table", - "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone", - "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", - "scissors", "teddy bear", "hair drier", "toothbrush"] - -def preprocess(path, q_pre, model_width, model_height, channel): - print(f'sub process preprocess{i} start') - width = 1920 - height = 1080 - scale_x = width / model_width - scale_y = height / 
model_height - # get scale factor - if scale_x > scale_y: - max_scale = scale_x - resize_shape = (model_width, int(height/max_scale)) - else: - max_scale = scale_y - resize_shape = (int(width/max_scale), model_height) - count = 0 - cap = cv2.VideoCapture(path) - if not cap.isOpened() : - print('video connect failed') - exit(0) - while True: - ret, frame = cap.read() - if not ret: - print('cap read end! close subprocess cap read') - q_pre.put('EOF') - break - else: - img = np.zeros([model_height, model_width, 3], dtype=np.uint8) - frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - resize_image = cv2.resize(frame_rgb, resize_shape) - img[0:resize_shape[1],0:resize_shape[0]] = resize_image - - q_pre.put(img) - count += 1 - print(f'pre process end! {channel}') - -def infer(model_path, q_pre, q_out, pnums): - resource = AclLiteResource() - resource.init() - model = AclLiteModel(model_path) - count = 0 - nums = pnums - image_info = np.array([640, 640, - 640, 640], - dtype=np.float32) - start = time.time() - while True: - if pnums == 0: - break - for i,q in enumerate(q_pre): - img = q.get() - if isinstance(img, str): - pnums -= 1 - q_out[i].put('EOF') - continue - output = model.execute([img, image_info]) - count += 1 - q_out[i].put(output) - end = time.time() - print(f'fps: {count/(end-start):.3f}') - del resource - del model - print('infer end! close infer ') - -def postprocess(q_out, model_width, model_height): - width = 1920 - height = 1080 - while True: - output = q_out.get() - if isinstance(output, str): - print('postprocess end! close subprocess postprocess') - break - box_num = output[1][0, 0] - box_info = output[0].flatten() - scale_x = width / model_width - scale_y = height / model_height - - # get scale factor - if scale_x > scale_y: - max_scale = scale_x - else: - max_scale = scale_y - colors = [0, 0, 255] - - # draw the boxes in original image - result_msg = "" - for n in range(int(box_num)): - ids = int(box_info[5 * int(box_num) + n]) - score = box_info[4 * int(box_num) + n] - label = labels[ids] + ":" + str("%.2f" % score) - top_left_x = box_info[0 * int(box_num) + n] * max_scale - top_left_y = box_info[1 * int(box_num) + n] * max_scale - bottom_right_x = box_info[2 * int(box_num) + n] * max_scale - bottom_right_y = box_info[3 * int(box_num) + n] * max_scale - result_msg += f'label:{label} ' - # cv2.rectangle(src_image, (int(top_left_x), int(top_left_y)), - # (int(bottom_right_x), int(bottom_right_y)), colors) - # p3 = (max(int(top_left_x), 15), max(int(top_left_y), 15)) - # cv2.putText(src_image, label, p3, cv2.FONT_ITALIC, 0.6, colors, 1) - # cv2.imshow('frame', src_image) - # cv2.imwrite(f'../out/out_{count}.jpg', src_image) - print(f'results: {result_msg}') - -if __name__ == '__main__': - stream_path = "../data/test.mp4" - model_path = "../model/yolov5s_nms.om" - model_width = 640 - model_height = 640 - pnums = 2 - - q_pre = [Queue(maxsize=Q_PRE_SIZE) for i in range(pnums)] - q_out = [Queue(maxsize=Q_OUT_SIZE) for i in range(pnums)] - - loopTime, initTime = time.time(), time.time() - - processes = [] - for i in range(pnums): - processes.append(Process(target=preprocess, args=(stream_path, q_pre[i], model_width, model_height, i))) - processes.append(Process(target=infer, args=(model_path, q_pre, q_out, pnums))) - for i in range(pnums): - processes.append(Process(target=postprocess, args=(q_out[i], model_width, model_height))) - - [process.start() for process in processes] - [process.join() for process in processes] - print('子进程运行结束') - diff --git 
a/Samples/YOLOV5MultiInput/python/src/python/API.md b/Samples/YOLOV5MultiInput/python/src/python/API.md deleted file mode 100644 index 39ed873..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/API.md +++ /dev/null @@ -1,317 +0,0 @@ -## 接口说明 - -### AclLiteImage类 - -AclLiteImage类为python-acllite公共库,针对atlas200dk摄像头、jpg图片、输入的图片数据,所提供的一套统一的数据封装结构,便于后续公共库接口对其进行处理。 - -#### \_\_init\_\_ - -方法:\_\_init\_\_ (image, width=0, height=0, size=0, memory_type=const.MEMORY_NORMAL): - -说明: - -根据初始化参数列表,生成对应的AclLiteImage结构的数据。 - -输入参数: - -image :图片数据,支持numpy array、jpeg/png图片路径、内存数据作为参数输入;不填/填写不支持类型输入会报错。 - -width :图片宽;如果image为jpeg/png图片,该参数可不填;不填则填入默认参数0。 - -height :图片高;如果image为jpeg/png图片,该参数可不填;不填则填入默认参数0。 - -size :图片数据大小,如果image为jpeg/png图片,该参数可不填;不填则填入默认参数0。 - -memory_type :图片数据存储类型,即该图片数据是存储在一般内存、device、host或是dvpp内存;如果image为jpeg/png图片,该参数可不填;不填则填入默认参数 MEMORY_NORMAL。 - -返回值: - -AclLiteImage结构的数据 - -约束: - -无 - -#### save - -方法:save(filename): - -说明: - -将AclLiteImage数据转换为np array后。保存为二进制文件。 - -输入参数: - -filename : 保存后的文件名 - -返回值: - -无 - -约束: - -无 - -### Camera类 - -Camera类为Atlas200DK板载摄像头解码提供python接口。 - -#### is_opened - -方法:is_opened() - -说明: - -根据初始化的cameracapture类对象的摄像头id,判断Atlas200DK板载摄像头是否已打开。 - -输入参数: - -无 - -返回值: - -1.TRUE,摄像头已打开 - -2.FALSE,摄像头未打开 - -约束: - -无 - -#### read - -方法:read() - -说明: - -根据cameracapture类对象初始化时的id,从该id表示的摄像头读取图片,并将图片保存为AclLiteImage结构的数据。 - -输入参数: - -无 - -返回值: - -AclLiteImage类型的图片数据 - -约束: - -无 - -#### close - -方法:close() - -说明: - -关闭打开的摄像头 - -输入参数: - -无 - -返回值: - -无,正常执行会打印 "Close camera" 字段。 - -约束: - -无 - -### AclLiteModel类 - -AclLiteModel类为python-acllite对acl模型推理相关接口的封装,包括但不限于模型加载与初始化,模型输入输出dataset的创建,模型推理执行及资源释放等功能。 - -#### __init__ - -方法:AclLiteModel(model_path, load_type) - -说明: - -模型推理类初始化 - -输入数据: - -model_path:模型的路径。 - -load_type:加载模型的方式,可选0和1,默认值为0。(0:从文件加载离线模型数据;1:从内存加载离线模型数据) - -返回值: - -无 - -约束: - -无 - -#### execute - -方法:execute (input_list): - -说明: - -模型推理接口,将输入数据转变为acl dataset类型数据后送给模型做推理,推理结果以numpy array表示 - -输入参数: - -input_list:模型输入数据,支持AclLiteImage、numpy array 和{'data': ,'size':} dict 结构数据。 - -返回值: - -numpy array,用来表示模型推理结果。 - -约束: - -无 - - -### AclLiteImageProc类 - -AclLiteImageProc类为python-acllite对CANN媒体数据处理相关接口的封装,包括但不限于图片解码编码,视频解码编码,抠图缩放等功能。 - -#### jpegd - -方法:jpegd(image): - -说明: - -图片解码接口,将jpeg格式图片转换为yuv格式 - -输入参数: - -image:原始jpeg图片数据,以AclLiteImage结构存储的数据。 - -返回值: - -AclLiteImage,用来存放yuv图片数据。 - -约束: - -无 - -#### jpege - -方法:jpege(image): - -说明: - -图片解码接口,将yuv格式图片转换为jpeg格式 - -输入参数: - -image:原始yuv图片数据,以AclLiteImage结构存储的数据。 - -返回值: - -AclLiteImage,用来存放jpeg图片数据。 - -约束: - -无 - -#### crop_and_paste - -方法:crop_and_paste(image, width, height, crop_and_paste_width, crop_and_paste_height) - -说明: - -图片VPC(vision preprocessing core)功能相接口,将原始图片扣出再贴到目标大小 - -输入参数: - -image:原始图片数据,以AclLiteImage结构存储的数据。 - -width:原始图片宽。 - -height:原始图片高。 - -crop_and_paste_width:VPC后目标图片的宽 - -crop_and_paste_height:VPC后目标图片的高 - -返回值: - -AclLiteImage,用来存放vpc后的图片数据。 - -约束: - -无 - -#### resize - -方法:resize(image, resize_width, resize_height) - -说明: - -将输入图片resize到指定大小。 - -输入参数: - -image:原始图片数据,以AclLiteImage结构存储的数据。 - -resize_width:缩放后图片的宽。 - -resize_height:缩放后图片的高。 - -返回值: - -AclLiteImage,用来存放resize后的图片数据。 - -约束: - -无 - -### Dvpp_Vdec类 - -Dvpp_Vdec类为python-acllite对视频流的解码相关接口的封装。包括了对视频流的切帧等。 - -#### read - -方法:read (no_wait): - -说明: - -视频帧读接口,异步接口,负责从队列中读取数据并送去解码。 - -输入参数: - -no_wait:布尔变量,为真则不断从队列中读取数据,需要使用is_finished()接口来判断该帧数据是否解码完毕;为否则会按照READ_TIMEOUT设置的时间间隔从队列里读取数据,为空则会报错;默认为否。 - -返回值: - -ret :接口执行结果;SUCCESS为正常;FAILED表示失败,有数据未解码,但是接口未从队列中读取到数据。 - -image :读到的视频帧 - -约束: - -视频流必须为以下格式之一: - 
-h264 :main, baselineor high level,且为 annex-b格式 - -h265 :main level - -#### process - -方法:process (input_data, input_size, user_data) - -说明: - -视频解码接口,将需要解码的视频帧数据送给解码器做处理。 - -输入参数: - -input_data :输入数据。 - -input_size :输入数据大小。 - -user_data :python对象,用户自定义数据。如果用户需要获取解码的帧序号,则可以在user_data参数处定义,然后解码的帧序号可以通过user_data参数传递给VDEC的回调函数,用于确定回调函数中处理的是第几帧数据。 - -返回值: - -ret :接口执行结果;SUCCESS为正常;FAILED表示失败 - - -约束: - -无 \ No newline at end of file diff --git a/Samples/YOLOV5MultiInput/python/src/python/README.md b/Samples/YOLOV5MultiInput/python/src/python/README.md deleted file mode 100644 index d24bdef..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# ACLLite-python快速部署 - -## 安装步骤 - -设置环境变量,配置程序编译依赖的头文件,库文件路径。“$HOME/Ascend”请替换“Ascend-cann-toolkit”包的实际安装路径。 - - ``` - export DDK_PATH=$HOME/Ascend/ascend-toolkit/latest - export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub - export THIRDPART_PATH=${DDK_PATH}/thirdpart - export LD_LIBRARY_PATH=${THIRDPART_PATH}/lib:$LD_LIBRARY_PATH - ``` - - 创建THIRDPART_PATH路径 - ``` - mkdir -p ${THIRDPART_PATH} - ``` -运行环境安装python-acllite所需依赖 - - ``` - # 安装ffmpeg - sudo apt-get install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libavresample-dev - # 安装其它依赖 - python3 -m pip install --upgrade pip - python3 -m pip install Cython - sudo apt-get install pkg-config libxcb-shm0-dev libxcb-xfixes0-dev - # 安装pyav - python3 -m pip install av==6.2.0 - # 安装pillow 的依赖 - sudo apt-get install libtiff5-dev libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python-tk - # 安装numpy和PIL - python3 -m pip install numpy - python3 -m pip install Pillow - ``` - - python acllite库以源码方式提供,安装时将acllite目录拷贝到运行环境的第三方库目录 - - ``` - # 将acllite目录拷贝到第三方文件夹中。后续有变更则需要替换此处的acllite文件夹 - cp -r ${HOME}/samples/inference/acllite/python ${THIRDPART_PATH} - ``` diff --git a/Samples/YOLOV5MultiInput/python/src/python/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py deleted file mode 100644 index 743cbed..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_image.py +++ /dev/null @@ -1,221 +0,0 @@ -import os -import numpy as np -from PIL import Image - -import acl -import acllite_utils as utils -import acllite_logger as acl_log -import constants as const - -class AclLiteImage(object): - """Image data and operation class - Wrap image data and operation method, support jpeg, png, yuv file and - memory data - - Attributes: - _run_mode: device run mode - _data: image binary data or numpy array - _memory_type: the data in which memory, include dvpp, - device and np array - width: image width - height: image height - alignWidth: align image width - alignHeight: align image height - _encode_format: image format - _load_ok: load image success or not - - """ - _run_mode, _ = acl.rt.get_run_mode() - - def __init__(self, image, width=0, height=0, alignWidth=0, alignHeight=0, - size=0, memory_type=const.MEMORY_NORMAL): - """Create AclLiteImage instance - Args: - image: image data, binary, numpy array or file path - width: image width. if image is jpeg or png file, - this arg is not nesscessary - height: image height. if image is jpeg or png file, this arg is - not nesscessary - size: image data size. if image is file path, this arg is not - nesscessary - memory_type: memory type of image data. 
if image is file path, this - arg is not nesscessary - """ - self._data = None - self._memory_type = memory_type - self.width = 0 - self.height = 0 - self.alignWidth = 0 - self.alignHeight = 0 - self.size = 0 - self._encode_format = const.ENCODE_FORMAT_UNKNOW - self._load_ok = True - - if isinstance(image, str): - self._instance_by_image_file(image, width, height) - elif isinstance(image, int): - self._instance_by_buffer(image, width, height, alignWidth, alignHeight, size) - elif isinstance(image, np.ndarray): - self._instance_by_nparray(image, width, height, alignWidth, alignHeight) - else: - acl_log.log_error("Create instance failed for " - "unknow image data type") - - def _instance_by_image_file(self, image_path, width, height): - # Get image format by filename suffix - self._encode_format = self._get_image_format_by_suffix(image_path) - if self._encode_format == const.ENCODE_FORMAT_UNKNOW: - acl_log.log_error("Load image %s failed" % (image_path)) - self._load_ok = False - return - - # Read image data from file to memory - self._data = np.fromfile(image_path, dtype=np.byte) - self._type = const.IMAGE_DATA_NUMPY - self.size = self._data.itemsize * self._data.size - self._memory_type = const.MEMORY_NORMAL - - # Get image parameters of jpeg or png file by pillow - if ((self._encode_format == const.ENCODE_FORMAT_JPEG) or - (self._encode_format == const.ENCODE_FORMAT_PNG)): - image = Image.open(image_path) - self.width, self.height = image.size - else: - # pillow can not decode yuv, so need input widht and height args - self.width = width - self.height = height - - def _get_image_format_by_suffix(self, filename): - suffix = os.path.splitext(filename)[-1].strip().lower() - if (suffix == ".jpg") or (suffix == ".jpeg"): - image_format = const.ENCODE_FORMAT_JPEG - elif suffix == ".png": - image_format = const.ENCODE_FORMAT_PNG - elif suffix == ".yuv": - image_format = const.ENCODE_FORMAT_YUV420_SP - else: - acl_log.log_error("Unsupport image format: ", suffix) - image_format = const.ENCODE_FORMAT_UNKNOW - - return image_format - - def is_loaded(self): - """Image file load result - When create image instance by file, call this method to check - file load success or not - - Returns: - True: load success - False: load failed - """ - return self._load_ok - - def _instance_by_buffer(self, image_buffer, width, height, alignWidth, alignHeight, size): - self.width = width - self.height = height - self.alignHeight = alignHeight - self.alignWidth = alignWidth - self.size = size - self._data = image_buffer - self._type = const.IMAGE_DATA_BUFFER - - def _instance_by_nparray(self, data, width, height, alignWidth, alignHeight): - self.width = width - self.height = height - self.alignHeight = alignHeight - self.alignWidth = alignWidth - self.size = data.itemsize * data.size - self._data = data - self._type = const.IMAGE_DATA_NUMPY - self._memory_type = const.MEMORY_NORMAL - - def byte_data_to_np_array(self): - """Trans image data to np array""" - if self._type == const.IMAGE_DATA_NUMPY: - return self._data.copy() - - return utils.copy_data_as_numpy(self._data, self.size, - self._memory_type, AclLiteImage._run_mode) - - def data(self): - """Get image binary data""" - if self._type == const.IMAGE_DATA_NUMPY: - if "bytes_to_ptr" in dir(acl.util): - bytes_data=self._data.tobytes() - factor_ptr=acl.util.bytes_to_ptr(bytes_data) - else: - factor_ptr=acl.util.numpy_to_ptr(self._data) - return factor_ptr - else: - return self._data - - def copy_to_dvpp(self): - """Copy image data to dvpp""" - device_ptr = 
utils.copy_data_to_dvpp(self.data(), self.size, - self._run_mode) - print(f'device_ptr: {device_ptr}') - if device_ptr is None: - acl_log.log_error("Copy image to dvpp failed") - return None - return AclLiteImage(device_ptr, self.width, self.height, 0, 0, - self.size, const.MEMORY_DVPP) - - def copy_to_host(self): - """"Copy data to host""" - if self._type == const.IMAGE_DATA_NUMPY: - data_np = self._data.copy() - return AclLiteImage(data_np, self.width, self.height, 0, 0) - - data = None - mem_type = const.MEMORY_HOST - if AclLiteImage._run_mode == const.ACL_HOST: - if self.is_local(): - data = utils.copy_data_host_to_host(self._data, self.size) - else: - data = utils.copy_data_device_to_host(self._data, self.size) - else: - data = utils.copy_data_device_to_device(self._data, self.size) - mem_type = const.MEMORY_DEVICE - if data is None: - acl_log.log_error("Copy image to host failed") - return None - - return AclLiteImage(data, self.width, self.height, 0, 0, self.size, mem_type) - - def is_local(self): - """Image data is in host server memory and access directly or not""" - # in atlas200dk, all kind memory can access directly - if AclLiteImage._run_mode == const.ACL_DEVICE: - return True - # in atlas300, only acl host memory or numpy array can access directly - elif ((AclLiteImage._run_mode == const.ACL_HOST) and - ((self._memory_type == const.MEMORY_HOST) or - (self._memory_type == const.MEMORY_NORMAL))): - return True - else: - return False - - def save(self, filename): - """Save image as file""" - image_np = self.byte_data_to_np_array() - image_np.tofile(filename) - - def destroy(self): - """Release image memory""" - if (self._data is None) or (self.size == 0): - acl_log.log_error("Release image abnormaly, data is None") - return - - if self._memory_type == const.MEMORY_DEVICE: - acl.rt.free(self._data) - elif self._memory_type == const.MEMORY_HOST: - acl.rt.free_host(self._data) - elif self._memory_type == const.MEMORY_DVPP: - acl.media.dvpp_free(self._data) - # numpy no need release - self._data = None - self.size = 0 - - def __del__(self): - self.destroy() - diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py deleted file mode 100644 index 3453b12..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_imageproc.py +++ /dev/null @@ -1,443 +0,0 @@ -""" -Copyright (R) @huawei.com, all rights reserved --*- coding:utf-8 -*- -CREATED: 2021-01-20 20:12:13 -MODIFIED: 2021-01-29 14:04:45 -""" -import numpy as np -import acl -import acllite_utils as utils -from acllite_image import AclLiteImage -from acllite_logger import log_error, log_info -from acllite_resource import resource_list -import constants as constants - -class AclLiteImageProc(object): - """ - dvpp class - """ - - def __init__(self, acl_resource=None): - if acl_resource is None: - self._stream, ret = acl.rt.create_stream() - utils.check_ret("acl.rt.create_stream", ret) - self._run_mode, ret = acl.rt.get_run_mode() - utils.check_ret("acl.rt.get_run_mode", ret) - else: - self._stream = acl_resource.stream - self._run_mode = acl_resource.run_mode - self._dvpp_channel_desc = None - self._crop_config = None - self._paste_config = None - - self._init_resource() - - # AclLiteImageProc involves acl resources, which need to be released \ - # before the acl ends when the program exits, \ - # register here to the resource table to ensure the release timing - self._is_destroyed = False - resource_list.register(self) - - def 
_init_resource(self): - # Create dvpp channel - self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc() - ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc) - utils.check_ret("acl.media.dvpp_create_channel", ret) - - # Create a resize configuration - self._resize_config = acl.media.dvpp_create_resize_config() - - # Create yuv to jpeg configuration - self._jpege_config = acl.media.dvpp_create_jpege_config() - ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100) - utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret) - - def _gen_input_pic_desc(self, image, - width_align_factor=16, height_align_factor=2): - image.width = utils.align_up2(image.width) - image.height = utils.align_up2(image.height) - image.alignWidth = utils.align_up(image.width, width_align_factor) - image.alignHeight = utils.align_up(image.height, height_align_factor) - image.size = utils.yuv420sp_size(image.alignWidth, image.alignHeight) - - pic_desc = acl.media.dvpp_create_pic_desc() - acl.media.dvpp_set_pic_desc_data(pic_desc, image.data()) - acl.media.dvpp_set_pic_desc_format( - pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) - acl.media.dvpp_set_pic_desc_width(pic_desc, image.width) - acl.media.dvpp_set_pic_desc_height(pic_desc, image.height) - acl.media.dvpp_set_pic_desc_width_stride(pic_desc, image.alignWidth) - acl.media.dvpp_set_pic_desc_height_stride(pic_desc, image.alignHeight) - acl.media.dvpp_set_pic_desc_size(pic_desc, image.size) - - return pic_desc - - def _gen_output_pic_desc(self, width, height, - output_buffer, output_buffer_size, - width_align_factor=16, height_align_factor=2): - # Create output image - stride_width = utils.align_up(width, width_align_factor) - stride_height = utils.align_up(height, height_align_factor) - - pic_desc = acl.media.dvpp_create_pic_desc() - acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer) - acl.media.dvpp_set_pic_desc_format( - pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) - acl.media.dvpp_set_pic_desc_width(pic_desc, width) - acl.media.dvpp_set_pic_desc_height(pic_desc, height) - acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width) - acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height) - acl.media.dvpp_set_pic_desc_size(pic_desc, output_buffer_size) - - return pic_desc - - def _stride_yuv_size(self, width, height, - width_align_factor=16, height_align_factor=2): - stride_width = utils.align_up(width, width_align_factor) - stride_height = utils.align_up(height, height_align_factor) - stride_size = utils.yuv420sp_size(stride_width, stride_height) - - return stride_width, stride_height, stride_size - - def jpegd(self, image): - """ - jepg image to yuv image - """ - image.width = utils.align_up2(image.width) - image.height = utils.align_up2(image.height) - soc_version = acl.get_soc_name() - if soc_version == "Ascend310P3" or soc_version == "Ascend310B1" : - stride_width = utils.align_up64(image.width) - stride_height = utils.align_up16(image.height) - stride_size = utils.yuv420sp_size(stride_width, stride_height) - else: - stride_width = utils.align_up128(image.width) - stride_height = utils.align_up16(image.height) - stride_size = utils.yuv420sp_size(stride_width, stride_height) - # Create conversion output image desc - output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image, stride_size) - ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc, - image.data(), - image.size, - output_desc, - self._stream) - if ret != constants.ACL_SUCCESS: - log_error("dvpp_jpeg_decode_async 
failed ret={}".format(ret)) - return None - ret = acl.media.dvpp_destroy_pic_desc(output_desc) - if ret != constants.ACL_SUCCESS: - log_error("dvpp_destroy_pic_desc failed ret={}".format(ret)) - return None - ret = acl.rt.synchronize_stream(self._stream) - if ret != constants.ACL_SUCCESS: - log_error("dvpp_jpeg_decode_async failed ret={}".format(ret)) - return None - - # Return the decoded AclLiteImage instance - return AclLiteImage(out_buffer, image.width, image.height, stride_width, - stride_height, stride_size, constants.MEMORY_DVPP) - - def _gen_jpegd_out_pic_desc(self, image, stride_size): - # Predict the memory size required to decode jpeg into yuv pictures - ret, out_buffer_size = self._get_jpegd_memory_size(image, stride_size) - if not ret: - return None - # Apply for memory for storing decoded yuv pictures - out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) - if ret != constants.ACL_SUCCESS: - log_error("AclLiteImageProc malloc failed, error: ", ret) - return None - - soc_version = acl.get_soc_name() - if soc_version == "Ascend310P3" or soc_version == "Ascend310B1" : - width_align_factor = 64 - height_align_factor = 16 - else: - width_align_factor = 128 - height_align_factor = 16 - # Create output image desc - pic_desc = self._gen_output_pic_desc( - image.width, - image.height, - out_buffer, - out_buffer_size, - width_align_factor, - height_align_factor) - return pic_desc, out_buffer - - def _get_jpegd_memory_size(self, image, stride_size): - if image.is_local(): - size, ret = acl.media.dvpp_jpeg_predict_dec_size( - image.data(), image.size, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) - if ret != constants.ACL_SUCCESS: - log_error("Predict jpeg decode size failed, return ", ret) - return False, 0 - return True, size - else: - return True, int(stride_size) - - def resize(self, image, resize_width, resize_height): - """ - Scale yuvsp420 picture to specified size - """ - resize_width = utils.align_up2(resize_width) - resize_height = utils.align_up2(resize_height) - soc_version = acl.get_soc_name() - if soc_version == "Ascend310B1" : - width_align_factor = 2 - height_align_factor = 2 - stride_width = resize_width - stride_height = utils.align_up2(resize_height) - else: - width_align_factor = 16 - height_align_factor = 2 - stride_width = utils.align_up16(resize_width) - stride_height = utils.align_up2(resize_height) - # Generate input picture desc - input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) - # Calculate the image size after scaling - output_size = utils.yuv420sp_size(stride_width, stride_height) - # Request memory for the zoomed picture - out_buffer, ret = acl.media.dvpp_malloc(output_size) - if ret != constants.ACL_SUCCESS: - log_error("AclLiteImageProc malloc failed, error: ", ret) - return None - # Create output image - output_desc = self._gen_output_pic_desc(resize_width, resize_height, - out_buffer, output_size, - width_align_factor, height_align_factor) - if output_desc is None: - log_error("Gen resize output desc failed") - return None - # Call dvpp asynchronous zoom interface to zoom pictures - ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc, - input_desc, - output_desc, - self._resize_config, - self._stream) - if ret != constants.ACL_SUCCESS: - log_error("Vpc resize async failed, error: ", ret) - return None - # Wait for the zoom operation to complete - ret = acl.rt.synchronize_stream(self._stream) - if ret != constants.ACL_SUCCESS: - log_error("Resize synchronize stream failed, error: ", ret) - return None - # 
Release the resources requested for scaling - acl.media.dvpp_destroy_pic_desc(input_desc) - acl.media.dvpp_destroy_pic_desc(output_desc) - return AclLiteImage(out_buffer, resize_width, resize_height, stride_width, - stride_height, output_size, constants.MEMORY_DVPP) - - def _gen_resize_out_pic_desc(self, resize_width, - resize_height, output_size): - out_buffer, ret = acl.media.dvpp_malloc(output_size) - if ret != constants.ACL_SUCCESS: - log_error("AclLiteImageProc malloc failed, error: ", ret) - return None - pic_desc = self._gen_output_pic_desc(resize_width, resize_height, - out_buffer, output_size) - return pic_desc, out_buffer - - def crop_and_paste( - self, - image, - width, - height, - crop_and_paste_width, - crop_and_paste_height): - """ - crop_and_paste - """ - log_info('AclLiteImageProc vpc crop and paste stage:') - crop_and_paste_width = utils.align_up2(crop_and_paste_width) - crop_and_paste_height = utils.align_up2(crop_and_paste_height) - soc_version = acl.get_soc_name() - if soc_version == "Ascend310B1" : - width_align_factor = 2 - height_align_factor = 2 - stride_width = crop_and_paste_width - stride_height = utils.align_up2(crop_and_paste_height) - else: - width_align_factor = 16 - height_align_factor = 2 - stride_width = utils.align_up16(crop_and_paste_width) - stride_height = utils.align_up2(crop_and_paste_height) - input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) - out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) - out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) - output_desc = self._gen_output_pic_desc( - crop_and_paste_width, - crop_and_paste_height, - out_buffer, - out_buffer_size, - width_align_factor, - height_align_factor) - self._crop_config = acl.media.dvpp_create_roi_config( - 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) - # set crop area: - rx = float(width) / float(crop_and_paste_width) - ry = float(height) / float(crop_and_paste_height) - if rx > ry: - dx = 0 - r = rx - dy = int((crop_and_paste_height - height / r) / 2) - else: - dy = 0 - r = ry - dx = int((crop_and_paste_width - width / r) / 2) - pasteRightOffset = int(crop_and_paste_width - 2 * dx) - pasteBottomOffset = int(crop_and_paste_height - 2 * dy) - if (pasteRightOffset % 2) == 0: - pasteRightOffset = pasteRightOffset - 1 - if (pasteBottomOffset % 2) == 0: - pasteBottomOffset = pasteBottomOffset - 1 - self._paste_config = acl.media.dvpp_create_roi_config( - 0, pasteRightOffset, 0, pasteBottomOffset) - ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, - input_desc, - output_desc, - self._crop_config, - self._paste_config, - self._stream) - utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) - ret = acl.rt.synchronize_stream(self._stream) - utils.check_ret("acl.rt.synchronize_stream", ret) - log_info('AclLiteImageProc vpc crop and paste stage success') - stride_width = crop_and_paste_width - 2 * dx - stride_height = crop_and_paste_height - 2 * dy - acl.media.dvpp_destroy_pic_desc(input_desc) - acl.media.dvpp_destroy_pic_desc(output_desc) - - return AclLiteImage(out_buffer, image.width, image.height, stride_width, - stride_height, out_buffer_size, constants.MEMORY_DVPP) - - def crop_and_paste_get_roi( - self, - image, - width, - height, - crop_and_paste_width, - crop_and_paste_height): - """ - :image: input image - :width: input image width - :height: input image height - :crop_and_paste_width: crop_and_paste_width - :crop_and_paste_height: crop_and_paste_height - :return: return AclLiteImage - """ 
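# Note (annotation added for clarity; not part of the original file):
# crop_and_paste() above letterboxes the source into the target while
# preserving aspect ratio: with r = max(width/target_w, height/target_h),
# the paste region is inset by dx = (target_w - width/r) / 2 horizontally
# and dy = (target_h - height/r) / 2 vertically, centering the scaled
# image with symmetric padding. Quick check for a 1920x1080 frame pasted
# into 640x640: r = 3.0, dx = 0, dy = int((640 - 1080/3) / 2) = 140, so
# the valid image occupies a 640x360 band in the middle of the canvas.
# This method, crop_and_paste_get_roi(), is the same flow without the
# centering offsets: the crop is pasted across the full target region.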
- log_info('AclLiteImageProc vpc crop and paste stage:') - crop_and_paste_width = utils.align_up2(crop_and_paste_width) - crop_and_paste_height = utils.align_up2(crop_and_paste_height) - soc_version = acl.get_soc_name() - if soc_version == "Ascend310B1" : - width_align_factor = 2 - height_align_factor = 2 - stride_width = crop_and_paste_width - stride_height = utils.align_up2(crop_and_paste_height) - else: - width_align_factor = 16 - height_align_factor = 2 - stride_width = utils.align_up16(crop_and_paste_width) - stride_height = utils.align_up2(crop_and_paste_height) - input_desc = self._gen_input_pic_desc(image, width_align_factor, height_align_factor) - out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) - out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) - output_desc = self._gen_output_pic_desc( - crop_and_paste_width, - crop_and_paste_height, - out_buffer, - out_buffer_size, - width_align_factor, - height_align_factor) - self._crop_config = acl.media.dvpp_create_roi_config( - 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) - self._paste_config = acl.media.dvpp_create_roi_config( - 0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1) - ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, - input_desc, - output_desc, - self._crop_config, - self._paste_config, - self._stream) - utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) - ret = acl.rt.synchronize_stream(self._stream) - utils.check_ret("acl.rt.synchronize_stream", ret) - log_info('AclLiteImageProc vpc crop and paste stage success') - stride_width = utils.align_up16(crop_and_paste_width) - stride_height = utils.align_up2(crop_and_paste_height) - acl.media.dvpp_destroy_pic_desc(input_desc) - acl.media.dvpp_destroy_pic_desc(output_desc) - return AclLiteImage(out_buffer, image.width, image.height, stride_width, - stride_height, out_buffer_size, constants.MEMORY_DVPP) - - def jpege(self, image): - """ - Convert yuv420sp pictures to jpeg pictures - """ - # create input image - input_desc = self._gen_input_pic_desc(image) - # Predict the memory size required for conversion - output_size, ret = acl.media.dvpp_jpeg_predict_enc_size( - input_desc, self._jpege_config) - if (ret != constants.ACL_SUCCESS): - log_error("Predict jpege output size failed") - return None - # Request memory required for conversion - output_buffer, ret = acl.media.dvpp_malloc(output_size) - if (ret != constants.ACL_SUCCESS): - log_error("Malloc jpege output memory failed") - return None - output_size_array = np.array([output_size], dtype=np.int32) - if "bytes_to_ptr" in dir(acl.util): - bytes_data = output_size_array.tobytes() - output_size_ptr = acl.util.bytes_to_ptr(bytes_data) - else: - output_size_ptr = acl.util.numpy_to_ptr(output_size_array) - - # Call jpege asynchronous interface to convert pictures - ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc, - input_desc, output_buffer, - output_size_ptr, - self._jpege_config, - self._stream) - if (ret != constants.ACL_SUCCESS): - log_error("Jpege failed, ret ", ret) - return None - # Wait for the conversion to complete - ret = acl.rt.synchronize_stream(self._stream) - if (ret != constants.ACL_SUCCESS): - log_error("Jpege synchronize stream, failed, ret ", ret) - return None - # Release resources - acl.media.dvpp_destroy_pic_desc(input_desc) - if "bytes_to_ptr" in dir(acl.util): - output_size_array=np.frombuffer(bytes_data,dtype=output_size_array.dtype).reshape(output_size_array.shape) - return AclLiteImage( - output_buffer, image.width, 
image.height, 0, 0, int( - output_size_array[0]), constants.MEMORY_DVPP) - - def destroy(self): - """ - dvpp resource release - """ - if self._is_destroyed: - return - - if self._resize_config: - acl.media.dvpp_destroy_resize_config(self._resize_config) - - if self._dvpp_channel_desc: - acl.media.dvpp_destroy_channel(self._dvpp_channel_desc) - acl.media.dvpp_destroy_channel_desc(self._dvpp_channel_desc) - - if self._jpege_config: - acl.media.dvpp_destroy_jpege_config(self._jpege_config) - self._is_destroyed = True - resource_list.unregister(self) - log_info("dvpp resource release success") - - def __del__(self): - self.destroy() - diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py deleted file mode 100644 index 5409410..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_logger.py +++ /dev/null @@ -1,91 +0,0 @@ -import sys -import os - -import acl - -_ACL_DEBUG = 0 -_ACL_INFO = 1 -_ACL_WARNING = 2 -_ACL_ERROR = 3 - -def log_error(*log_msg): - """Recode error level log to file - Args: - *log_msg: format string and args list - """ - log_str = [str(i) for i in log_msg] - log_str = "[ERROR]\t" + "".join(log_str) - print(log_str) - - caller_frame = sys._getframe().f_back - # caller file - filename = caller_frame.f_code.co_filename - # caller line no - line_no = caller_frame.f_lineno - # caller function - func_name = caller_frame.f_code.co_name - - message = "[" + filename + ":" + str(line_no) + \ - " " + func_name + "]" + log_str - acl.app_log(_ACL_ERROR, message) - -def log_warning(*log_msg): - """Recode warning level log to file - Args: - *log_msg: format string and args list - """ - log_str = [str(i) for i in log_msg] - log_str = "[WARNING]\t" + "".join(log_str) - - print(log_str) - - caller_frame = sys._getframe().f_back - # caller file - filename = caller_frame.f_code.co_filename - # caller line no - line_no = caller_frame.f_lineno - # caller function - func_name = caller_frame.f_code.co_name - - message = "[" + filename + ":" + str(line_no) + \ - " " + func_name + "]" + log_str - acl.app_log(_ACL_WARNING, message) - -def log_info(*log_msg): - """Recode info level log to file - Args: - *log_msg: format string and args list - """ - log_str = [str(i) for i in log_msg] - log_str = "[INFO]\t" + "".join(log_str) - print(log_str) - caller_frame = sys._getframe().f_back - # caller file - filename = caller_frame.f_code.co_filename - # caller line no - line_no = caller_frame.f_lineno - # caller function - func_name = caller_frame.f_code.co_name - - message = "[" + filename + ":" + str(line_no) + \ - " " + func_name + "]" + log_str - acl.app_log(_ACL_INFO, message) - -def log_debug(*log_msg): - """Recode debug level log to file - Args: - *log_msg: format string and args list - """ - log_str = [str(i) for i in log_msg] - log_str = "[DEBUG]\t" + "".join(log_str) - caller_frame = sys._getframe().f_back - # caller file - filename = caller_frame.f_code.co_filename - # caller line no - line_no = caller_frame.f_lineno - # caller function - func_name = caller_frame.f_code.co_name - - message = "[" + filename + ":" + str(line_no) + \ - " " + func_name + "]" + log_str - acl.app_log(_ACL_DEBUG, message) diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py deleted file mode 100644 index 8c7bb09..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_model.py +++ /dev/null @@ -1,434 +0,0 @@ -""" -Copyright (R) @huawei.com, 
all rights reserved --*- coding:utf-8 -*- -CREATED: 2020-6-04 20:12:13 -MODIFIED: 2020-6-28 14:04:45 -""" -import acl -import struct -import numpy as np -import datetime -import sys -import os -import time - -import constants as const -import acllite_utils as utils -from acllite_logger import log_error, log_info, log_warning -from acllite_image import AclLiteImage -from acllite_resource import resource_list - -class AclLiteModel(object): - """ - wrap acl model inference interface, include input dataset construction, - execute, and output transform to numpy array - Attributes: - model_path: om offline mode file path - """ - - def __init__(self, model_path, load_type=0): - self._run_mode, ret = acl.rt.get_run_mode() - utils.check_ret("acl.rt.get_run_mode", ret) - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE - if self._run_mode == const.ACL_HOST: - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST - - self._model_path = model_path # string - self._load_type = load_type - self._model_id = None # pointer - self._input_num = 0 - self._input_buffer = [] - self._input_dataset = None - self._output_dataset = None - self._model_desc = None # pointer when using - self._output_size = 0 - self._init_resource() - self._is_destroyed = False - self.runMode_ = acl.rt.get_run_mode() - resource_list.register(self) - - def _init_resource(self): - log_info("Init model resource start...") - if not os.path.isfile(self._model_path): - log_error( - "model_path failed, please check. model_path=%s" % - self._model_path) - return const.FAILED - - if self._load_type == 0: - self._model_id, ret = acl.mdl.load_from_file(self._model_path) - utils.check_ret("acl.mdl.load_from_file", ret) - elif self._load_type == 1: - with open(self._model_path, "rb") as f: - om_bytes = f.read() - if om_bytes: - ptr = acl.util.bytes_to_ptr(om_bytes) - self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes)) - utils.check_ret("acl.mdl.load_from_mem", ret) - else: - log_error( - "model_context is null, please check. model_path=%s" % - self._model_path) - return const.FAILED - else: - log_error( - "load_type is not in 0 or 1, please check. 
load_type=%d" % - self._load_type) - return const.FAILED - self._model_desc = acl.mdl.create_desc() - ret = acl.mdl.get_desc(self._model_desc, self._model_id) - utils.check_ret("acl.mdl.get_desc", ret) - # get outputs num of model - self._output_size = acl.mdl.get_num_outputs(self._model_desc) - # create output dataset - self._gen_output_dataset(self._output_size) - # recode input data address,if need malloc memory,the memory will be - # reuseable - self._init_input_buffer() - log_info("Init model resource success") - self._gen_input_dataset() - - return const.SUCCESS - - def _gen_output_dataset(self, ouput_num): - log_info("AclLiteModel create model output dataset:") - dataset = acl.mdl.create_dataset() - for i in range(ouput_num): - # malloc device memory for output - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) - utils.check_ret("acl.rt.malloc", ret) - # crate oputput data buffer - dataset_buffer = acl.create_data_buffer(buf, size) - _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) - log_info("malloc output %d, size %d" % (i, size)) - if ret: - acl.rt.free(buf) - acl.destroy_data_buffer(dataset_buffer) - utils.check_ret("acl.destroy_data_buffer", ret) - self._output_dataset = dataset - log_info("Create model output dataset success") - - def _init_input_buffer(self): - self._input_num = acl.mdl.get_num_inputs(self._model_desc) - for i in range(self._input_num): - item = {"addr": None, "size": 0} - self._input_buffer.append(item) - - def _gen_input_dataset(self): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - - self._input_dataset = acl.mdl.create_dataset() - self.input_buffers = [] - self.input_buffer_sizes = [] - for i in range(self._input_num): - input_buffer_size = acl.mdl.get_input_size_by_index(self._model_desc, i) - input_buffer, ret = acl.rt.malloc(input_buffer_size, const.ACL_MEM_MALLOC_HUGE_FIRST) - input_data = acl.create_data_buffer(input_buffer, input_buffer_size) - self._input_dataset, ret = acl.mdl.add_dataset_buffer(self._input_dataset, input_data) - if ret != const.ACL_SUCCESS: - print('acl.mdl.add_dataset_buffer failed, errorCode is', ret) - self.input_buffers.append(input_buffer) - self.input_buffer_sizes.append(input_buffer_size) - - return ret - - def _parse_input_data(self, input_data, index): - data = None - size = 0 - if isinstance(input_data, AclLiteImage): - size = input_data.size - data = input_data.data() - elif isinstance(input_data, np.ndarray): - size = input_data.size * input_data.itemsize - if "bytes_to_ptr" in dir(acl.util): - bytes_data = input_data.tobytes() - ptr = acl.util.bytes_to_ptr(bytes_data) - else: - ptr = acl.util.numpy_to_ptr(input_data) - - data = ptr - if data is None: - size = 0 - log_error("Copy input to device failed") - elif (isinstance(input_data, dict) and - ('data' in input_data.keys()) and ('size' in input_data.keys())): - size = input_data['size'] - data = input_data['data'] - else: - log_error("Unsupport input") - - return data, size - - def _copy_input_to_device(self, input_ptr, size, index): - buffer_item = self._input_buffer[index] - data = None - if buffer_item['addr'] is None: - if self._run_mode == const.ACL_HOST: - data = utils.copy_data_host_to_device(input_ptr, size) - else: - data = utils.copy_data_device_to_device(input_ptr, size) - if data is None: - log_error("Malloc memory and copy model %dth " - "input to device failed" % (index)) - return None - buffer_item['addr'] = 
data - buffer_item['size'] = size - elif size == buffer_item['size']: - if self._run_mode == const.ACL_HOST: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_HOST_TO_DEVICE) - else: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_DEVICE_TO_DEVICE) - if ret != const.ACL_SUCCESS: - log_error("Copy model %dth input to device failed" % (index)) - return None - data = buffer_item['addr'] - else: - log_error("The model %dth input size %d is change," - " before is %d" % (index, size, buffer_item['size'])) - return None - - return data - - def _set_dynamic_batch_size(self, batch): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - if ret != const.ACL_SUCCESS: - log_error("get_input_index_by_name failed") - return const.FAILED - batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("get_dynamic_batch failed") - return const.FAILED - log_info("get dynamic_batch = ", batch_dic) - ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch) - if ret != const.ACL_SUCCESS: - log_error("set_dynamic_batch_size failed, ret = ", ret) - return const.FAILED - if batch in batch_dic["batch"]: - return const.SUCCESS - else: - assert ret == ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID - log_info("dynamic batch {} is not in {}".format(batch, batch_dic["batch"])) - return const.FAILED - - def _execute_with_dynamic_batch_size(self, input_list, batch): - ret = self._gen_input_dataset(input_list) - if ret == const.FAILED: - log_error("Gen model input dataset failed") - return None - - ret = self._set_dynamic_batch_size(batch) - if ret == const.FAILED: - log_error("Set dynamic batch failed") - return None - - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - - self._release_dataset(self._input_dataset) - self._input_dataset = None - - return self._output_dataset_to_numpy() - - def execute(self, input_list): - """ - inference input data - Args: - input_list: input data list, support AclLiteImage, - numpy array and {'data': ,'size':} dict - returns: - inference result data, which is a numpy array list, - each corresponse to a model output - """ - if self.runMode_ == const.ACL_DEVICE: - kind = const.ACL_MEMCPY_DEVICE_TO_DEVICE - else: - kind = const.ACL_MEMCPY_HOST_TO_DEVICE - for i,data_in in enumerate(input_list): - if "bytes_to_ptr" in dir(acl.util): - bytes_data = data_in.tobytes() - ptr = acl.util.bytes_to_ptr(bytes_data) - else: - ptr = acl.util.numpy_to_ptr(self.image_bytes) - ret = acl.rt.memcpy(self.input_buffers[i], - self.input_buffer_sizes[i], - ptr, - self.input_buffer_sizes[i], - kind) - # print(f'gen_input_data_time:{start - a}') - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - # print(f'infer execute run time: {b - start}') - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - # self._release_dataset(self._input_dataset) - # self._input_dataset = None - numpy_data = self._output_dataset_to_numpy() - return numpy_data - - def _output_dataset_to_numpy(self): - dataset = [] - output_tensor_list = self._gen_output_tensor() - num = acl.mdl.get_dataset_num_buffers(self._output_dataset) - - for i in range(num): - buf = acl.mdl.get_dataset_buffer(self._output_dataset, 
i) - data = acl.get_data_buffer_addr(buf) - size = int(acl.get_data_buffer_size(buf)) - output_ptr = output_tensor_list[i]["ptr"] - output_data = output_tensor_list[i]["tensor"] - if isinstance (output_data,bytes): - data_size = len(output_data) - else: - data_size = output_data.size * output_data.itemsize - ret = acl.rt.memcpy(output_ptr, - data_size, - data, size, self._copy_policy) - if ret != const.ACL_SUCCESS: - log_error("Memcpy inference output to local failed") - return None - - if isinstance (output_data,bytes): - output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"]) - output_tensor = output_data.copy() - else: - output_tensor = output_data - dataset.append(output_tensor) - - return dataset - - def _gen_output_tensor(self): - output_tensor_list = [] - for i in range(self._output_size): - dims = acl.mdl.get_output_dims(self._model_desc, i) - shape = tuple(dims[0]["dims"]) - datatype = acl.mdl.get_output_data_type(self._model_desc, i) - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - - if datatype == const.ACL_FLOAT: - np_type = np.float32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_DOUBLE: - np_type = np.float64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT64: - np_type = np.int64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT64: - np_type = np.uint64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT32: - np_type = np.int32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT32: - np_type = np.uint32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_FLOAT16: - np_type = np.float16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT16: - np_type = np.int16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT16: - np_type = np.uint16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT8: - np_type = np.int8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - elif datatype == const.ACL_BOOL or datatype == const.ACL_UINT8: - np_type = np.uint8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - else: - log_error("Unspport model output datatype ", datatype) - return None - - if not output_tensor.flags['C_CONTIGUOUS']: - output_tensor = np.ascontiguousarray(output_tensor) - - if "bytes_to_ptr" in dir(acl.util): - bytes_data=output_tensor.tobytes() - tensor_ptr=acl.util.bytes_to_ptr(bytes_data) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": bytes_data, - "shape":output_tensor.shape, - "dtype":output_tensor.dtype},) - else: - tensor_ptr = acl.util.numpy_to_ptr(output_tensor) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": output_tensor}) - - return output_tensor_list - - def _release_dataset(self, dataset, free_memory=False): - if not dataset: - return - - num = acl.mdl.get_dataset_num_buffers(dataset) - for i in range(num): - data_buf = acl.mdl.get_dataset_buffer(dataset, i) - if data_buf: - self._release_databuffer(data_buf, free_memory) - - ret = acl.mdl.destroy_dataset(dataset) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def 
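# The dtype ladder in _gen_output_tensor above can be collapsed into one
# table; numpy's own itemsize then replaces the hard-coded size // 2,
# size // 4 and size // 8 divisors. A sketch, equivalent for the types listed:
import numpy as np

ACL_TO_NUMPY = {
    const.ACL_FLOAT: np.float32, const.ACL_DOUBLE: np.float64,
    const.ACL_FLOAT16: np.float16, const.ACL_INT64: np.int64,
    const.ACL_UINT64: np.uint64, const.ACL_INT32: np.int32,
    const.ACL_UINT32: np.uint32, const.ACL_INT16: np.int16,
    const.ACL_UINT16: np.uint16, const.ACL_INT8: np.int8,
    const.ACL_UINT8: np.uint8, const.ACL_BOOL: np.uint8,
}

def _empty_output_tensor(datatype, size, shape):
    np_type = ACL_TO_NUMPY[datatype]
    count = size // np.dtype(np_type).itemsize
    return np.zeros(count, dtype=np_type).reshape(shape)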
_release_databuffer(self, data_buffer, free_memory=False): - if free_memory: - data_addr = acl.get_data_buffer_addr(data_buffer) - if data_addr: - acl.rt.free(data_addr) - - ret = acl.destroy_data_buffer(data_buffer) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def destroy(self): - """ - release resource of model inference - Args: - null - Returns: - null - """ - if self._is_destroyed: - return - - self._release_dataset(self._output_dataset, free_memory=True) - if self._model_id: - ret = acl.mdl.unload(self._model_id) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.unload error:", ret) - - if self._model_desc: - ret = acl.mdl.destroy_desc(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.destroy_desc error:", ret) - - self._is_destroyed = True - resource_list.unregister(self) - log_info("AclLiteModel release source success") - - def __del__(self): - self.destroy() diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py deleted file mode 100644 index faa60f4..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_2.py +++ /dev/null @@ -1,477 +0,0 @@ -""" -Copyright (R) @huawei.com, all rights reserved --*- coding:utf-8 -*- -CREATED: 2020-6-04 20:12:13 -MODIFIED: 2020-6-28 14:04:45 -""" -import acl -import struct -import numpy as np -import datetime -import sys -import os -import time - -import constants as const -import acllite_utils as utils -from acllite_logger import log_error, log_info, log_warning -from acllite_image import AclLiteImage -from acllite_resource import resource_list - -class AclLiteModel(object): - """ - wrap acl model inference interface, include input dataset construction, - execute, and output transform to numpy array - Attributes: - model_path: om offline mode file path - """ - - def __init__(self, model_path, load_type=0): - self._run_mode, ret = acl.rt.get_run_mode() - utils.check_ret("acl.rt.get_run_mode", ret) - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE - if self._run_mode == const.ACL_HOST: - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST - - self._model_path = model_path # string - self._load_type = load_type - self._model_id = None # pointer - self._input_num = 0 - self._input_buffer = [] - self._input_dataset = None - self._output_dataset = None - self._model_desc = None # pointer when using - self._output_size = 0 - self._init_resource() - self._is_destroyed = False - self.runMode_ = acl.rt.get_run_mode() - resource_list.register(self) - - def _init_resource(self): - log_info("Init model resource start...") - if not os.path.isfile(self._model_path): - log_error( - "model_path failed, please check. model_path=%s" % - self._model_path) - return const.FAILED - - if self._load_type == 0: - self._model_id, ret = acl.mdl.load_from_file(self._model_path) - utils.check_ret("acl.mdl.load_from_file", ret) - elif self._load_type == 1: - with open(self._model_path, "rb") as f: - om_bytes = f.read() - if om_bytes: - ptr = acl.util.bytes_to_ptr(om_bytes) - self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes)) - utils.check_ret("acl.mdl.load_from_mem", ret) - else: - log_error( - "model_context is null, please check. model_path=%s" % - self._model_path) - return const.FAILED - else: - log_error( - "load_type is not in 0 or 1, please check. 
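# An end-to-end lifecycle sketch for AclLiteModel. The .om path and the input
# shape are illustrative; a real input must match the byte size the model
# expects (see _gen_input_dataset).
import numpy as np
from acllite_resource import AclLiteResource

def _inference_example():
    acl_resource = AclLiteResource()
    acl_resource.init()                    # acl.init / set_device / context
    model = AclLiteModel("yolov5s.om")     # hypothetical model file
    frame = np.zeros((1, 3, 640, 640), dtype=np.float16)
    outputs = model.execute([frame])       # numpy array list, one per output
    model.destroy()                        # also runs via resource_list at exit
    return outputs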
load_type=%d" % - self._load_type) - return const.FAILED - self._model_desc = acl.mdl.create_desc() - ret = acl.mdl.get_desc(self._model_desc, self._model_id) - utils.check_ret("acl.mdl.get_desc", ret) - # get outputs num of model - self._output_size = acl.mdl.get_num_outputs(self._model_desc) - # create output dataset - self._gen_output_dataset(self._output_size) - # recode input data address,if need malloc memory,the memory will be - # reuseable - self._init_input_buffer() - log_info("Init model resource success") - self._gen_input_dataset() - - return const.SUCCESS - - def _gen_output_dataset(self, ouput_num): - log_info("AclLiteModel create model output dataset:") - dataset = acl.mdl.create_dataset() - for i in range(ouput_num): - # malloc device memory for output - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) - utils.check_ret("acl.rt.malloc", ret) - # crate oputput data buffer - dataset_buffer = acl.create_data_buffer(buf, size) - _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) - log_info("malloc output %d, size %d" % (i, size)) - if ret: - acl.rt.free(buf) - acl.destroy_data_buffer(dataset_buffer) - utils.check_ret("acl.destroy_data_buffer", ret) - self._output_dataset = dataset - log_info("Create model output dataset success") - - def _init_input_buffer(self): - self._input_num = acl.mdl.get_num_inputs(self._model_desc) - for i in range(self._input_num): - item = {"addr": None, "size": 0} - self._input_buffer.append(item) - - def _gen_input_dataset(self): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - # if ret == const.ACL_SUCCESS: - # print('111111111111111111111111111111') - # dataLen = acl.mdl.get_input_size_by_index(self._model_desc, dynamicIdx) - # buf, ret = acl.rt.malloc(dataLen, const.ACL_MEM_MALLOC_NORMAL_ONLY) - # utils.check_ret("acl.rt.malloc", ret) - # batch_buffer = {'data': buf, 'size':dataLen} - # input_list.append(batch_buffer) - - ret = const.SUCCESS - # if len(input_list) != self._input_num: - # log_error("Current input data num %d unequal to model " - # "input num %d" % (len(input_list), self._input_num)) - # return const.FAILED - - self._input_dataset = acl.mdl.create_dataset() - self.input_buffers = [] - self.input_buffer_sizes = [] - for i in range(self._input_num): - input_buffer_size = acl.mdl.get_input_size_by_index(self._model_desc, i) - input_buffer, ret = acl.rt.malloc(input_buffer_size, const.ACL_MEM_MALLOC_HUGE_FIRST) - input_data = acl.create_data_buffer(input_buffer, input_buffer_size) - self._input_dataset, ret = acl.mdl.add_dataset_buffer(self._input_dataset, input_data) - if ret != const.ACL_SUCCESS: - print('acl.mdl.add_dataset_buffer failed, errorCode is', ret) - self.input_buffers.append(input_buffer) - self.input_buffer_sizes.append(input_buffer_size) - - - # model_size = acl.mdl.get_input_size_by_index(self._model_desc, i) - # if size != model_size: - # log_warning(" Input[%d] size: %d not equal om size: %d" % (i, size, model_size) +\ - # ", may cause inference result error, please check model input") - - - # dataset_buffer = acl.create_data_buffer(data, size) - # _, ret = acl.mdl.add_dataset_buffer(self._input_dataset, - # dataset_buffer) - # if ret: - # log_error("Add input dataset buffer failed") - # acl.destroy_data_buffer(self._input_dataset) - # ret = const.FAILED - # break - # if ret == const.FAILED: - # self._release_dataset(self._input_dataset) - # self._input_dataset = None - - return 
ret - - def _parse_input_data(self, input_data, index): - data = None - size = 0 - if isinstance(input_data, AclLiteImage): - size = input_data.size - data = input_data.data() - elif isinstance(input_data, np.ndarray): - size = input_data.size * input_data.itemsize - if "bytes_to_ptr" in dir(acl.util): - bytes_data = input_data.tobytes() - ptr = acl.util.bytes_to_ptr(bytes_data) - else: - ptr = acl.util.numpy_to_ptr(input_data) - # start = time.time() - # data = self._copy_input_to_device(ptr, size, index) - # print(f'copy_to_device time:{time.time() - start}') - data = ptr - if data is None: - size = 0 - log_error("Copy input to device failed") - elif (isinstance(input_data, dict) and - ('data' in input_data.keys()) and ('size' in input_data.keys())): - size = input_data['size'] - data = input_data['data'] - else: - log_error("Unsupport input") - - return data, size - - def _copy_input_to_device(self, input_ptr, size, index): - buffer_item = self._input_buffer[index] - data = None - if buffer_item['addr'] is None: - if self._run_mode == const.ACL_HOST: - data = utils.copy_data_host_to_device(input_ptr, size) - else: - data = utils.copy_data_device_to_device(input_ptr, size) - if data is None: - log_error("Malloc memory and copy model %dth " - "input to device failed" % (index)) - return None - buffer_item['addr'] = data - buffer_item['size'] = size - elif size == buffer_item['size']: - if self._run_mode == const.ACL_HOST: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_HOST_TO_DEVICE) - else: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_DEVICE_TO_DEVICE) - if ret != const.ACL_SUCCESS: - log_error("Copy model %dth input to device failed" % (index)) - return None - data = buffer_item['addr'] - else: - log_error("The model %dth input size %d is change," - " before is %d" % (index, size, buffer_item['size'])) - return None - - return data - - def _set_dynamic_batch_size(self, batch): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - if ret != const.ACL_SUCCESS: - log_error("get_input_index_by_name failed") - return const.FAILED - batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("get_dynamic_batch failed") - return const.FAILED - log_info("get dynamic_batch = ", batch_dic) - ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch) - if ret != const.ACL_SUCCESS: - log_error("set_dynamic_batch_size failed, ret = ", ret) - return const.FAILED - if batch in batch_dic["batch"]: - return const.SUCCESS - else: - assert ret == ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID - log_info("dynamic batch {} is not in {}".format(batch, batch_dic["batch"])) - return const.FAILED - - def _execute_with_dynamic_batch_size(self, input_list, batch): - ret = self._gen_input_dataset(input_list) - if ret == const.FAILED: - log_error("Gen model input dataset failed") - return None - - ret = self._set_dynamic_batch_size(batch) - if ret == const.FAILED: - log_error("Set dynamic batch failed") - return None - - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - - self._release_dataset(self._input_dataset) - self._input_dataset = None - - return self._output_dataset_to_numpy() - - def execute(self, input_list): - """ - inference input data - Args: - input_list: 
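# A sketch of the dynamic-batch path above. "ascend_mbatch_shape_data" is the
# reserved extra input that ATC appends when a model is converted with
# --dynamic_batch_size, and the batch value must be one of the grades baked
# into the .om (the batch_dic check above). Note that in this file
# _gen_input_dataset no longer accepts input_list, so this call would raise
# TypeError until the signature is restored; the _bak variant still accepts it.
import numpy as np

def _dynamic_batch_example(model):
    frames = np.zeros((4, 3, 640, 640), dtype=np.float16)  # illustrative batch of 4
    return model._execute_with_dynamic_batch_size([frames], batch=4)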
input data list, support AclLiteImage, - numpy array and {'data': ,'size':} dict - returns: - inference result data, which is a numpy array list, - each corresponse to a model output - """ - a = time.time() - # ret = self._gen_input_dataset(input_list) - # if ret == const.FAILED: - # log_error("Gen model input dataset failed") - # return None - if self.runMode_ == const.ACL_DEVICE: - kind = const.ACL_MEMCPY_DEVICE_TO_DEVICE - else: - kind = const.ACL_MEMCPY_HOST_TO_DEVICE - for i,data_in in enumerate(input_list): - if "bytes_to_ptr" in dir(acl.util): - bytes_data = data_in.tobytes() - ptr = acl.util.bytes_to_ptr(bytes_data) - else: - ptr = acl.util.numpy_to_ptr(self.image_bytes) - ret = acl.rt.memcpy(self.input_buffers[i], - self.input_buffer_sizes[i], - ptr, - self.input_buffer_sizes[i], - kind) - start = time.time() - # print(f'gen_input_data_time:{start - a}') - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - b = time.time() - # print(f'infer execute run time: {b - start}') - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - # self._release_dataset(self._input_dataset) - # self._input_dataset = None - numpy_data = self._output_dataset_to_numpy() - c = time.time() - print(f'frame_count_execute gen_input_data_time: {start - a} infer_execute_run_time: {b - start} output_dataset_to_numpy: {c - b}') - return numpy_data - - def _output_dataset_to_numpy(self): - dataset = [] - output_tensor_list = self._gen_output_tensor() - num = acl.mdl.get_dataset_num_buffers(self._output_dataset) - - for i in range(num): - buf = acl.mdl.get_dataset_buffer(self._output_dataset, i) - data = acl.get_data_buffer_addr(buf) - size = int(acl.get_data_buffer_size(buf)) - output_ptr = output_tensor_list[i]["ptr"] - output_data = output_tensor_list[i]["tensor"] - if isinstance (output_data,bytes): - data_size = len(output_data) - else: - data_size = output_data.size * output_data.itemsize - ret = acl.rt.memcpy(output_ptr, - data_size, - data, size, self._copy_policy) - if ret != const.ACL_SUCCESS: - log_error("Memcpy inference output to local failed") - return None - - if isinstance (output_data,bytes): - output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"]) - output_tensor = output_data.copy() - else: - output_tensor = output_data - dataset.append(output_tensor) - - return dataset - - def _gen_output_tensor(self): - output_tensor_list = [] - for i in range(self._output_size): - dims = acl.mdl.get_output_dims(self._model_desc, i) - shape = tuple(dims[0]["dims"]) - datatype = acl.mdl.get_output_data_type(self._model_desc, i) - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - - if datatype == const.ACL_FLOAT: - np_type = np.float32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_DOUBLE: - np_type = np.float64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT64: - np_type = np.int64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT64: - np_type = np.uint64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT32: - np_type = np.int32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT32: - np_type = np.uint32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - 
elif datatype == const.ACL_FLOAT16: - np_type = np.float16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT16: - np_type = np.int16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT16: - np_type = np.uint16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT8: - np_type = np.int8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - elif datatype == const.ACL_BOOL or datatype == const.ACL_UINT8: - np_type = np.uint8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - else: - log_error("Unspport model output datatype ", datatype) - return None - - if not output_tensor.flags['C_CONTIGUOUS']: - output_tensor = np.ascontiguousarray(output_tensor) - - if "bytes_to_ptr" in dir(acl.util): - bytes_data=output_tensor.tobytes() - tensor_ptr=acl.util.bytes_to_ptr(bytes_data) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": bytes_data, - "shape":output_tensor.shape, - "dtype":output_tensor.dtype},) - else: - tensor_ptr = acl.util.numpy_to_ptr(output_tensor) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": output_tensor}) - - return output_tensor_list - - def _release_dataset(self, dataset, free_memory=False): - if not dataset: - return - - num = acl.mdl.get_dataset_num_buffers(dataset) - for i in range(num): - data_buf = acl.mdl.get_dataset_buffer(dataset, i) - if data_buf: - self._release_databuffer(data_buf, free_memory) - - ret = acl.mdl.destroy_dataset(dataset) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def _release_databuffer(self, data_buffer, free_memory=False): - if free_memory: - data_addr = acl.get_data_buffer_addr(data_buffer) - if data_addr: - acl.rt.free(data_addr) - - ret = acl.destroy_data_buffer(data_buffer) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def destroy(self): - """ - release resource of model inference - Args: - null - Returns: - null - """ - if self._is_destroyed: - return - - self._release_dataset(self._output_dataset, free_memory=True) - if self._model_id: - ret = acl.mdl.unload(self._model_id) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.unload error:", ret) - - if self._model_desc: - ret = acl.mdl.destroy_desc(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.destroy_desc error:", ret) - - self._is_destroyed = True - resource_list.unregister(self) - log_info("AclLiteModel release source success") - - def __del__(self): - self.destroy() diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py deleted file mode 100644 index 2b6ffeb..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_model_bak.py +++ /dev/null @@ -1,455 +0,0 @@ -""" -Copyright (R) @huawei.com, all rights reserved --*- coding:utf-8 -*- -CREATED: 2020-6-04 20:12:13 -MODIFIED: 2020-6-28 14:04:45 -""" -import acl -import struct -import numpy as np -import datetime -import sys -import os -import time - -import constants as const -import acllite_utils as utils -from acllite_logger import log_error, log_info, log_warning -from acllite_image import AclLiteImage -from acllite_resource import resource_list - -class AclLiteModel(object): - """ - wrap acl model inference interface, include input dataset construction, - execute, and output transform to numpy array - Attributes: - model_path: om offline mode 
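# The copy directions used throughout these classes follow one rule: where
# the Python process runs decides the memcpy kind. A compact restatement of
# the choices made in __init__ and execute() above (a sketch, not a new API):
def _memcpy_kinds(run_mode):
    if run_mode == const.ACL_DEVICE:      # process runs on the device
        to_model = const.ACL_MEMCPY_DEVICE_TO_DEVICE
        from_model = const.ACL_MEMCPY_DEVICE_TO_DEVICE
    else:                                 # const.ACL_HOST
        to_model = const.ACL_MEMCPY_HOST_TO_DEVICE
        from_model = const.ACL_MEMCPY_DEVICE_TO_HOST
    return to_model, from_model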
file path - """ - - def __init__(self, model_path, load_type=0): - self._run_mode, ret = acl.rt.get_run_mode() - utils.check_ret("acl.rt.get_run_mode", ret) - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE - if self._run_mode == const.ACL_HOST: - self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST - - self._model_path = model_path # string - self._load_type = load_type - self._model_id = None # pointer - self._input_num = 0 - self._input_buffer = [] - self._input_dataset = None - self._output_dataset = None - self._model_desc = None # pointer when using - self._output_size = 0 - self._init_resource() - self._is_destroyed = False - resource_list.register(self) - - def _init_resource(self): - log_info("Init model resource start...") - if not os.path.isfile(self._model_path): - log_error( - "model_path failed, please check. model_path=%s" % - self._model_path) - return const.FAILED - - if self._load_type == 0: - self._model_id, ret = acl.mdl.load_from_file(self._model_path) - utils.check_ret("acl.mdl.load_from_file", ret) - elif self._load_type == 1: - with open(self._model_path, "rb") as f: - om_bytes = f.read() - if om_bytes: - ptr = acl.util.bytes_to_ptr(om_bytes) - self._model_id, ret = acl.mdl.load_from_mem(ptr, len(om_bytes)) - utils.check_ret("acl.mdl.load_from_mem", ret) - else: - log_error( - "model_context is null, please check. model_path=%s" % - self._model_path) - return const.FAILED - else: - log_error( - "load_type is not in 0 or 1, please check. load_type=%d" % - self._load_type) - return const.FAILED - self._model_desc = acl.mdl.create_desc() - ret = acl.mdl.get_desc(self._model_desc, self._model_id) - utils.check_ret("acl.mdl.get_desc", ret) - # get outputs num of model - self._output_size = acl.mdl.get_num_outputs(self._model_desc) - # create output dataset - self._gen_output_dataset(self._output_size) - # recode input data address,if need malloc memory,the memory will be - # reuseable - self._init_input_buffer() - log_info("Init model resource success") - - return const.SUCCESS - - def _gen_output_dataset(self, ouput_num): - log_info("AclLiteModel create model output dataset:") - dataset = acl.mdl.create_dataset() - for i in range(ouput_num): - # malloc device memory for output - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) - utils.check_ret("acl.rt.malloc", ret) - # crate oputput data buffer - dataset_buffer = acl.create_data_buffer(buf, size) - _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) - log_info("malloc output %d, size %d" % (i, size)) - if ret: - acl.rt.free(buf) - acl.destroy_data_buffer(dataset_buffer) - utils.check_ret("acl.destroy_data_buffer", ret) - self._output_dataset = dataset - log_info("Create model output dataset success") - - def _init_input_buffer(self): - self._input_num = acl.mdl.get_num_inputs(self._model_desc) - for i in range(self._input_num): - item = {"addr": None, "size": 0} - self._input_buffer.append(item) - - def _gen_input_dataset(self, input_list): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - # if ret == const.ACL_SUCCESS: - # print('111111111111111111111111111111') - # dataLen = acl.mdl.get_input_size_by_index(self._model_desc, dynamicIdx) - # buf, ret = acl.rt.malloc(dataLen, const.ACL_MEM_MALLOC_NORMAL_ONLY) - # utils.check_ret("acl.rt.malloc", ret) - # batch_buffer = {'data': buf, 'size':dataLen} - # input_list.append(batch_buffer) - - ret = const.SUCCESS - if len(input_list) 
!= self._input_num: - log_error("Current input data num %d unequal to model " - "input num %d" % (len(input_list), self._input_num)) - return const.FAILED - - self._input_dataset = acl.mdl.create_dataset() - for i in range(self._input_num): - item = input_list[i] - data, size = self._parse_input_data(item, i) - if (data is None) or (size == 0): - ret = const.FAILED - log_error("The %d input is invalid" % (i)) - break - - model_size = acl.mdl.get_input_size_by_index(self._model_desc, i) - if size != model_size: - log_warning(" Input[%d] size: %d not equal om size: %d" % (i, size, model_size) +\ - ", may cause inference result error, please check model input") - - - dataset_buffer = acl.create_data_buffer(data, size) - _, ret = acl.mdl.add_dataset_buffer(self._input_dataset, - dataset_buffer) - if ret: - log_error("Add input dataset buffer failed") - acl.destroy_data_buffer(self._input_dataset) - ret = const.FAILED - break - if ret == const.FAILED: - self._release_dataset(self._input_dataset) - self._input_dataset = None - - return ret - - def _parse_input_data(self, input_data, index): - data = None - size = 0 - if isinstance(input_data, AclLiteImage): - size = input_data.size - data = input_data.data() - elif isinstance(input_data, np.ndarray): - size = input_data.size * input_data.itemsize - if "bytes_to_ptr" in dir(acl.util): - bytes_data=input_data.tobytes() - ptr=acl.util.bytes_to_ptr(bytes_data) - else: - ptr = acl.util.numpy_to_ptr(input_data) - # start = time.time() - # data = self._copy_input_to_device(ptr, size, index) - # print(f'copy_to_device time:{time.time() - start}') - data = ptr - if data is None: - size = 0 - log_error("Copy input to device failed") - elif (isinstance(input_data, dict) and - ('data' in input_data.keys()) and ('size' in input_data.keys())): - size = input_data['size'] - data = input_data['data'] - else: - log_error("Unsupport input") - - return data, size - - def _copy_input_to_device(self, input_ptr, size, index): - buffer_item = self._input_buffer[index] - data = None - if buffer_item['addr'] is None: - if self._run_mode == const.ACL_HOST: - data = utils.copy_data_host_to_device(input_ptr, size) - else: - data = utils.copy_data_device_to_device(input_ptr, size) - if data is None: - log_error("Malloc memory and copy model %dth " - "input to device failed" % (index)) - return None - buffer_item['addr'] = data - buffer_item['size'] = size - elif size == buffer_item['size']: - if self._run_mode == const.ACL_HOST: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_HOST_TO_DEVICE) - else: - ret = acl.rt.memcpy(buffer_item['addr'], size, - input_ptr, size, - const.ACL_MEMCPY_DEVICE_TO_DEVICE) - if ret != const.ACL_SUCCESS: - log_error("Copy model %dth input to device failed" % (index)) - return None - data = buffer_item['addr'] - else: - log_error("The model %dth input size %d is change," - " before is %d" % (index, size, buffer_item['size'])) - return None - - return data - - def _set_dynamic_batch_size(self, batch): - dynamicIdx, ret = acl.mdl.get_input_index_by_name(self._model_desc, "ascend_mbatch_shape_data") - if ret != const.ACL_SUCCESS: - log_error("get_input_index_by_name failed") - return const.FAILED - batch_dic, ret = acl.mdl.get_dynamic_batch(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("get_dynamic_batch failed") - return const.FAILED - log_info("get dynamic_batch = ", batch_dic) - ret = acl.mdl.set_dynamic_batch_size(self._model_id, self._input_dataset, dynamicIdx, batch) - if ret != 
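# A pre-flight check in the spirit of the size warning above: compare each
# numpy input against the sizes recorded in the model desc before building
# the dataset, so mismatches fail fast with a clear message (a sketch).
def _validate_inputs(model_desc, input_list):
    for i, arr in enumerate(input_list):
        expected = acl.mdl.get_input_size_by_index(model_desc, i)
        actual = arr.size * arr.itemsize
        if actual != expected:
            raise ValueError("input[%d] is %d bytes, model expects %d"
                             % (i, actual, expected))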
const.ACL_SUCCESS: - log_error("set_dynamic_batch_size failed, ret = ", ret) - return const.FAILED - if batch in batch_dic["batch"]: - return const.SUCCESS - else: - assert ret == ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID - log_info("dynamic batch {} is not in {}".format(batch, batch_dic["batch"])) - return const.FAILED - - def _execute_with_dynamic_batch_size(self, input_list, batch): - ret = self._gen_input_dataset(input_list) - if ret == const.FAILED: - log_error("Gen model input dataset failed") - return None - - ret = self._set_dynamic_batch_size(batch) - if ret == const.FAILED: - log_error("Set dynamic batch failed") - return None - - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - - self._release_dataset(self._input_dataset) - self._input_dataset = None - - return self._output_dataset_to_numpy() - - def execute(self, input_list): - """ - inference input data - Args: - input_list: input data list, support AclLiteImage, - numpy array and {'data': ,'size':} dict - returns: - inference result data, which is a numpy array list, - each corresponse to a model output - """ - a = time.time() - ret = self._gen_input_dataset(input_list) - if ret == const.FAILED: - log_error("Gen model input dataset failed") - return None - start = time.time() - # print(f'gen_input_data_time:{start - a}') - ret = acl.mdl.execute(self._model_id, - self._input_dataset, - self._output_dataset) - b = time.time() - # print(f'infer execute run time: {b - start}') - if ret != const.ACL_SUCCESS: - log_error("Execute model failed for acl.mdl.execute error ", ret) - return None - self._release_dataset(self._input_dataset) - self._input_dataset = None - numpy_data = self._output_dataset_to_numpy() - c = time.time() - print(f'frame_count_execute gen_input_data_time: {start - a} infer_execute_run_time: {b - start} output_dataset_to_numpy: {c - b}') - return numpy_data - - def _output_dataset_to_numpy(self): - dataset = [] - output_tensor_list = self._gen_output_tensor() - num = acl.mdl.get_dataset_num_buffers(self._output_dataset) - - for i in range(num): - buf = acl.mdl.get_dataset_buffer(self._output_dataset, i) - data = acl.get_data_buffer_addr(buf) - size = int(acl.get_data_buffer_size(buf)) - output_ptr = output_tensor_list[i]["ptr"] - output_data = output_tensor_list[i]["tensor"] - if isinstance (output_data,bytes): - data_size = len(output_data) - else: - data_size = output_data.size * output_data.itemsize - ret = acl.rt.memcpy(output_ptr, - data_size, - data, size, self._copy_policy) - if ret != const.ACL_SUCCESS: - log_error("Memcpy inference output to local failed") - return None - - if isinstance (output_data,bytes): - output_data = np.frombuffer(output_data, dtype=output_tensor_list[i]["dtype"]).reshape(output_tensor_list[i]["shape"]) - output_tensor = output_data.copy() - else: - output_tensor = output_data - dataset.append(output_tensor) - - return dataset - - def _gen_output_tensor(self): - output_tensor_list = [] - for i in range(self._output_size): - dims = acl.mdl.get_output_dims(self._model_desc, i) - shape = tuple(dims[0]["dims"]) - datatype = acl.mdl.get_output_data_type(self._model_desc, i) - size = acl.mdl.get_output_size_by_index(self._model_desc, i) - - if datatype == const.ACL_FLOAT: - np_type = np.float32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_DOUBLE: - np_type = np.float64 - output_tensor 
= np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT64: - np_type = np.int64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT64: - np_type = np.uint64 - output_tensor = np.zeros( - size // 8, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT32: - np_type = np.int32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT32: - np_type = np.uint32 - output_tensor = np.zeros( - size // 4, dtype=np_type).reshape(shape) - elif datatype == const.ACL_FLOAT16: - np_type = np.float16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT16: - np_type = np.int16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_UINT16: - np_type = np.uint16 - output_tensor = np.zeros( - size // 2, dtype=np_type).reshape(shape) - elif datatype == const.ACL_INT8: - np_type = np.int8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - elif datatype == const.ACL_BOOL or datatype == const.ACL_UINT8: - np_type = np.uint8 - output_tensor = np.zeros( - size, dtype=np_type).reshape(shape) - else: - log_error("Unspport model output datatype ", datatype) - return None - - if not output_tensor.flags['C_CONTIGUOUS']: - output_tensor = np.ascontiguousarray(output_tensor) - - if "bytes_to_ptr" in dir(acl.util): - bytes_data=output_tensor.tobytes() - tensor_ptr=acl.util.bytes_to_ptr(bytes_data) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": bytes_data, - "shape":output_tensor.shape, - "dtype":output_tensor.dtype},) - else: - tensor_ptr = acl.util.numpy_to_ptr(output_tensor) - output_tensor_list.append({"ptr": tensor_ptr, - "tensor": output_tensor}) - - return output_tensor_list - - def _release_dataset(self, dataset, free_memory=False): - if not dataset: - return - - num = acl.mdl.get_dataset_num_buffers(dataset) - for i in range(num): - data_buf = acl.mdl.get_dataset_buffer(dataset, i) - if data_buf: - self._release_databuffer(data_buf, free_memory) - - ret = acl.mdl.destroy_dataset(dataset) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def _release_databuffer(self, data_buffer, free_memory=False): - if free_memory: - data_addr = acl.get_data_buffer_addr(data_buffer) - if data_addr: - acl.rt.free(data_addr) - - ret = acl.destroy_data_buffer(data_buffer) - if ret != const.ACL_SUCCESS: - log_error("Destroy data buffer error ", ret) - - def destroy(self): - """ - release resource of model inference - Args: - null - Returns: - null - """ - if self._is_destroyed: - return - - self._release_dataset(self._output_dataset, free_memory=True) - if self._model_id: - ret = acl.mdl.unload(self._model_id) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.unload error:", ret) - - if self._model_desc: - ret = acl.mdl.destroy_desc(self._model_desc) - if ret != const.ACL_SUCCESS: - log_error("acl.mdl.destroy_desc error:", ret) - - self._is_destroyed = True - resource_list.unregister(self) - log_info("AclLiteModel release source success") - - def __del__(self): - self.destroy() diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py deleted file mode 100644 index 9021be8..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_resource.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Copyright (R) @huawei.com, all rights reserved --*- coding:utf-8 -*- 
-CREATED: 2021-01-20 20:12:13 -MODIFIED: 2021-02-03 14:04:45 -""" -import threading -import acl -from acllite_logger import log_info -import acllite_utils as utils - -REGISTER = 0 -UNREGISTER = 1 - -class _ResourceList(object): - """Acl resources of current application - This class provide register inferace of acl resource, when application - exit, all register resource will release befor acl.rt.reset_device to - avoid program abnormal - """ - _instance_lock = threading.Lock() - - def __init__(self): - self.resources = [] - - def __new__(cls, *args, **kwargs): - if not hasattr(_ResourceList, "_instance"): - with _ResourceList._instance_lock: - if not hasattr(_ResourceList, "_instance"): - _ResourceList._instance = object.__new__( - cls, *args, **kwargs) - return _ResourceList._instance - - def register(self, resource): - """Resource register interface - Args: - resource: object with acl resource, the object must be has - method destroy() - """ - item = {"resource": resource, "status": REGISTER} - self.resources.append(item) - - def unregister(self, resource): - """Resource unregister interface - If registered resource release by self and no need _ResourceList - release, the resource object should unregister self - Args: - resource: registered resource - """ - for item in self.resources: - if resource == item["resource"]: - item["status"] = UNREGISTER - - def destroy(self): - """Destroy all register resource""" - for item in self.resources: - if item["status"] == REGISTER: - item["resource"].destroy() - item["status"] = UNREGISTER - -resource_list = _ResourceList() - -class AclLiteResource(object): - """ - AclLiteResource - """ - - def __init__(self, device_id=0): - self.device_id = device_id - self.context = None - self.stream = None - self.run_mode = None - - def init(self): - """ - init resource - """ - log_info("init resource stage:") - ret = acl.init() - utils.check_ret("acl.init", ret) - - ret = acl.rt.set_device(self.device_id) - utils.check_ret("acl.rt.set_device", ret) - - self.context, ret = acl.rt.create_context(self.device_id) - utils.check_ret("acl.rt.create_context", ret) - - self.stream, ret = acl.rt.create_stream() - utils.check_ret("acl.rt.create_stream", ret) - - self.run_mode, ret = acl.rt.get_run_mode() - utils.check_ret("acl.rt.get_run_mode", ret) - - log_info("Init resource success") - - def __del__(self): - log_info("acl resource release all resource") - resource_list.destroy() - if self.stream: - log_info("acl resource release stream") - acl.rt.destroy_stream(self.stream) - - if self.context: - log_info("acl resource release context") - acl.rt.destroy_context(self.context) - - log_info("Reset acl device ", self.device_id) - acl.rt.reset_device(self.device_id) - acl.finalize() - log_info("Release acl resource success") diff --git a/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py b/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py deleted file mode 100644 index 9ac84ea..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/acllite_utils.py +++ /dev/null @@ -1,261 +0,0 @@ -import numpy as np -import acl -import constants as const -from acllite_logger import log_error, log_info -import time - -from functools import wraps -DEBUG = True - -def check_ret(message, ret_int): - """Check int value is 0 or not - Args: - message: output log str - ret_int: check value that type is int - """ - if ret_int != 0: - raise Exception("{} failed ret_int={}" - .format(message, ret_int)) - -def check_none(message, ret_none): - """Check object is None or not - Args: 
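# A sketch of the ownership pattern above: AclLiteResource owns the device,
# context and stream, while objects registered with resource_list are
# destroyed first in __del__, before the context is destroyed and the device
# reset, which is what keeps teardown from aborting in acl.rt.reset_device.
def _resource_example():
    res = AclLiteResource(device_id=0)
    res.init()
    # ... create AclLiteModel / DvppVdec instances here; each one calls
    # resource_list.register(self), so it is released before `res` is.
    return res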
- message: output log str - ret_none: check object - """ - if ret_none is None: - raise Exception("{} failed" - .format(message)) - -def copy_data_device_to_host(device_data, data_size): - """Copy device data to host - Args: - device_data: data that to be copyed - data_size: data size - Returns: - None: copy failed - others: host data which copy from device_data - """ - host_buffer, ret = acl.rt.malloc_host(data_size) - if ret != const.ACL_SUCCESS: - log_error("Malloc host memory failed, error: ", ret) - return None - - ret = acl.rt.memcpy(host_buffer, data_size, - device_data, data_size, - const.ACL_MEMCPY_DEVICE_TO_HOST) - if ret != const.ACL_SUCCESS: - log_error("Copy device data to host memory failed, error: ", ret) - acl.rt.free_host(host_buffer) - return None - - return host_buffer - -def copy_data_device_to_device(device_data, data_size): - """Copy device data to device - Args: - device_data: data that to be copyed - data_size: data size - Returns: - None: copy failed - others: device data which copy from device_data - """ - device_buffer, ret = acl.rt.malloc(data_size, - const.ACL_MEM_MALLOC_NORMAL_ONLY) - if ret != const.ACL_SUCCESS: - log_error("Malloc device memory failed, error: ", ret) - return None - - ret = acl.rt.memcpy(device_buffer, data_size, - device_data, data_size, - const.ACL_MEMCPY_DEVICE_TO_DEVICE) - if ret != const.ACL_SUCCESS: - log_error("Copy device data to device memory failed, error: ", ret) - acl.rt.free(device_buffer) - return None - - return device_buffer - -def copy_data_host_to_device(host_data, data_size): - """Copy host data to device - Args: - host_data: data that to be copyed - data_size: data size - Returns: - None: copy failed - others: device data which copy from host_data - """ - device_buffer, ret = acl.rt.malloc(data_size, - const.ACL_MEM_MALLOC_NORMAL_ONLY) - if ret != const.ACL_SUCCESS: - log_error("Malloc device memory failed, error: ", ret) - return None - - ret = acl.rt.memcpy(device_buffer, data_size, - host_data, data_size, - const.ACL_MEMCPY_HOST_TO_DEVICE) - if ret != const.ACL_SUCCESS: - log_error("Copy device data to device memory failed, error: ", ret) - acl.rt.free(device_buffer) - return None - - return device_buffer - -def copy_data_host_to_host(host_data, data_size): - """Copy host data to host - Args: - host_data: data that to be copyed - data_size: data size - Returns: - None: copy failed - others: host data which copy from host_data - """ - host_buffer, ret = acl.rt.malloc_host(data_size) - if ret != const.ACL_SUCCESS: - log_error("Malloc host memory failed, error: ", ret) - return None - - ret = acl.rt.memcpy(host_buffer, data_size, - host_data, data_size, - const.ACL_MEMCPY_HOST_TO_HOST) - if ret != const.ACL_SUCCESS: - log_error("Copy host data to host memory failed, error: ", ret) - acl.rt.free_host(host_buffer) - return None - - return host_buffer - -def copy_data_to_dvpp(data, size, run_mode): - """Copy data to dvpp - Args: - data: data that to be copyed - data_size: data size - run_mode: device run mode - Returns: - None: copy failed - others: data which copy from host_data - """ - policy = const.ACL_MEMCPY_HOST_TO_DEVICE - if run_mode == const.ACL_DEVICE: - policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE - - dvpp_buf, ret = acl.media.dvpp_malloc(size) - check_ret("acl.rt.malloc_host", ret) - - ret = acl.rt.memcpy(dvpp_buf, size, data, size, policy) - check_ret("acl.rt.memcpy", ret) - - return dvpp_buf - -def copy_data_as_numpy(data, size, data_mem_type, run_mode): - """Copy data as numpy array - Args: - data: data that to 
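# A round-trip sketch built on the helpers above: push a numpy buffer to the
# device and read it back, freeing both copies. Assumes a host run mode.
import numpy as np

def _round_trip(arr):
    size = arr.size * arr.itemsize
    src = acl.util.bytes_to_ptr(arr.tobytes())
    dev = copy_data_host_to_device(src, size)
    check_none("copy_data_host_to_device", dev)
    host = copy_data_device_to_host(dev, size)
    check_none("copy_data_device_to_host", host)
    acl.rt.free(dev)
    acl.rt.free_host(host)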
be copyed - size: data size - data_mem_type: src data memory type - run_mode: device run mode - Returns: - None: copy failed - others: numpy array whoes data copy from host_data - """ - np_data = np.zeros(size, dtype=np.byte) - if "bytes_to_ptr" in dir(acl.util): - bytes_data=np_data.tobytes() - np_data_ptr=acl.util.bytes_to_ptr(bytes_data) - else: - np_data_ptr = acl.util.numpy_to_ptr(np_data) - - policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE - if run_mode == const.ACL_HOST: - if ((data_mem_type == const.MEMORY_DEVICE) or - (data_mem_type == const.MEMORY_DVPP)): - policy = const.ACL_MEMCPY_DEVICE_TO_HOST - elif data_mem_type == const.MEMORY_HOST: - policy = const.ACL_MEMCPY_HOST_TO_HOST - - ret = acl.rt.memcpy(np_data_ptr, size, data, size, policy) - check_ret("acl.rt.memcpy", ret) - if "bytes_to_ptr" in dir(acl.util): - np_data=np.frombuffer(bytes_data,dtype=np_data.dtype).reshape(np_data.shape) - return np_data - -def align_up(value, align): - """Align up int value - Args: - value:input data - align: align data - Return: - aligned data - """ - return int(int((value + align - 1) / align) * align) - -def align_up16(value): - """Align up data with 16 - Args: - value:input data - Returns: - 16 aligned data - """ - return align_up(value, 16) - -def align_up64(value): - """Align up data with 128 - Args: - value:input data - Returns: - 128 aligned data - """ - return align_up(value, 64) - -def align_up128(value): - """Align up data with 128 - Args: - value:input data - Returns: - 128 aligned data - """ - return align_up(value, 128) - -def align_up2(value): - """Align up data with 2 - Args: - value:input data - Returns: - 2 aligned data - """ - return align_up(value, 2) - -def yuv420sp_size(width, height): - """Calculate yuv420sp image size - Args: - width: image width - height: image height - Returns: - image data size - """ - return int(width * height * 3 / 2) - -def rgbu8_size(width, height): - """Calculate rgb 24bit image size - Args: - width: image width - height: image height - Returns: - rgb 24bit image data size - """ - return int(width * height * 3) - -def display_time(func): - """print func execute time""" - @wraps(func) - def wrapper(*args, **kwargs): - """wrapper caller""" - if DEBUG: - btime = time.time() - res = func(*args, **kwargs) - use_time = time.time() - btime - log_info("in %s, use time:%s" % (func.__name__, use_time)) - return res - else: - return func(*args, **kwargs) - return wrapper diff --git a/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py b/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py deleted file mode 100644 index c9129c2..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/cameracapture.py +++ /dev/null @@ -1,94 +0,0 @@ -# !/usr/bin/env python -# -*- coding:utf-8 -*- -# -from ctypes import * -import os -import time -import sys -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(BASE_DIR) - -from lib.acllite_so import libacllite -import constants as const -from acllite_image import AclLiteImage -from acllite_logger import log_error, log_info - -CAMERA_OK = 0 -CAMERA_ERROR = 1 - -CAMERA_CLOSED = 0 -CAMERA_OPENED = 1 - -class CameraOutputC(Structure): - """Ctypes parameter object for frame data""" - _fields_ = [ - ('size', c_int), - ('data', POINTER(c_ubyte)) - ] - -class CameraCapture(object): - """Atlas200dk board camera access class""" - def __init__(self, camera_id, fps=15, size=(1280, 720)): - """Create camera instance - Args: - camera_id: camera slot - fps: frame per second - size: frame resolution - """ - 
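# Worked numbers for the alignment helpers above, as DVPP typically requires
# them: width padded to a multiple of 16 and height to a multiple of 2 before
# sizing a YUV420SP buffer. The 900x601 frame here is illustrative.
def _dvpp_buffer_size(width, height):
    aligned_w = align_up16(width)    # align_up16(900) -> 912
    aligned_h = align_up2(height)    # align_up2(601)  -> 602
    return yuv420sp_size(aligned_w, aligned_h)  # 912 * 602 * 3 / 2 = 823536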
self._id = camera_id - self._fps = fps - self._width = size[0] - self._height = size[1] - self._size = int(self._width * self._height * 3 / 2) - self._status = CAMERA_CLOSED - if CAMERA_OK == self._open(): - self._status = CAMERA_OPENED - else: - log_error("Open camera %d failed" % (camera_id)) - - def _open(self): - ret = libacllite.OpenCameraEx(self._id, self._fps, - self._width, self._height) - if (ret != CAMERA_OK): - log_error("Open camera %d failed ,ret = %d" % (self._id, ret)) - return CAMERA_ERROR - self._status = CAMERA_OPENED - return CAMERA_OK - - def is_opened(self): - """Camera is opened or not""" - return (self._status == CAMERA_OPENED) - - def read(self): - """Read frame from camera""" - frame_data = CameraOutputC() - ret = libacllite.ReadCameraFrame(self._id, byref(frame_data)) - if (ret != CAMERA_OK): - log_error("Read camera %d failed" % (self._id)) - return None - - return AclLiteImage( - addressof(frame_data.data.contents), - self._width, - self._height, - 0, - 0, - self._size, - const.MEMORY_DVPP) - - def close(self): - """Close camera""" - log_info("Close camera ", self._id) - libacllite.CloseCameraEx(self._id) - - def __del__(self): - self.close() - -if __name__ == "__main__": - cap = Camera(camera_id=0, fps=15, size=(1280, 720)) - - start = time.time() - for i in range(0, 100): - image = cap.read() - log_info("Read 100 frame exhaust ", time.time() - start) - diff --git a/Samples/YOLOV5MultiInput/python/src/python/constants.py b/Samples/YOLOV5MultiInput/python/src/python/constants.py deleted file mode 100644 index 8c22249..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/constants.py +++ /dev/null @@ -1,217 +0,0 @@ -""" -Copyright (R) @huawei.com, all rights reserved --*- coding:utf-8 -*- -CREATED: 2020-6-04 20:12:13 -MODIFIED: 2020-6-06 14:04:45 -""" -SUCCESS = 0 -FAILED = 1 - -ACL_DEVICE = 0 -ACL_HOST = 1 - -MEMORY_NORMAL = 0 -MEMORY_HOST = 1 -MEMORY_DEVICE = 2 -MEMORY_DVPP = 3 -MEMORY_CTYPES = 4 - -IMAGE_DATA_NUMPY = 0 -IMAGE_DATA_BUFFER = 1 - -READ_VIDEO_OK = 0 - -# error code -ACL_SUCCESS = 0 -ACL_ERROR_INVALID_PARAM = 100000 -ACL_ERROR_UNINITIALIZE = 100001 -ACL_ERROR_REPEAT_INITIALIZE = 100002 -ACL_ERROR_INVALID_FILE = 100003 -ACL_ERROR_WRITE_FILE = 100004 -ACL_ERROR_INVALID_FILE_SIZE = 100005 -ACL_ERROR_PARSE_FILE = 100006 -ACL_ERROR_FILE_MISSING_ATTR = 100007 -ACL_ERROR_FILE_ATTR_INVALID = 100008 -ACL_ERROR_INVALID_DUMP_CONFIG = 100009 -ACL_ERROR_INVALID_PROFILING_CONFIG = 100010 -ACL_ERROR_INVALID_MODEL_ID = 100011 -ACL_ERROR_DESERIALIZE_MODEL = 100012 -ACL_ERROR_PARSE_MODEL = 100013 -ACL_ERROR_READ_MODEL_FAILURE = 100014 -ACL_ERROR_MODEL_SIZE_INVALID = 100015 -ACL_ERROR_MODEL_MISSING_ATTR = 100016 -ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017 -ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018 -ACL_ERROR_MODEL_NOT_DYNAMIC = 100019 -ACL_ERROR_OP_TYPE_NOT_MATCH = 100020 -ACL_ERROR_OP_INPUT_NOT_MATCH = 100021 -ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022 -ACL_ERROR_OP_ATTR_NOT_MATCH = 100023 -ACL_ERROR_OP_NOT_FOUND = 100024 -ACL_ERROR_OP_LOAD_FAILED = 100025 -ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026 -ACL_ERROR_FORMAT_NOT_MATCH = 100027 -ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028 -ACL_ERROR_KERNEL_NOT_FOUND = 100029 -ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030 -ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031 -ACL_ERROR_INVALID_QUEUE_ID = 100032 -ACL_ERROR_REPEAT_SUBSCRIBE = 100033 -ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034 -ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035 -ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036 -ACL_ERROR_REPEAT_FINALIZE = 100037 -ACL_ERROR_BAD_ALLOC = 
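# The __main__ demo above instantiates `Camera`, but the class defined in
# this file is `CameraCapture`; a corrected sketch of the same benchmark:
import time

def _camera_benchmark():
    cap = CameraCapture(camera_id=0, fps=15, size=(1280, 720))
    start = time.time()
    for _ in range(100):
        image = cap.read()
    log_info("Read 100 frames exhaust ", time.time() - start)
    cap.close()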
200000 -ACL_ERROR_API_NOT_SUPPORT = 200001 -ACL_ERROR_INVALID_DEVICE = 200002 -ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003 -ACL_ERROR_RESOURCE_NOT_MATCH = 200004 -ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005 -ACL_ERROR_STORAGE_OVER_LIMIT = 300000 -ACL_ERROR_INTERNAL_ERROR = 500000 -ACL_ERROR_FAILURE = 500001 -ACL_ERROR_GE_FAILURE = 500002 -ACL_ERROR_RT_FAILURE = 500003 -ACL_ERROR_DRV_FAILURE = 500004 -# rule for mem -ACL_MEM_MALLOC_HUGE_FIRST = 0 -ACL_MEM_MALLOC_HUGE_ONLY = 1 -ACL_MEM_MALLOC_NORMAL_ONLY = 2 -# rule for memory copy -ACL_MEMCPY_HOST_TO_HOST = 0 -ACL_MEMCPY_HOST_TO_DEVICE = 1 -ACL_MEMCPY_DEVICE_TO_HOST = 2 -ACL_MEMCPY_DEVICE_TO_DEVICE = 3 -# input -LAST_ONE = -1 -LAST_TWO = -2 -type_dict = { - "bool": 0, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "float16": 2, - "float32": 4, - "float64": 8, - "float_": 8 -} -NPY_BOOL = 0 -NPY_BYTE = 1 -NPY_UBYTE = 2 -NPY_SHORT = 3 -NPY_USHORT = 4 -NPY_INT = 5 -NPY_UINT = 6 -NPY_LONG = 7 -NPY_ULONG = 8 -NPY_LONGLONG = 9 -NPY_ULONGLONG = 10 - -ACL_DT_UNDEFINED = -1 -ACL_FLOAT = 0 -ACL_FLOAT16 = 1 -ACL_INT8 = 2 -ACL_INT32 = 3 -ACL_UINT8 = 4 -ACL_INT16 = 6 -ACL_UINT16 = 7 -ACL_UINT32 = 8 -ACL_INT64 = 9 -ACL_UINT64 = 10 -ACL_DOUBLE = 11 -ACL_BOOL = 12 - -# data format -ACL_FORMAT_UNDEFINED = -1 -ACL_FORMAT_NCHW = 0 -ACL_FORMAT_NHWC = 1 -ACL_FORMAT_ND = 2 -ACL_FORMAT_NC1HWC0 = 3 -ACL_FORMAT_FRACTAL_Z = 4 -ACL_DT_UNDEFINED = -1 -ACL_FLOAT = 0 -ACL_FLOAT16 = 1 -ACL_INT8 = 2 -ACL_INT32 = 3 -ACL_UINT8 = 4 -ACL_INT16 = 6 -ACL_UINT16 = 7 -ACL_UINT32 = 8 -ACL_INT64 = 9 -ACL_UINT64 = 10 -ACL_DOUBLE = 11 -ACL_BOOL = 12 -acl_dtype = { - "dt_undefined": -1, - "float": 0, - "float16": 1, - "int8": 2, - "int32": 3, - "uint8": 4, - "int16": 6, - "uint16": 7, - "uint32": 8, - "int64": 9, - "double": 11, - "bool": 12 -} -ACL_CALLBACK_NO_BLOCK = 0 -ACL_CALLBACK_BLOCK = 1 -PIXEL_FORMAT_YUV_400 = 0 # 0, YUV400 8bit -PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1 # 1, YUV420SP NV12 8bit -PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2 # 2, YUV420SP NV21 8bit -PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3 # 3, YUV422SP NV12 8bit -PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4 # 4, YUV422SP NV21 8bit -PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5 # 5, YUV444SP NV12 8bit -PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6 # 6, YUV444SP NV21 8bit -PIXEL_FORMAT_YUYV_PACKED_422 = 7 # 7, YUV422P YUYV 8bit -PIXEL_FORMAT_UYVY_PACKED_422 = 8 # 8, YUV422P UYVY 8bit -PIXEL_FORMAT_YVYU_PACKED_422 = 9 # 9, YUV422P YVYU 8bit -PIXEL_FORMAT_VYUY_PACKED_422 = 10 # 10, YUV422P VYUY 8bit -PIXEL_FORMAT_YUV_PACKED_444 = 11 # 11, YUV444P 8bit -PIXEL_FORMAT_RGB_888 = 12 # 12, RGB888 -PIXEL_FORMAT_BGR_888 = 13 # 13, BGR888 -PIXEL_FORMAT_ARGB_8888 = 14 # 14, ARGB8888 -PIXEL_FORMAT_ABGR_8888 = 15 # 15, ABGR8888 -PIXEL_FORMAT_RGBA_8888 = 16 # 16, RGBA8888 -PIXEL_FORMAT_BGRA_8888 = 17 # 17, BGRA8888 -PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18 # 18, YUV420SP 10bit -PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19 # 19, YVU420sp 10bit -PIXEL_FORMAT_YVU_PLANAR_420 = 20 # 20, YUV420P 8bit -# images format -IMG_EXT = ['.jpg', '.JPG', '.png', '.PNG', '.bmp', '.BMP', '.jpeg', '.JPEG'] - -ENCODE_FORMAT_UNKNOW = 0 -ENCODE_FORMAT_JPEG = 1 -ENCODE_FORMAT_PNG = 2 -ENCODE_FORMAT_YUV420_SP = 3 - -""" -enType 0 -0 H265 main level -1 H264 baseline level -2 H264 main level -3 H264 high level -""" -ENTYPE_H265_MAIN = 0 -ENTYPE_H264_BASE = 1 -ENTYPE_H264_MAIN = 2 -ENTYPE_H264_HIGH = 3 - -# h264 stream codec id -AV_CODEC_ID_H264 = 27 -# h265 stream codec id -AV_CODEC_ID_HEVC = 173 -# h264 baseline level 
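# A sketch of how the two lookup tables above cooperate: acl_dtype maps a
# dtype name to the ACL enum used in tensor descs, while type_dict gives the
# per-element byte width for sizing buffers.
def _buffer_bytes(dtype_name, element_count):
    acl_type = acl_dtype[dtype_name]   # e.g. "float16" -> ACL_FLOAT16 (1)
    return acl_type, element_count * type_dict[dtype_name]

# _buffer_bytes("float16", 1 * 3 * 640 * 640) -> (1, 2457600)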
-FF_PROFILE_H264_BASELINE = 66 -# h264 main level profile -FF_PROFILE_H264_MAIN = 77 -# h264 high level profile -FF_PROFILE_H264_HIGH = 100 -# h265 main level profile -FF_PROFILE_HEVC_MAIN = 1 diff --git a/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py b/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py deleted file mode 100644 index 5c82cdc..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/dvpp_vdec.py +++ /dev/null @@ -1,259 +0,0 @@ -import numpy as np -import acl -import queue - -import constants as const -import acllite_utils as utils -import acllite_logger as acl_log -from acllite_image import AclLiteImage - -READ_TIMEOUT = 5 -WAIT_INTERVAL = 0.1 - -class DvppVdec(object): - """Decode h264/h265 stream by dvpp vdec - Decode one frame of h264/h265 stream.The stream must be h264 main, baseline - or high level, annex-b format, or h265 main level.Output image is yuv420sp - Attributes: - _channel_id: dvpp vdec channel parameter, must global unique - _width: input frame width - _height:input frame height - _run_flag:deocde is running or not currently, callback thread daemon condition - _callbak_tid: decode callback thread id - _channel_desc: vdec channel desc handle - _ctx: current thread acl context - _entype: video stream encode type, dvpp vdec support: - const.ENTYPE_H265_MAIN = 0 H265 main level - const.ENTYPE_H264_BASE = 1 H264 baseline level - const.ENTYPE_H264_MAIN = 2 H264 main level - const.ENTYPE_H264_HIGH = 3 H264 high level - _format: output frame image format, use yuv420sp - _decod_complete_cnt: output decoded complete frames counter - _decode_cnt: input frames counter - _output_pic_size: output image data size - _frame_queue: output decoded frame image queue - """ - - def __init__(self, channel_id, width, height, entype, ctx, - output_format=const.PIXEL_FORMAT_YUV_SEMIPLANAR_420): - """Create dvpp vdec instance - Args: - channel_id: decode channel id, must be global unique - width: frame width - height: frame height - entype: video stream encode type - ctx: current thread acl context - output_format: output image format, support yuv420 nv12 and nv21 - """ - self._channel_id = channel_id - self._width = width - self._height = height - self._run_flag = True - self._callbak_tid = None - self._channel_desc = None - self._ctx = ctx - self._entype = entype - self._format = output_format - self._decode_complete_cnt = 0 - self._decode_cnt = 0 - self._output_pic_size = (self._width * self._height * 3) // 2 - self._frame_queue = queue.Queue(64) - self._frame_config = None - self._destory_channel_flag = False - print('dvpp init ') - - def _callback_thread_entry(self, args_list): - ret = acl.rt.set_context(self._ctx) - while self._run_flag is True: - ret = acl.rt.process_report(300) - - def _callback(self, input_stream_desc, output_pic_desc, user_data): - self._decode_complete_cnt += 1 - #print("callback ", self._decode_complete_cnt) - input_stream_data = acl.media.dvpp_get_stream_desc_data( - input_stream_desc) - input_stream_data_size = acl.media.dvpp_get_stream_desc_size( - input_stream_desc) - ret = acl.media.dvpp_destroy_stream_desc(input_stream_desc) - - self._get_pic_desc_data(output_pic_desc, user_data) - - def _get_pic_desc_data(self, pic_desc, user_data): - pic_data = acl.media.dvpp_get_pic_desc_data(pic_desc) - pic_data_size = acl.media.dvpp_get_pic_desc_size(pic_desc) - ret_code = acl.media.dvpp_get_pic_desc_ret_code(pic_desc) - if ret_code: - channel_id, frame_id = user_data - acl_log.log_error("Decode channel %d frame %d failed, error %d" - % 
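# A sketch of selecting the vdec entype for DvppVdec from ffmpeg-style stream
# metadata, using the codec ids and profile constants defined above; the
# fallback to main profile for unknown H.264 profiles is an assumption.
def _select_entype(codec_id, profile):
    if codec_id == const.AV_CODEC_ID_HEVC:
        return const.ENTYPE_H265_MAIN
    if codec_id == const.AV_CODEC_ID_H264:
        return {const.FF_PROFILE_H264_BASELINE: const.ENTYPE_H264_BASE,
                const.FF_PROFILE_H264_MAIN: const.ENTYPE_H264_MAIN,
                const.FF_PROFILE_H264_HIGH: const.ENTYPE_H264_HIGH}.get(
                    profile, const.ENTYPE_H264_MAIN)
    raise ValueError("unsupported codec id %d" % codec_id)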
-            acl.media.dvpp_free(pic_data)
-        else:
-            image = AclLiteImage(pic_data, self._width, self._height, 0, 0,
-                                 pic_data_size, const.MEMORY_DVPP)
-            self._frame_queue.put(image)
-        acl.media.dvpp_destroy_pic_desc(pic_desc)
-
-    def init(self):
-        """Init dvpp vdec
-        Returns:
-            const.SUCCESS: init success
-            const.FAILED: init failed
-        """
-        self._channel_desc = acl.media.vdec_create_channel_desc()
-        self._callbak_tid, ret = acl.util.start_thread(
-            self._callback_thread_entry, [])
-        acl.media.vdec_set_channel_desc_channel_id(self._channel_desc,
-                                                   self._channel_id)
-        acl.media.vdec_set_channel_desc_thread_id(self._channel_desc,
-                                                  self._callbak_tid)
-        acl.media.vdec_set_channel_desc_callback(self._channel_desc,
-                                                 self._callback)
-
-        acl.media.vdec_set_channel_desc_entype(self._channel_desc,
-                                               self._entype)
-        acl.media.vdec_set_channel_desc_out_pic_format(self._channel_desc,
-                                                       self._format)
-
-        out_mode = acl.media.vdec_get_channel_desc_out_mode(self._channel_desc)
-        if out_mode != 0:
-            acl_log.log_error("Dvpp vdec out mode(%d) is invalid" % (out_mode))
-            return const.FAILED
-
-        acl.media.vdec_set_channel_desc_out_mode(self._channel_desc,
-                                                 out_mode)
-        acl.media.vdec_create_channel(self._channel_desc)
-
-        self._frame_config = acl.media.vdec_create_frame_config()
-        if self._frame_config is None:
-            acl_log.log_error("Create dvpp frame config failed")
-            return const.FAILED
-
-        return const.SUCCESS
-
-    def _thread_join(self):
-        if self._callbak_tid is not None:
-            self._run_flag = False
-            ret = acl.util.stop_thread(self._callbak_tid)
-            self._callbak_tid = None
-
-    def process(self, input_data, input_size, user_data):
-        """Decode one frame
-        Args:
-            input_data: input frame data
-            input_size: input frame data size
-
-        Returns:
-            const.SUCCESS: process success
-            const.FAILED: process failed
-        """
-        input_stream_desc = self._create_input_pic_stream_desc(input_data,
-                                                               input_size)
-        if input_stream_desc is None:
-            acl_log.log_error("Dvpp vdec decode frame failed for "
-                              "create input stream desc error")
-            return const.FAILED
-
-        output_pic_desc = self._create_output_pic_desc()
-        if output_pic_desc is None:
-            acl_log.log_error("Dvpp vdec decode frame failed for "
-                              "create output pic desc error")
-            return const.FAILED
-
-        ret = acl.media.vdec_send_frame(self._channel_desc, input_stream_desc,
-                                        output_pic_desc, self._frame_config,
-                                        user_data)
-        if ret:
-            acl_log.log_error("Dvpp vdec send frame failed, error ", ret)
-            return const.FAILED
-
-        self._decode_cnt += 1
-        #print("send frame ", self._decode_cnt)
-
-        return const.SUCCESS
-
-    def _create_input_pic_stream_desc(self, input_data, input_size):
-        stream_desc = acl.media.dvpp_create_stream_desc()
-        if stream_desc is None:
-            acl_log.log_error("Create dvpp vdec input pic stream desc failed")
-            return None
-
-        acl.media.dvpp_set_stream_desc_size(stream_desc, input_size)
-        acl.media.dvpp_set_stream_desc_data(stream_desc, input_data)
-
-        return stream_desc
-
-    def _create_output_pic_desc(self):
-        output_buffer, ret = acl.media.dvpp_malloc(self._output_pic_size)
-        if (output_buffer is None) or ret:
-            acl_log.log_error(
-                "Dvpp vdec malloc output memory failed, "
-                "size %d, error %d" %
-                (self._output_pic_size, ret))
-            return None
-
-        pic_desc = acl.media.dvpp_create_pic_desc()
-        if pic_desc is None:
-            acl_log.log_error("Create dvpp vdec output pic desc failed")
-            return None
-
-        acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer)
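        # For reference: self._output_pic_size set in __init__ is the byte
        # size of one YUV420SP (NV12) frame: width * height luma bytes plus
        # (width * height) / 2 interleaved chroma bytes, i.e.
        # (width * height * 3) // 2. For example (illustrative), a 1920x1080
        # stream needs 1920 * 1080 * 3 // 2 = 3110400 bytes per decoded frame.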
acl.media.dvpp_set_pic_desc_size(pic_desc, self._output_pic_size) - acl.media.dvpp_set_pic_desc_format(pic_desc, self._format) - - return pic_desc - - def destroy(self): - """Release dvpp vdec resource""" - #print("vdec destroy****************") - if self._channel_desc is not None: - ret = acl.media.vdec_destroy_channel(self._channel_desc) - self._channel_desc = None - - self._thread_join() - - if self._frame_config is not None: - acl.media.vdec_destroy_frame_config(self._frame_config) - self._frame_config = None - self._destory_channel_flag = True - - def is_finished(self): - """Video decode finished""" - return ((self._decode_cnt > 0) and - (self._decode_complete_cnt >= self._decode_cnt)) - - def read(self, no_wait=False): - """Read decoded frame - no_wait: Get image without wait. If set this arg True, and - return image is None, should call is_finished() method - to confirm decode finish or failed - - Returns: - 1. const.SUCCESS, not None: get image success - 2. const.SUCCESS, None: all frames decoded and be token off - 3. const.FAILED, None: Has frame not decoded, but no image decoded, - it means decode video failed - """ - image = None - ret = const.SUCCESS - # received eos frame and all received frame decode complete - if no_wait or self.is_finished(): - try: - image = self._frame_queue.get_nowait() - except queue.Empty: - acl_log.log_info("No decode frame in queue anymore") - else: - try: - image = self._frame_queue.get(timeout=READ_TIMEOUT) - except queue.Empty: - ret = const.FAILED - acl_log.log_error("Read channel id %d frame timeout, " - "receive frame %d, decoded %d" - % (self._channel_id, self._decode_cnt, - self._decode_complete_cnt)) - return ret, image - diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/lib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py b/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py deleted file mode 100644 index c2fde93..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/lib/acllite_so.py +++ /dev/null @@ -1,38 +0,0 @@ -import threading -import ctypes -import os -import platform - -import acl - -from constants import ACL_HOST, ACL_DEVICE - - -def _load_lib_acllite(): - run_mode, ret = acl.rt.get_run_mode() - - lib = None - if run_mode == ACL_DEVICE: - cur_dir = os.path.dirname(os.path.abspath(__file__)) - so_path = os.path.join(cur_dir, 'atlas200dk/libpython_acllite.so') - lib=ctypes.CDLL(so_path) - - return lib - - -class _AclLiteLib(object): - _instance_lock=threading.Lock() - lib=_load_lib_acllite() - - def __init__(self): - pass - - def __new__(cls, *args, **kwargs): - if not hasattr(_AclLiteLib, "_instance"): - with _AclLiteLib._instance_lock: - if not hasattr(_AclLiteLib, "_instance"): - _AclLiteLib._instance=object.__new__( - cls, *args, **kwargs) - return _AclLiteLib._instance - -libacllite=_AclLiteLib.lib diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile b/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile deleted file mode 100644 index 9ba097a..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/lib/src/Makefile +++ /dev/null @@ -1,88 +0,0 @@ -TOPDIR := $(patsubst %,%,$(CURDIR)) - -ifndef CPU_ARCH -$(error "Can not find CPU_ARCH env, please set it in environment!.") -endif - -ifndef INSTALL_DIR -$(error "Can not find INSTALL_DIR env, please set it in environment!.") -endif - -ifndef THIRDPART_PATH -$(error "Can not find THIRDPART_PATH 
env, please set it in environment!.") -endif - -media_mini_exist = $(shell if [ -f ${INSTALL_DIR}/driver/libmedia_mini.so ]; then echo "exist"; else echo "notexist"; fi;) - -ifeq ($(CPU_ARCH), aarch64) - CC := aarch64-linux-gnu-g++ - OUT_DIR = ../atlas200dk -else ifeq ($(CPU_ARCH), x86_64) - CC := g++ -else - $(error "Unsupported param: "$(CPU_ARCH)) -endif - -LOCAL_MODULE_NAME := libpython_acllite.so - -LOCAL_DIR := . -OBJ_DIR = $(OUT_DIR)/obj -DEPS_DIR = $(OUT_DIR)/deps -LOCAL_LIBRARY=$(OUT_DIR)/$(LOCAL_MODULE_NAME) -OUT_INC_DIR = $(OUT_DIR)/include - -INC_DIR = \ - -I./ \ - -I../include \ - -I$(INSTALL_DIR)/driver/ \ - -I$(INSTALL_DIR)/runtime/include/ \ - -I$(THIRDPART_PATH)/include/ \ - -I$(THIRDPART_PATH)/include/presenter/agent/ \ - #-I$(DDK_PATH)/compiler/include/protobuf - -CC_FLAGS := $(INC_DIR) -DENABLE_DVPP_INTERFACE -std=c++11 -fPIC -Wall -O2 -LNK_FLAGS := \ - -Wl,-rpath-link=$(INSTALL_DIR)/runtime/lib64/stub \ - -Wl,-rpath-link=$(THIRDPART_PATH)/lib \ - -L$(INSTALL_DIR)/runtime/lib64/stub \ - -L$(THIRDPART_PATH)/lib \ - -lascendcl \ - -lacl_dvpp \ - -lstdc++ \ - -lpthread \ - -shared - -ifeq ($(media_mini_exist),exist) -LNK_FLAGS += -L${INSTALL_DIR}/driver -lmedia_mini -endif - -SRCS_ALL := $(patsubst $(LOCAL_DIR)/%.cpp, %.cpp, $(shell find $(LOCAL_DIR) -name "*.cpp")) -ifeq ($(media_mini_exist),exist) -SRCS := $(SRCS_ALL) -else -SRCS := $(subst camera.cpp, ,$(SRCS_ALL)) -endif - -OBJS := $(addprefix $(OBJ_DIR)/, $(patsubst %.cpp, %.o,$(SRCS))) -ALL_OBJS := $(OBJS) - -all: do_pre_build do_build - -do_pre_build: - $(Q)echo - do [$@] - $(Q)mkdir -p $(OBJ_DIR) - -do_build: $(LOCAL_LIBRARY) | do_pre_build - $(Q)echo - do [$@] - -$(LOCAL_LIBRARY): $(ALL_OBJS) - $(Q)echo [LD] $@ - $(Q)$(CC) $(CC_FLAGS) -o $@ $^ -Wl,--whole-archive -Wl,--no-whole-archive -Wl,--start-group -Wl,--end-group -Wl,-rpath='/home/HwHiAiUser/HIAI_PROJECTS/ascend_lib' $(LNK_FLAGS) - -$(OBJS): $(OBJ_DIR)/%.o : %.cpp | do_pre_build - $(Q)echo [CC] $@ - $(Q)mkdir -p $(dir $@) - $(Q)$(CC) $(CC_FLAGS) $(INC_DIR) -c -fstack-protector-all $< -o $@ - -clean: - rm -rf $(OUT_DIR)/* diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h b/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h deleted file mode 100644 index 0e5851b..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/lib/src/acllite_utils.h +++ /dev/null @@ -1,67 +0,0 @@ -/** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at - -* http://www.apache.org/licenses/LICENSE-2.0 - -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
- -* File utils.h -* Description: handle file operations -*/ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include "acl/acl.h" - -extern "C" { - -/** - * @brief calculate YUVSP420 image size - * @param [in] width: image width - * @param [in] height: image height - * @return bytes size of image - */ -#define YUV420SP_SIZE(width, height) ((width) * (height) * 3 / 2) - -/** - * @brief Write acl error level log to host log - * @param [in] fmt: the input format string - * @return none - */ -#define ACLLITE_LOG_ERROR(fmt, ...) \ - do{aclAppLog(ACL_ERROR, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ - fprintf(stdout, "[ERROR] " fmt "\n", ##__VA_ARGS__);}while(0) - -/** - * @brief Write acl info level log to host log - * @param [in] fmt: the input format string - * @return none - */ -#define ACLLITE_LOG_INFO(fmt, ...) \ - do{aclAppLog(ACL_INFO, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ - fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) - -/** - * @brief Write acl debug level log to host log - * @param [in] fmt: the input format string - * @return none - */ -#define ACLLITE_LOG_DEBUG(fmt, ...) \ - do{aclAppLog(ACL_DEBUG, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ - fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) -} \ No newline at end of file diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp deleted file mode 100644 index d280907..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.cpp +++ /dev/null @@ -1,167 +0,0 @@ -/** - * ============================================================================ - * - * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1 Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2 Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3 Neither the names of the copyright holders nor the names of the - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * ============================================================================
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "acl/acl.h"
-#include "acl/ops/acl_dvpp.h"
-#include "acllite_utils.h"
-#include "camera.h"
-
-using namespace std;
-
-extern "C" {
-#include "peripheral_api.h"
-#include "camera.h"
-CameraManager g_CameraMgr;
-
-void HwInit() {
-    if (!g_CameraMgr.hwInited) {
-        MediaLibInit();
-        g_CameraMgr.hwInited = 1;
-    }
-}
-
-int CameraInit(int id, int fps, int width, int height) {
-    Camera& cap = CAMERA(id);
-    cap.frameSize = YUV420SP_SIZE(width, height);
-    cap.id = id;
-    cap.fps = fps;
-    cap.width = width;
-    cap.height = height;
-    cap.inited = true;
-
-    return ACLLITE_OK;
-}
-
-int ConfigCamera(int id, int fps, int width, int height) {
-    int ret = SetCameraProperty(id, CAMERA_PROP_FPS, &fps);
-    if (ret == LIBMEDIA_STATUS_FAILED) {
-        ACLLITE_LOG_ERROR("Set camera fps failed");
-        return ACLLITE_ERROR;
-    }
-
-    CameraResolution resolution;
-    resolution.width = width;
-    resolution.height = height;
-    ret = SetCameraProperty(id, CAMERA_PROP_RESOLUTION, &resolution);
-    if (ret == LIBMEDIA_STATUS_FAILED) {
-        ACLLITE_LOG_ERROR("Set camera resolution failed");
-        return ACLLITE_ERROR;
-    }
-
-    CameraCapMode mode = CAMERA_CAP_ACTIVE;
-    ret = SetCameraProperty(id, CAMERA_PROP_CAP_MODE, &mode);
-    if (ret == LIBMEDIA_STATUS_FAILED) {
-        ACLLITE_LOG_ERROR("Set camera mode:%d failed", mode);
-        return ACLLITE_ERROR;
-    }
-
-    return ACLLITE_OK;
-}
-
-int OpenCameraEx(int id, int fps, int width, int height) {
-    if ((id < 0) || (id >= CAMERA_NUM)) {
-        ACLLITE_LOG_ERROR("Open camera failed for invalid id %d", id);
-        return ACLLITE_ERROR;
-    }
-
-    HwInit();
-
-    CameraStatus status = QueryCameraStatus(id);
-    if (status == CAMERA_STATUS_CLOSED) {
-        // Open camera
-        if (LIBMEDIA_STATUS_FAILED == OpenCamera(id)) {
-            ACLLITE_LOG_ERROR("Camera%d closed, and open failed.", id);
-            return ACLLITE_ERROR;
-        }
-    } else if (status != CAMERA_STATUS_OPEN) {
-        // The camera is neither closed nor open, so treat it as abnormal
-        ACLLITE_LOG_ERROR("Invalid camera%d status %d", id, status);
-        return ACLLITE_ERROR;
-    }
-
-    // Set camera properties
-    if (ACLLITE_OK != ConfigCamera(id, fps, width, height)) {
-        CloseCamera(id);
-        ACLLITE_LOG_ERROR("Set camera%d property failed", id);
-        return ACLLITE_ERROR;
-    }
-
-    if (!CAMERA(id).inited) {
-        CameraInit(id, fps, width, height);
-    }
-
-    ACLLITE_LOG_INFO("Open camera %d success", id);
-
-    return ACLLITE_OK;
}
-
-int ReadCameraFrame(int id, CameraOutput& frame) {
-    int size = CAMERA(id).frameSize;
-    void* data = nullptr;
-    auto aclRet = acldvppMalloc(&data, size);
-    if (aclRet != ACL_SUCCESS) {
-        ACLLITE_LOG_ERROR("acl malloc dvpp data failed, dataSize %d, error %d",
-                          size, aclRet);
-        return ACLLITE_ERROR;
-    }
-
-    int ret = ReadFrameFromCamera(id, (void*)data, (int *)&size);
-    if ((ret == LIBMEDIA_STATUS_FAILED) ||
-        (size != CAMERA(id).frameSize)) {
-        acldvppFree(data);
-        ACLLITE_LOG_ERROR("Get image from camera %d failed, size %d", id, size);
-        return ACLLITE_ERROR;
-    }
-    frame.size = size;
-    frame.data = (uint8_t*)data;
-
-    return ACLLITE_OK;
-}
-
-int CloseCameraEx(int cameraId) {
-    if (LIBMEDIA_STATUS_FAILED == CloseCamera(cameraId)) {
-        ACLLITE_LOG_ERROR("Close camera %d failed", cameraId);
-        return ACLLITE_ERROR;
-    }
-
-    return ACLLITE_OK;
-}
-
-}
diff --git a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h b/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h
deleted file mode 100644
index ca40d61..0000000
---
a/Samples/YOLOV5MultiInput/python/src/python/lib/src/camera.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * ============================================================================ - * - * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1 Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2 Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3 Neither the names of the copyright holders nor the names of the - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * ============================================================================
- */
-#ifndef _CAMERA_H
-#define _CAMERA_H
-
-#define CAMERA_NUM (2)
-#define CAMERA(i) (g_CameraMgr.cap[i])
-
-const int ACLLITE_OK = 0;
-const int ACLLITE_ERROR = 1;
-
-struct CameraOutput {
-    int size;
-    uint8_t* data;
-};
-
-struct Camera {
-    bool inited = false;
-    int id = 255;
-    int fps = 0;
-    int width = 0;
-    int height = 0;
-    int frameSize = 0;
-};
-
-struct CameraManager {
-    bool hwInited = false;
-    Camera cap[CAMERA_NUM];
-};
-
-#endif
diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py
deleted file mode 100644
index a69c1a8..0000000
--- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_agent.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# !/usr/bin/env python
-# -*- coding:utf-8 -*-
-import time
-from threading import Thread
-import sys
-
-from acllite_logger import log_error, log_info
-from presenteragent.socket_client import AgentSocket
-import presenteragent.presenter_message as pm
-import presenteragent.presenter_datatype as datatype
-
-
-class PresenterAgent(object):
-    """Message proxy to the presenter server"""
-    def __init__(self, server_ip, port):
-        self.socket = AgentSocket(server_ip, port)
-        self._closed = False
-        self.heart_beat_thread = None
-
-    def connect_server(self):
-        """Connect to the presenter server"""
-        return self.socket.connect()
-
-    def start_heart_beat_thread(self):
-        """Start the thread that sends heartbeat messages"""
-        self.heart_beat_thread = Thread(target=self._keep_alive)
-        self.heart_beat_thread.start()
-
-    def _keep_alive(self):
-        msg = pm.heartbeat_message()
-
-        while True:
-            if self._closed:
-                log_error("Heartbeat thread exit")
-                break
-
-            self.socket.send_msg(msg)
-            time.sleep(2)
-
-    def exit(self):
-        """Proxy exit"""
-        self.socket.close()
-        self._closed = True
-
-
-def StartPresenterAgent(
-        msg_queue,
-        server_ip,
-        port,
-        open_status,
-        data_respone_counter):
-    """Start up the presenter agent"""
-    agent = PresenterAgent(server_ip, port)
-    ret = agent.connect_server()
-    if ret:
-        log_error("Connect server failed, ret =", ret)
-        return
-
-    open_status.value = datatype.STATUS_CONNECTED
-
-    while True:
-        data = msg_queue.get()
-        if data is None:
-            continue
-
-        if isinstance(data, datatype.FinishMsg):
-            log_info("Receive presenter agent exit notification, queue size ",
-                     msg_queue.qsize())
-            time.sleep(0.1)
-            agent.exit()
-            break
-
-        agent.socket.send_msg(data)
-        msg_name, msg_body = agent.socket.recv_msg()
-        if (msg_name is None) or (msg_body is None):
-            log_error("Recv invalid message, message name ", msg_name)
-            continue
-
-        if ((open_status.value == datatype.STATUS_CONNECTED) and
-                pm.is_open_channel_response(msg_name)):
-            log_info("Received open channel response")
-            open_status.value = datatype.STATUS_OPENED
-            agent.start_heart_beat_thread()
-            log_info(
-                "presenter agent change connect_status to ",
-                open_status.value)
-
-        if ((open_status.value == datatype.STATUS_OPENED) and
-                pm.is_image_frame_response(msg_name)):
-            data_respone_counter.value += 1
-            #log_info("send ok ", data_respone_counter.value)
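For orientation, the intended lifecycle of this proxy looks roughly like the sketch below (hypothetical address and port; in this sample the agent is actually driven through PresenterChannel in presenter_channel.py):

```python
from presenteragent.presenter_agent import PresenterAgent

agent = PresenterAgent("127.0.0.1", 7006)  # hypothetical server address/port
if agent.connect_server() == 0:            # 0 means the TCP connect succeeded
    agent.start_heart_beat_thread()        # sends a heartbeat every 2 seconds
    # ... exchange open-channel / image-frame messages via agent.socket ...
    agent.exit()                           # close the socket, stop the heartbeat
```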
diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py
deleted file mode 100644
index c5557ea..0000000
--- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_channel.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# !/usr/bin/env python
-# -*- coding:utf-8 -*-
-import time
-import configparser
-from multiprocessing import Process, Queue, Manager
-import queue
-import numpy as np
-import sys
-# sys.path.append("..")
-
-import acl
-import constants as const
-from acllite_logger import log_error, log_info
-from acllite_image import AclLiteImage
-
-
-import presenteragent.presenter_datatype as dtype
-import presenteragent.presenter_agent as agent
-import presenteragent.presenter_message as pm
-
-
-class PresenterChannel(object):
-    """Communication channel between presenter agent and server"""
-    def __init__(self, server_ip, port, name='video',
-                 content_type=dtype.CONTENT_TYPE_VIDEO):
-        """Create instance"""
-        self._server_ip = server_ip
-        self._port = port
-        self._type = content_type
-        self._name = name
-        self.agent_msg_queue = Queue()
-        self.open_status = Manager().Value('i', dtype.STATUS_DISCONNECT)
-        self.data_respone_counter = Manager().Value('i', 0)
-        self._send_counter = 0
-
-    def startup(self):
-        """Create the channel and connect to the presenter server
-        Returns:
-            0 connect success
-            1 connect failed
-        """
-        agent_process = Process(
-            target=agent.StartPresenterAgent,
-            args=(
-                self.agent_msg_queue,
-                self._server_ip,
-                self._port,
-                self.open_status,
-                self.data_respone_counter))
-        agent_process.start()
-        time.sleep(0.5)
-        self._send_open_channel_request(self._name, self._type)
-        return self._wait_open_status(dtype.STATUS_OPENED)
-
-    def _wait_open_status(self, listen_status):
-        ret = dtype.STATUS_ERROR
-        for i in range(0, 100):
-            time.sleep(0.001)
-            if self.open_status.value == listen_status:
-                log_info("Open status is %d now" % (listen_status))
-                ret = dtype.STATUS_OK
-                break
-        return ret
-
-    def send_message(self, data):
-        """Send a message to the presenter server"""
-        self.agent_msg_queue.put(data)
-        self._send_counter += 1
-
-    def _send_open_channel_request(self, channel_name, content_type):
-        request_msg = pm.open_channel_request(channel_name, content_type)
-        self.send_message(request_msg)
-
-    def send_detection_data(self, image_width, image_height,
-                            image, detection_result):
-        """Send an image frame request to the presenter server"""
-        image_data = None
-        if isinstance(image, AclLiteImage):
-            image_data = image.byte_data_to_np_array()
-        elif isinstance(image, np.ndarray):
-            image_data = image
-        else:
-            log_error("Invalid data to send, ", image)
-            return False
-
-        request_msg = pm.image_frame_request(image_width, image_height,
-                                             image_data.tobytes(),
-                                             detection_result)
-        self.send_message(request_msg)
-
-        return True
-
-    def send_image(self, image_width, image_height, image):
-        """Send an image frame request that only contains an image"""
-        detection_result = []
-        return self.send_detection_data(image_width, image_height,
-                                        image, detection_result)
-
-    def _send_heart_beat_message(self):
-        msg = pm.heartbeat_message()
-        self.send_message(msg)
-
-    def close(self):
-        """Close channel"""
-        if self.open_status.value == dtype.STATUS_EXITTED:
-            return
-
-        log_info("Presenter channel close...")
-        eos = dtype.FinishMsg("exit")
-        self.send_message(eos)
-        while self.agent_msg_queue.qsize() > 0:
-            time.sleep(0.001)
-        self.open_status.value = dtype.STATUS_EXITTED
-
-    def __del__(self):
-        self.close()
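For reference, the get_channel_config()/open_channel() helpers below read an INI file with a [baseconf] section; a minimal sketch (all names and values are placeholders):

```python
# Contents of a hypothetical param.conf consumed by open_channel():
#
#   [baseconf]
#   presenter_server_ip = 127.0.0.1
#   presenter_server_port = 7006
#   channel_name = video
#   content_type = 1
#
import presenteragent.presenter_channel as presenter_channel

channel = presenter_channel.open_channel("param.conf")  # returns None on failure
```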
-
-
-def get_channel_config(config_file):
-    """Get connect parameters from the config file"""
-    config = configparser.ConfigParser()
-    config.read(config_file)
-    presenter_server_ip = config['baseconf']['presenter_server_ip']
-    port = int(config['baseconf']['presenter_server_port'])
-    channel_name = config['baseconf']['channel_name']
-    content_type = int(config['baseconf']['content_type'])
-
-    log_info(
-        "presenter server ip %s, port %d, channel name %s, "
-        "type %d" %
-        (presenter_server_ip, port, channel_name, content_type))
-    return presenter_server_ip, port, channel_name, content_type
-
-
-def open_channel(config_file):
-    """Connect to the presenter server"""
-    server_ip, port, channel_name, content_type = get_channel_config(
-        config_file)
-    channel = PresenterChannel(server_ip, port, channel_name, content_type)
-    ret = channel.startup()
-    if ret:
-        log_error("ERROR: Open channel failed")
-        return None
-    return channel
diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py
deleted file mode 100644
index b19415a..0000000
--- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_datatype.py
+++ /dev/null
@@ -1,70 +0,0 @@
-STATUS_DISCONNECT = 0
-STATUS_CONNECTED = 1
-STATUS_OPEN_CH_REQUEST = 2
-STATUS_OPENED = 3
-STATUS_EXITING = 4
-STATUS_EXITTED = 5
-
-CONTENT_TYPE_IMAGE = 0
-CONTENT_TYPE_VIDEO = 1
-
-STATUS_OK = 0
-STATUS_ERROR = 1
-
-
-class Point(object):
-    """
-    point coordinate
-    """
-
-    def __init__(self, x=0, y=0):
-        self.x = x
-        self.y = y
-
-
-class Box(object):
-    """
-    object rectangle area
-    """
-
-    def __init__(self, lt, rb):
-        # lt and rb are (x, y) tuples; unpack them into Point coordinates
-        self.lt = Point(*lt)
-        self.rb = Point(*rb)
-
-    def box_valid(self):
-        """
-        verify that the box coordinates are valid
-        """
-        return ((self.lt.x >= 0)
-                and (self.lt.y >= 0)
-                and (self.rb.x >= self.lt.x)
-                and (self.rb.y >= self.lt.y))
-
-
-class ObjectDetectionResult(object):
-    """
-    object detection information, including object position, confidence and label
-    """
-
-    def __init__(self, ltx=0, lty=0, rbx=0, rby=0, text=None):
-        self.object_class = 0
-        self.confidence = 0
-        self.box = Box((ltx, lty), (rbx, rby))
-        self.result_text = text
-
-    def check_box_vaild(self, width, height):
-        """
-        verify that the object position is valid
-        """
-        return (self.box.box_valid() and
-                (self.box.rb.x <= width) and
-                (self.box.rb.y <= height))
-
-
-class FinishMsg(object):
-    """
-    the message that notifies the presenter agent to exit
-    """
-
-    def __init__(self, data):
-        self.data = data
diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto
deleted file mode 100644
index 879d557..0000000
--- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.proto
+++ /dev/null
@@ -1,67 +0,0 @@
-syntax = "proto3";
-
-package ascend.presenter.proto;
-
-enum OpenChannelErrorCode {
-  kOpenChannelErrorNone = 0;
-  kOpenChannelErrorNoSuchChannel = 1;
-  kOpenChannelErrorChannelAlreadyOpened = 2;
-  kOpenChannelErrorOther = -1;
-}
-
-enum ChannelContentType {
-  kChannelContentTypeImage = 0;
-  kChannelContentTypeVideo = 1;
-}
-
-// By the Protocol Buffer Style Guide, field names need to use
-// underscore_separated_names
-message OpenChannelRequest {
-  string channel_name = 1;
-  ChannelContentType content_type = 2;
-}
-
-message OpenChannelResponse {
-  OpenChannelErrorCode error_code = 1;
-  string error_message = 2;
-}
-
-message
HeartbeatMessage { - -} - -enum ImageFormat { - kImageFormatJpeg = 0; -} - -message Coordinate { - uint32 x = 1; - uint32 y = 2; -} - -message Rectangle_Attr { - Coordinate left_top = 1; - Coordinate right_bottom = 2; - string label_text = 3; -} - -message PresentImageRequest { - ImageFormat format = 1; - uint32 width = 2; - uint32 height = 3; - bytes data = 4; - repeated Rectangle_Attr rectangle_list = 5; -} - -enum PresentDataErrorCode { - kPresentDataErrorNone = 0; - kPresentDataErrorUnsupportedType = 1; - kPresentDataErrorUnsupportedFormat = 2; - kPresentDataErrorOther = -1; -} - -message PresentImageResponse { - PresentDataErrorCode error_code = 1; - string error_message = 2; -} - diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py deleted file mode 100644 index 383eff6..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message.py +++ /dev/null @@ -1,70 +0,0 @@ -# !/usr/bin/env python -# -*- coding:utf-8 -*- -import struct -import socket - -import presenteragent.presenter_message_pb2 as pb2 - - -def pack_message(msg_name, msg_data): - """Pack message name and data to byte stream""" - buf = msg_data.SerializeToString() - msg_body_len = len(buf) - msg_name_len = len(msg_name) - msg_total_len = msg_name_len + msg_body_len + 5 - data = b'' - msg_total_len = socket.htonl(msg_total_len) - pack_data = struct.pack('IB', msg_total_len, msg_name_len) - data += pack_data - data += msg_name.encode() - data += buf - - return data - - -def open_channel_request(channel_name, content_type): - """Create open channel request message""" - request = pb2.OpenChannelRequest() - request.channel_name = channel_name - request.content_type = content_type - - return pack_message(pb2._OPENCHANNELREQUEST.full_name, request) - - -def image_frame_request( - image_width, - image_height, - image_data, - detection_result): - """Create image frame request message""" - request = pb2.PresentImageRequest() - request.format = 0 - request.width = image_width - request.height = image_height - request.data = image_data - for i in range(0, len(detection_result)): - myadd = request.rectangle_list.add() - myadd.left_top.x = detection_result[i].box.lt.x - myadd.left_top.y = detection_result[i].box.lt.y - myadd.right_bottom.x = detection_result[i].box.rb.x - myadd.right_bottom.y = detection_result[i].box.rb.y - myadd.label_text = detection_result[i].result_text - - return pack_message(pb2._PRESENTIMAGEREQUEST.full_name, request) - - -def heartbeat_message(): - """Create headbeat message""" - return pack_message( - pb2._HEARTBEATMESSAGE.full_name, - pb2.HeartbeatMessage()) - - -def is_open_channel_response(msg_name): - """Confirm the message is open channel response or not""" - return (msg_name == pb2._OPENCHANNELRESPONSE.full_name) - - -def is_image_frame_response(msg_name): - """Confirm the message is image frame response or not""" - return (msg_name == pb2._PRESENTIMAGERESPONSE.full_name) diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py deleted file mode 100644 index 6c99f06..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/presenter_message_pb2.py +++ /dev/null @@ -1,493 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
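The pack_message() helper above frames every request as a 4-byte total length (network byte order), a 1-byte name length, the message name, and the serialized protobuf body. A self-contained check of that layout (no protobuf required, since HeartbeatMessage serializes to zero bytes):

```python
import socket
import struct

name = b"ascend.presenter.proto.HeartbeatMessage"
body = b""  # an empty proto3 message serializes to zero bytes
total = socket.htonl(len(name) + len(body) + 5)
packed = struct.pack("IB", total, len(name)) + name + body

# _read_msg_head()/_read_msg_name() in socket_client.py reverse this
total_len, name_len = struct.unpack("IB", packed[:5])
assert socket.ntohl(total_len) == len(name) + len(body) + 5
assert name_len == len(name)
```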
-# source: presenter_message.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='presenter_message.proto', - package='ascend.presenter.proto', - syntax='proto3', - serialized_pb=_b('\n\x17presenter_message.proto\x12\x16\x61scend.presenter.proto\"l\n\x12OpenChannelRequest\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t\x12@\n\x0c\x63ontent_type\x18\x02 \x01(\x0e\x32*.ascend.presenter.proto.ChannelContentType\"n\n\x13OpenChannelResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.OpenChannelErrorCode\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\x12\n\x10HeartbeatMessage\"\"\n\nCoordinate\x12\t\n\x01x\x18\x01 \x01(\r\x12\t\n\x01y\x18\x02 \x01(\r\"\x94\x01\n\x0eRectangle_Attr\x12\x34\n\x08left_top\x18\x01 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x38\n\x0cright_bottom\x18\x02 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x12\n\nlabel_text\x18\x03 \x01(\t\"\xb7\x01\n\x13PresentImageRequest\x12\x33\n\x06\x66ormat\x18\x01 \x01(\x0e\x32#.ascend.presenter.proto.ImageFormat\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12>\n\x0erectangle_list\x18\x05 \x03(\x0b\x32&.ascend.presenter.proto.Rectangle_Attr\"o\n\x14PresentImageResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.PresentDataErrorCode\x12\x15\n\rerror_message\x18\x02 \x01(\t*\xa5\x01\n\x14OpenChannelErrorCode\x12\x19\n\x15kOpenChannelErrorNone\x10\x00\x12\"\n\x1ekOpenChannelErrorNoSuchChannel\x10\x01\x12)\n%kOpenChannelErrorChannelAlreadyOpened\x10\x02\x12#\n\x16kOpenChannelErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01*P\n\x12\x43hannelContentType\x12\x1c\n\x18kChannelContentTypeImage\x10\x00\x12\x1c\n\x18kChannelContentTypeVideo\x10\x01*#\n\x0bImageFormat\x12\x14\n\x10kImageFormatJpeg\x10\x00*\xa4\x01\n\x14PresentDataErrorCode\x12\x19\n\x15kPresentDataErrorNone\x10\x00\x12$\n kPresentDataErrorUnsupportedType\x10\x01\x12&\n\"kPresentDataErrorUnsupportedFormat\x10\x02\x12#\n\x16kPresentDataErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x62\x06proto3') -) - -_OPENCHANNELERRORCODE = _descriptor.EnumDescriptor( - name='OpenChannelErrorCode', - full_name='ascend.presenter.proto.OpenChannelErrorCode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='kOpenChannelErrorNone', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kOpenChannelErrorNoSuchChannel', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kOpenChannelErrorChannelAlreadyOpened', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kOpenChannelErrorOther', index=3, number=-1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=780, - serialized_end=945, -) -_sym_db.RegisterEnumDescriptor(_OPENCHANNELERRORCODE) - -OpenChannelErrorCode = enum_type_wrapper.EnumTypeWrapper(_OPENCHANNELERRORCODE) -_CHANNELCONTENTTYPE = _descriptor.EnumDescriptor( - 
name='ChannelContentType', - full_name='ascend.presenter.proto.ChannelContentType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='kChannelContentTypeImage', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kChannelContentTypeVideo', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=947, - serialized_end=1027, -) -_sym_db.RegisterEnumDescriptor(_CHANNELCONTENTTYPE) - -ChannelContentType = enum_type_wrapper.EnumTypeWrapper(_CHANNELCONTENTTYPE) -_IMAGEFORMAT = _descriptor.EnumDescriptor( - name='ImageFormat', - full_name='ascend.presenter.proto.ImageFormat', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='kImageFormatJpeg', index=0, number=0, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1029, - serialized_end=1064, -) -_sym_db.RegisterEnumDescriptor(_IMAGEFORMAT) - -ImageFormat = enum_type_wrapper.EnumTypeWrapper(_IMAGEFORMAT) -_PRESENTDATAERRORCODE = _descriptor.EnumDescriptor( - name='PresentDataErrorCode', - full_name='ascend.presenter.proto.PresentDataErrorCode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='kPresentDataErrorNone', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kPresentDataErrorUnsupportedType', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kPresentDataErrorUnsupportedFormat', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='kPresentDataErrorOther', index=3, number=-1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1067, - serialized_end=1231, -) -_sym_db.RegisterEnumDescriptor(_PRESENTDATAERRORCODE) - -PresentDataErrorCode = enum_type_wrapper.EnumTypeWrapper(_PRESENTDATAERRORCODE) -kOpenChannelErrorNone = 0 -kOpenChannelErrorNoSuchChannel = 1 -kOpenChannelErrorChannelAlreadyOpened = 2 -kOpenChannelErrorOther = -1 -kChannelContentTypeImage = 0 -kChannelContentTypeVideo = 1 -kImageFormatJpeg = 0 -kPresentDataErrorNone = 0 -kPresentDataErrorUnsupportedType = 1 -kPresentDataErrorUnsupportedFormat = 2 -kPresentDataErrorOther = -1 - - - -_OPENCHANNELREQUEST = _descriptor.Descriptor( - name='OpenChannelRequest', - full_name='ascend.presenter.proto.OpenChannelRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channel_name', full_name='ascend.presenter.proto.OpenChannelRequest.channel_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='content_type', full_name='ascend.presenter.proto.OpenChannelRequest.content_type', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=51, - serialized_end=159, -) - - -_OPENCHANNELRESPONSE = _descriptor.Descriptor( - 
name='OpenChannelResponse', - full_name='ascend.presenter.proto.OpenChannelResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='error_code', full_name='ascend.presenter.proto.OpenChannelResponse.error_code', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='error_message', full_name='ascend.presenter.proto.OpenChannelResponse.error_message', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=161, - serialized_end=271, -) - - -_HEARTBEATMESSAGE = _descriptor.Descriptor( - name='HeartbeatMessage', - full_name='ascend.presenter.proto.HeartbeatMessage', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=273, - serialized_end=291, -) - - -_COORDINATE = _descriptor.Descriptor( - name='Coordinate', - full_name='ascend.presenter.proto.Coordinate', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='x', full_name='ascend.presenter.proto.Coordinate.x', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='y', full_name='ascend.presenter.proto.Coordinate.y', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=293, - serialized_end=327, -) - - -_RECTANGLE_ATTR = _descriptor.Descriptor( - name='Rectangle_Attr', - full_name='ascend.presenter.proto.Rectangle_Attr', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='left_top', full_name='ascend.presenter.proto.Rectangle_Attr.left_top', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='right_bottom', full_name='ascend.presenter.proto.Rectangle_Attr.right_bottom', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='label_text', 
full_name='ascend.presenter.proto.Rectangle_Attr.label_text', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=330, - serialized_end=478, -) - - -_PRESENTIMAGEREQUEST = _descriptor.Descriptor( - name='PresentImageRequest', - full_name='ascend.presenter.proto.PresentImageRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='format', full_name='ascend.presenter.proto.PresentImageRequest.format', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='width', full_name='ascend.presenter.proto.PresentImageRequest.width', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='height', full_name='ascend.presenter.proto.PresentImageRequest.height', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data', full_name='ascend.presenter.proto.PresentImageRequest.data', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rectangle_list', full_name='ascend.presenter.proto.PresentImageRequest.rectangle_list', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=481, - serialized_end=664, -) - - -_PRESENTIMAGERESPONSE = _descriptor.Descriptor( - name='PresentImageResponse', - full_name='ascend.presenter.proto.PresentImageResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='error_code', full_name='ascend.presenter.proto.PresentImageResponse.error_code', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='error_message', full_name='ascend.presenter.proto.PresentImageResponse.error_message', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=666, - serialized_end=777, -) - -_OPENCHANNELREQUEST.fields_by_name['content_type'].enum_type = _CHANNELCONTENTTYPE -_OPENCHANNELRESPONSE.fields_by_name['error_code'].enum_type = _OPENCHANNELERRORCODE -_RECTANGLE_ATTR.fields_by_name['left_top'].message_type = _COORDINATE -_RECTANGLE_ATTR.fields_by_name['right_bottom'].message_type = _COORDINATE -_PRESENTIMAGEREQUEST.fields_by_name['format'].enum_type = _IMAGEFORMAT -_PRESENTIMAGEREQUEST.fields_by_name['rectangle_list'].message_type = _RECTANGLE_ATTR -_PRESENTIMAGERESPONSE.fields_by_name['error_code'].enum_type = _PRESENTDATAERRORCODE -DESCRIPTOR.message_types_by_name['OpenChannelRequest'] = _OPENCHANNELREQUEST -DESCRIPTOR.message_types_by_name['OpenChannelResponse'] = _OPENCHANNELRESPONSE -DESCRIPTOR.message_types_by_name['HeartbeatMessage'] = _HEARTBEATMESSAGE -DESCRIPTOR.message_types_by_name['Coordinate'] = _COORDINATE -DESCRIPTOR.message_types_by_name['Rectangle_Attr'] = _RECTANGLE_ATTR -DESCRIPTOR.message_types_by_name['PresentImageRequest'] = _PRESENTIMAGEREQUEST -DESCRIPTOR.message_types_by_name['PresentImageResponse'] = _PRESENTIMAGERESPONSE -DESCRIPTOR.enum_types_by_name['OpenChannelErrorCode'] = _OPENCHANNELERRORCODE -DESCRIPTOR.enum_types_by_name['ChannelContentType'] = _CHANNELCONTENTTYPE -DESCRIPTOR.enum_types_by_name['ImageFormat'] = _IMAGEFORMAT -DESCRIPTOR.enum_types_by_name['PresentDataErrorCode'] = _PRESENTDATAERRORCODE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OpenChannelRequest = _reflection.GeneratedProtocolMessageType('OpenChannelRequest', (_message.Message,), dict( - DESCRIPTOR = _OPENCHANNELREQUEST, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelRequest) - )) -_sym_db.RegisterMessage(OpenChannelRequest) - -OpenChannelResponse = _reflection.GeneratedProtocolMessageType('OpenChannelResponse', (_message.Message,), dict( - DESCRIPTOR = _OPENCHANNELRESPONSE, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelResponse) - )) -_sym_db.RegisterMessage(OpenChannelResponse) - -HeartbeatMessage = _reflection.GeneratedProtocolMessageType('HeartbeatMessage', (_message.Message,), dict( - DESCRIPTOR = _HEARTBEATMESSAGE, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.HeartbeatMessage) - )) -_sym_db.RegisterMessage(HeartbeatMessage) - -Coordinate = _reflection.GeneratedProtocolMessageType('Coordinate', (_message.Message,), dict( - DESCRIPTOR = _COORDINATE, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Coordinate) - )) -_sym_db.RegisterMessage(Coordinate) - -Rectangle_Attr = _reflection.GeneratedProtocolMessageType('Rectangle_Attr', (_message.Message,), dict( - DESCRIPTOR = _RECTANGLE_ATTR, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Rectangle_Attr) - )) -_sym_db.RegisterMessage(Rectangle_Attr) - -PresentImageRequest = _reflection.GeneratedProtocolMessageType('PresentImageRequest', (_message.Message,), dict( - DESCRIPTOR = _PRESENTIMAGEREQUEST, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageRequest) - )) 
-_sym_db.RegisterMessage(PresentImageRequest) - -PresentImageResponse = _reflection.GeneratedProtocolMessageType('PresentImageResponse', (_message.Message,), dict( - DESCRIPTOR = _PRESENTIMAGERESPONSE, - __module__ = 'presenter_message_pb2' - # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageResponse) - )) -_sym_db.RegisterMessage(PresentImageResponse) - - -# @@protoc_insertion_point(module_scope) diff --git a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py b/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py deleted file mode 100644 index 1e69b88..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/presenteragent/socket_client.py +++ /dev/null @@ -1,135 +0,0 @@ -# !/usr/bin/env python -# -*- coding:utf-8 -*- -import sys -sys.path.append("..") - -import threading -import socket -import time -import struct - -from acllite_logger import log_error, log_info - -class AgentSocket(object): - """Create socket between app and presenter server""" - def __init__(self, server_ip, port): - """Create socket instance - Args: - server_ip: presenter server ip addr - port: connect port of presenter server - """ - self._server_address = (server_ip, port) - self._sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - def connect(self): - """Create connect with presenter server - Returns: - ret: connect error code, 0 is connect success, otherwise failed - """ - ret = 0 - for i in range(0, 5): - ret = self._sock_client.connect_ex(self._server_address) - if ret == 0: - break - time.sleep(0.2) - return ret - - def _read_socket(self, read_len): - has_read_len = 0 - read_buf = b'' - total_buf = b'' - - while has_read_len != read_len: - try: - read_buf = self._sock_client.recv(read_len - has_read_len) - except socket.error: - log_error("Read socket failed, error ", socket.error) - return False, None - if read_buf == b'': - return False, None - total_buf += read_buf - has_read_len = len(total_buf) - - return True, total_buf - - def _read_msg_head(self, read_len): - ret, msg_head = self._read_socket(read_len) - if not ret: - log_error("socket receive msg head null") - return None, None - - # in Struct(), 'I' is unsigned int, 'B' is unsigned char - msg_head_data = struct.Struct('IB') - (msg_total_len, msg_name_len) = msg_head_data.unpack(msg_head) - msg_total_len = socket.ntohl(msg_total_len) - - return msg_total_len, msg_name_len - - def _read_msg_name(self, msg_name_len): - ret, msg_name = self._read_socket(msg_name_len) - - if not ret: - log_error("Socket receive msg but name is null") - return False, None - try: - msg_name = msg_name.decode("utf-8") - except: - log_error("Msg name decode to utf-8 error") - return False, None - - return True, msg_name - - def _read_msg_body(self, msg_body_len): - ret, msg_body = self._read_socket(msg_body_len) - if not ret: - log_error("Socket receive msg but body null") - return False, None - return True, msg_body - - def recv_msg(self): - """Receive message from presenter server - Returns: - msg_name: received message name - msg_body: received message data - """ - # Step1: read msg head - msg_total_len, msg_name_len = self._read_msg_head(5) - if msg_total_len is None: - log_error("msg total len is None.") - return None, None - - # Step2: read msg name - ret, msg_name = self._read_msg_name(msg_name_len) - if not ret: - return None, None - - # Step3: read msg body - msg_body_len = msg_total_len - 5 - msg_name_len - if msg_body_len < 0: - log_error("msg total len is 0") - return None, None - 
ret, msg_body = self._read_msg_body(msg_body_len) - if not ret: - return None, None - - return msg_name, msg_body - - def send_msg(self, data): - """Send message to presenter server - Args: - data: message data - Returns: - 0 send success - 1 send failed - """ - try: - self._sock_client.sendall(data) - except: - log_error("Send msg failed") - return 1 - return 0 - - def close(self): - """Close connect""" - self._sock_client.shutdown(socket.SHUT_RDWR) - self._sock_client.close() diff --git a/Samples/YOLOV5MultiInput/python/src/python/videocapture.py b/Samples/YOLOV5MultiInput/python/src/python/videocapture.py deleted file mode 100644 index 361bd4e..0000000 --- a/Samples/YOLOV5MultiInput/python/src/python/videocapture.py +++ /dev/null @@ -1,383 +0,0 @@ -import av -import threading -import numpy as np -import acl -import time - -import constants as const -import acllite_utils as utils -import acllite_logger as acl_log -import dvpp_vdec as dvpp_vdec -from acllite_image import AclLiteImage - -WAIT_INTERVAL = 0.01 # 0.01 -WAIT_READY_MAX = 10 -WAIT_FIRST_DECODED_FRAME = 0.02 - -DECODE_STATUS_INIT = 0 -DECODE_STATUS_READY = 1 -DECODE_STATUS_RUNNING = 2 -DECODE_STATUS_PYAV_FINISH = 3 -DECODE_STATUS_ERROR = 4 -DECODE_STATUS_STOP = 5 -DECODE_STATUS_EXIT = 6 - - -class _ChannelIdGenerator(object): - """Generate global unique id number, single instance mode class""" - _instance_lock = threading.Lock() - channel_id = 0 - - def __init__(self): - pass - - def __new__(cls, *args, **kwargs): - if not hasattr(_ChannelIdGenerator, "_instance"): - with _ChannelIdGenerator._instance_lock: - if not hasattr(_ChannelIdGenerator, "_instance"): - _ChannelIdGenerator._instance = object.__new__( - cls, *args, **kwargs) - return _ChannelIdGenerator._instance - - def generator_channel_id(self): - """Generate global unique id number - The id number is increase - """ - curren_channel_id = 0 - with _ChannelIdGenerator._instance_lock: - curren_channel_id = _ChannelIdGenerator.channel_id - _ChannelIdGenerator.channel_id += 1 - - return curren_channel_id - - -def gen_unique_channel_id(): - """Interface of generate global unique id number""" - generator = _ChannelIdGenerator() - return generator.generator_channel_id() - - -class VideoCapture(object): - """Decode video by pyav and pyacl dvpp vdec - This class only support decode annex-b h264 file or rtsp ip camera. - You can use command: - ffmpeg -i aaa.mp4 -codec copy -bsf: h264_mp4toannexb -f h264 aaa.h264 - to transform mp4 file to h264 stream file. 
-    If decoding an RTSP stream from an IP camera or a stream-pulling tool,
-    make sure the stream format is annex-b.
-
-    Attributes:
-        _stream_name: video stream name
-        _input_buffer: dvpp vdec decode input data buffer
-        _ctx: decode thread acl context, uses the same context as the app
-        _entype: video stream encode type, dvpp vdec supports:
-            const.ENTYPE_H265_MAIN = 0  H265 main level
-            const.ENTYPE_H264_BASE = 1  H264 baseline level
-            const.ENTYPE_H264_MAIN = 2  H264 main level
-            const.ENTYPE_H264_HIGH = 3  H264 high level
-            this attribute is read from the video stream extradata
-        _channel_id: dvpp vdec decode channel id parameter, globally unique
-        _vdec: pyacl dvpp vdec instance
-        _is_opened: whether the video stream is open or not
-        _status: video decoder current status
-        _run_mode: the device mode
-    """
-
-    def __init__(self, stream_name):
-        self._stream_name = stream_name
-        self._input_buffer = None
-        self._vdec = None
-        self._is_opened = False
-        self._width = 0
-        self._height = 0
-        self._decode_thread_id = None
-        self._dextory_dvpp_flag = False
-        self._ctx, ret = acl.rt.get_context()
-        if ret:
-            acl_log.log_error("Get acl context failed when "
-                              "instantiating VideoCapture, error ", ret)
-        else:
-            self._entype = const.ENTYPE_H264_MAIN
-            self._channel_id = gen_unique_channel_id()
-            self._status = DECODE_STATUS_INIT
-            self._run_mode, ret = acl.rt.get_run_mode()
-            if ret:
-                acl_log.log_error("Get acl run mode failed when "
-                                  "instantiating VideoCapture, error ", ret)
-            else:
-                self._open()
-
-    def __del__(self):
-        self.destroy()
-
-    def _open(self):
-        # Get frame width, height and encode type by pyav
-        if self._get_param():
-            acl_log.log_error("Decode %s failed for get stream "
-                              "parameters error" % (self._stream_name))
-            return
-        # Create the decode thread and prepare to decode
-        self._decode_thread_id, ret = acl.util.start_thread(
-            self._decode_thread_entry, [])
-        if ret:
-            acl_log.log_error("Create %s decode thread failed, error %d"
-                              % (self._stream_name, ret))
-            return
-        # Wait for the decode thread to become ready
-        for i in range(0, WAIT_READY_MAX):
-            if self._status == DECODE_STATUS_INIT:
-                time.sleep(WAIT_INTERVAL)
-        if self._status == DECODE_STATUS_READY:
-            self._is_opened = True
-            acl_log.log_info("Ready to decode %s..." % (self._stream_name))
-        else:
-            acl_log.log_error("Open %s failed for wait ready timeout"
-                              % (self._stream_name))
-            return
-
-    def _get_param(self):
-        container = av.open(self._stream_name)
-        stream = [s for s in container.streams if s.type == 'video']
-        if len(stream) == 0:
-            # The stream is not video
-            acl_log.log_error("%s has no video stream" % (self._stream_name))
-            return const.FAILED
-
-        ret, profile = self._get_profile(stream)
-        if ret:
-            acl_log.log_error("%s is not annex-b format, decode failed"
-                              % (self._stream_name))
-            return const.FAILED
-
-        video_context = container.streams.video[0].codec_context
-        codec_id_name = video_context.name
-        ret, self._entype = self._get_entype(codec_id_name, profile)
-        if ret:
-            return const.FAILED
-
-        self._width = video_context.width
-        self._height = video_context.height
-
-        acl_log.log_info(
-            "Get %s information: width %d, height %d, profile %d, "
-            "codec %s, entype %d" %
-            (self._stream_name,
-             self._width,
-             self._height,
-             profile,
-             codec_id_name,
-             self._entype))
-        container.close()
-        return const.SUCCESS
-
-    def _get_profile(self, stream):
-        # Annex-b format h264 extradata starts with 0x000001 or 0x00000001
-        extradata = np.frombuffer(stream[0].codec_context.extradata, np.ubyte)
-        if (extradata[0:3] == [0, 0, 1]).all():
-            profile_id = extradata[4]
-        elif (extradata[0:4] == [0, 0, 0, 1]).all():
-            profile_id = extradata[5]
-        else:
-            acl_log.log_error("The stream %s is not annex-b h264, "
-                              "can not decode it" % (self._stream_name))
-            return const.FAILED, None
-
-        return const.SUCCESS, profile_id
-
-    def _get_entype(self, codec_id_name, profile):
-        # Dvpp vdec supports h264 baseline, main and high level
-        profile_entype_tbl = {
-            'h264': {const.FF_PROFILE_H264_BASELINE: const.ENTYPE_H264_BASE,
-                     const.FF_PROFILE_H264_MAIN: const.ENTYPE_H264_MAIN,
-                     const.FF_PROFILE_H264_HIGH: const.ENTYPE_H264_HIGH},
-            'h265': {const.FF_PROFILE_HEVC_MAIN: const.ENTYPE_H265_MAIN},
-            'hevc': {const.FF_PROFILE_HEVC_MAIN: const.ENTYPE_H265_MAIN}}
-        entype = None
-        ret = const.SUCCESS
-
-        if codec_id_name in profile_entype_tbl.keys():
-            entype_tbl = profile_entype_tbl[codec_id_name]
-            if profile in entype_tbl.keys():
-                entype = entype_tbl[profile]
-            elif codec_id_name == 'h264':
-                # If the profile is not supported, try to decode as main level
-                entype = const.ENTYPE_H264_MAIN
-                acl_log.log_error("Unsupported h264 profile ", profile,
-                                  ", decode as main level")
-            else:
-                entype = const.ENTYPE_H265_MAIN
-                acl_log.log_error("Unsupported h265 profile ", profile,
-                                  ", decode as main level")
-        else:
-            # Not h264 or h265
-            ret = const.FAILED
-            acl_log.log_error("Unsupported codec type ", codec_id_name)
-
-        return ret, entype
-
-    def _pyav_vdec(self):
-        frame = 0
-        video = av.open(self._stream_name)
-        stream = [s for s in video.streams if s.type == 'video']
-        acl_log.log_info("Start decode %s frames" % (self._stream_name))
-        for packet in video.demux([stream[0]]):
-            # Get frame data from the packet and copy it to dvpp memory
-            frame_data, data_size = self._prepare_frame_data(packet)
-            if data_size == 0:
-                # The last packet size is 0, no frame to decode anymore
-                break
-
-            if self._vdec.process(frame_data, data_size,
-                                  [self._channel_id, frame]):
-                acl_log.log_error("Dvpp vdec decode frame %d failed, "
-                                  "stop decode" % (frame))
-                self._status = DECODE_STATUS_ERROR
-                break
-            frame += 1
-
-            # The status changes to stop when the app stops decoding
-            if self._status != DECODE_STATUS_RUNNING:
-                acl_log.log_info("Decode status change to %d, stop decode"
-                                 % (self._status))
-                break
-
-    def _prepare_frame_data(self, packet):
-        in_frame_np = np.frombuffer(packet.to_bytes(), np.byte)
-        size = in_frame_np.size
-        if size == 0:
-            # The last frame data is empty
-            acl_log.log_info("Pyav decode finish")
-            self._status = DECODE_STATUS_PYAV_FINISH
-            return None, 0
-
-        if "bytes_to_ptr" in dir(acl.util):
-            bytes_data = in_frame_np.tobytes()
-            in_frame_ptr = acl.util.bytes_to_ptr(bytes_data)
-        else:
-            in_frame_ptr = acl.util.numpy_to_ptr(in_frame_np)
-        policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE
-        if self._run_mode == const.ACL_HOST:
-            policy = const.ACL_MEMCPY_HOST_TO_DEVICE
-        ret = acl.rt.memcpy(self._input_buffer, size, in_frame_ptr, size,
-                            policy)
-        if ret:
-            acl_log.log_error("Copy data to dvpp failed, policy %d, error %d"
-                              % (policy, ret))
-            self._status = DECODE_STATUS_ERROR
-            return None, 0
-
-        return self._input_buffer, size
-
-    def _decode_thread_entry(self, arg_list):
-        # Set the acl context for the decode thread
-        if self._decode_thread_init():
-            acl_log.log_error("Decode thread init failed")
-            return const.FAILED
-
-        self._status = DECODE_STATUS_READY
-        while self._status == DECODE_STATUS_READY:
-            time.sleep(WAIT_INTERVAL)
-
-        self._pyav_vdec()
-        self._decode_thread_join()
-
-        return const.SUCCESS
-
-    def _decode_thread_init(self):
-        # Set the acl context for the decode thread
-        ret = acl.rt.set_context(self._ctx)
-        if ret:
-            acl_log.log_error("%s decode thread set acl context failed"
-                              % (self._stream_name))
-            return const.FAILED
-        # Instance dvpp vdec and init it
-        self._vdec = dvpp_vdec.DvppVdec(self._channel_id, self._width,
-                                        self._height, self._entype, self._ctx)
-        if self._vdec.init():
-            acl_log.log_error("%s decode thread init dvpp vdec failed"
-                              % (self._stream_name))
-            return const.FAILED
-
-        # Malloc dvpp vdec decode input dvpp memory
-        self._input_buffer, ret = acl.media.dvpp_malloc(
-            utils.rgbu8_size(self._width, self._height))
-        if ret:
-            acl_log.log_error("%s decode thread malloc input memory failed, "
-                              "error %d. frame width %d, height %d, size %d"
-                              % (self._stream_name, ret,
-                                 self._width, self._height,
-                                 utils.rgbu8_size(self._width, self._height)))
-            return const.FAILED
-
-        return const.SUCCESS
-
-    def _decode_thread_join(self):
-        self.destroy()
-        # Wait until all decoded frames are taken off by read()
-        while self._status < DECODE_STATUS_STOP:
-            time.sleep(WAIT_INTERVAL)
-        self._status = DECODE_STATUS_EXIT
-
-    def is_finished(self):
-        """Decode finished
-        Pyav and dvpp vdec decoded all frames, and all decoded frames were
-        taken off. When read() returns success but the image is None, use
-        this to confirm that decoding finished
-        """
-        return self._status == DECODE_STATUS_EXIT
-
-    def read(self, no_wait=False):
-        """Read a decoded frame
-        Args:
-            no_wait: Get the image without waiting. If this arg is True and
-                     the returned image is None, call the is_finished()
-                     method to confirm whether decoding finished or failed
-
-        Returns:
-            1. const.SUCCESS, not None: get image success
-            2. const.SUCCESS, None: all frames were decoded and taken off
-            3. const.FAILED, None: there are frames not decoded yet, but no
-               image was decoded, which means decoding the video failed
-        """
-        # Pyav and dvpp vdec decoded all frames,
-        # and all decoded frames were taken off
-        if self._status == DECODE_STATUS_EXIT:
-            return const.SUCCESS, None
-
-        # When read is called the first time, the decode thread is only
-        # ready to decode, but not decoding yet. Setting the status to
-        # DECODE_STATUS_RUNNING makes pyav and dvpp vdec actually start
-        # decoding
-        if self._status == DECODE_STATUS_READY:
-            self._status = DECODE_STATUS_RUNNING
-            # Decoding has just begun, wait for the first decoded frame
-            time.sleep(WAIT_FIRST_DECODED_FRAME)
-
-        ret, image = self._vdec.read(no_wait)
-
-        # Decode finished or stopped, and all decoded frames were taken off
-        if (image is None) and (self._status > DECODE_STATUS_RUNNING):
-            self._status = DECODE_STATUS_EXIT
-
-        return ret, image
-
-    def destroy(self):
-        """Release all decode resources"""
-        if self._vdec is not None:
-            self._vdec.destroy()
-            while not self._vdec._destory_channel_flag:
-                time.sleep(0.001)
-        if self._input_buffer is not None:
-            acl.media.dvpp_free(self._input_buffer)
-            self._input_buffer = None
-        self._dextory_dvpp_flag = True
-- 
Gitee

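The VideoCapture class removed above documents a polling contract for callers: read() returns (ret, image), and a None image is only final once is_finished() reports True. The following is a minimal sketch of a caller loop under that documented contract; the stream path is illustrative, and const/videocapture are the sample's own modules:

```python
import constants as const
from videocapture import VideoCapture

cap = VideoCapture('../data/test.h264')  # illustrative annex-b h264 stream
while True:
    ret, image = cap.read()
    if ret != const.SUCCESS:
        break                  # decoding the stream failed
    if image is None:
        if cap.is_finished():  # all frames decoded and taken off
            break
        continue               # decoder not finished yet, poll again
    # ... hand the AclLiteImage to inference here ...
cap.destroy()
```
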
From 77601b79d053e89d0d9c8d2fd68b7cc8d9353b13 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Tue, 26 Mar 2024 03:39:17 +0000
Subject: [PATCH 26/38] update Samples/YOLOV5Video/src/YOLOV5Video.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5Video/src/YOLOV5Video.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py
index e71b341..b748da9 100644
--- a/Samples/YOLOV5Video/src/YOLOV5Video.py
+++ b/Samples/YOLOV5Video/src/YOLOV5Video.py
@@ -131,7 +131,7 @@ if __name__ == '__main__':
     elif mode == "video":
         path = "../data/output.h264"
         video_infer(path, model)
-    elif:
+    else:
         print('input mode is incorrect.')
 
     model.release_resource()
-- 
Gitee

From 0322d44f172c9e520160f96462b32c878f69bb31 Mon Sep 17 00:00:00 2001
From: cajcak <1039183305@qq.com>
Date: Tue, 26 Mar 2024 21:29:13 +0800
Subject: [PATCH 27/38] update

---
 Samples/YOLOV5MultiInput/python/README.md               | 100 +++++++++++
 Samples/YOLOV5MultiInput/python/data/.keep              |   0
 Samples/YOLOV5MultiInput/python/scripts/sample_run.sh   |  12 ++
 Samples/YOLOV5MultiInput/python/src/YOLOV5MultiInput.py | 158 ++++++++++++++++
 4 files changed, 270 insertions(+)
 create mode 100644 Samples/YOLOV5MultiInput/python/README.md
 create mode 100644 Samples/YOLOV5MultiInput/python/data/.keep
 create mode 100644 Samples/YOLOV5MultiInput/python/scripts/sample_run.sh
 create mode 100644 Samples/YOLOV5MultiInput/python/src/YOLOV5MultiInput.py

diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md
new file mode 100644
index 0000000..48f6bd2
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/README.md
@@ -0,0 +1,100 @@
+# Object Detection (YoloV5s)
+
+#### Sample Introduction
+
+This sample takes multiple offline video streams (*.mp4) as the application input, performs real-time detection of objects in the input videos based on the YoloV5s model, and displays the inference results with imshow.
+The sample code logic is shown below: ![sample logic diagram](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/samples-pic/EdgeAndRobotics/%E5%A4%9A%E7%BA%BF%E7%A8%8B%E7%A4%BA%E4%BE%8B%E5%9B%BE%E7%89%87.png)
+
+#### Sample Download
+
+The source code can be downloaded in either of the following two ways; choose one of them.
+
+- Command-line download (**takes longer, but the steps are simple**).
+
+  ```
+  # Log in to the development board and run the following commands as the HwHiAiUser user to download the source repository.
+  cd ${HOME}
+  git clone https://gitee.com/ascend/EdgeAndRobotics.git
+  # Switch to the sample directory
+  cd EdgeAndRobotics/Samples/YOLOV5MultiInput
+  ```
+
+- ZIP package download (**takes less time, but the steps are slightly more complex**).
+
+  ```
+  # 1. In the upper right corner of the repository, open the [Clone/Download] drop-down box and select [Download ZIP].
+  # 2. Upload the ZIP package to the home directory of a normal user on the development board, e.g. ${HOME}/EdgeAndRobotics-master.zip.
+  # 3. In the development environment, run the following commands to unzip the package.
+  cd ${HOME}
+  unzip EdgeAndRobotics-master.zip
+  # 4. Switch to the sample directory
+  cd EdgeAndRobotics-master/Samples/YOLOV5MultiInput/python
+  ```
+
+#### Environment Preparation
+
+1. Log in to the development board as the HwHiAiUser user.
+
+2. Set environment variables.
+
+   ```
+   # Configure the header and library file paths that compilation depends on
+   export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
+   export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
+   export PYTHONPATH=`pwd`/python:$PYTHONPATH
+   ```
+
+#### Run the Sample
+
+1. Log in to the development board as the HwHiAiUser user and switch to the current sample directory.
+
+2. Get the YoloV5s model (*.onnx) of the PyTorch framework and convert it into a model (*.om) that the Ascend AI processor can recognize.
+   - When the device memory is **less than 8 GB**, the following two environment variables can be set to reduce the number of processes used during atc model conversion and lower the memory usage.
+     ```
+     export TE_PARALLEL_COMPILER=1
+     export MAX_COMPILE_CORE_NUMBER=1
+     ```
+   - For convenience, the original model download and model conversion commands are given directly here and can be copied and executed.
+     ```
+     cd model
+     wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/yolov5s_nms.onnx --no-check-certificate
+     wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/aipp_rgb.cfg --no-check-certificate
+     atc --model=yolov5s_nms.onnx --framework=5 --output=yolov5s_rgb --input_shape="images:1,3,640,640;img_info:1,4" --soc_version=Ascend310B4 --insert_op_conf=aipp_rgb.cfg
+     ```
+
+   The parameters in the atc command are explained as follows; for detailed constraints, see the [ATC Model Conversion Guide](https://hiascend.com/document/redirect/CannCommunityAtc).
+
+   - --model: path of the YoloV5s network model file.
+   - --framework: type of the original framework. 5 means ONNX.
+   - --output: path of the om model file. Record the path where the om model file is saved; it is needed later when developing the application.
+   - --input\_shape: shape of the model input data.
+   - --soc\_version: version of the Ascend AI processor.
+
+3. Prepare the test video.
+
+   Get the test video for this sample from the following link and put it in the data directory.
+
+   ```
+   cd ../data
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.mp4 --no-check-certificate
+   ```
+
+   **Note:** to use a different test video, prepare it yourself and put it in the data directory.
+
+4. Run the sample.
+
+   ```
+   bash sample_run.sh
+   ```
+
+#### Related Operations
+
+- To get more samples, click [Link](https://gitee.com/ascend/samples/tree/master/inference/modelInference).
+- To get online video courses, click [Link](https://www.hiascend.com/edu/courses?activeTab=%E5%BA%94%E7%94%A8%E5%BC%80%E5%8F%91).
+- To get the learning documents, click [AscendCL python](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC1alpha002/devguide/appdevg/aclpythondevg/aclpythondevg_0001.html) to see the latest AscendCL inference application development guide.
+- Check the model input and output
+
+  The network model can be opened with the third-party tool Netron to check the data type and shape of the model input or output, which helps when analyzing the application development scenario.
diff --git a/Samples/YOLOV5MultiInput/python/data/.keep b/Samples/YOLOV5MultiInput/python/data/.keep
new file mode 100644
index 0000000..e69de29
diff --git a/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh
new file mode 100644
index 0000000..edae843
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/scripts/sample_run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
+
+echo "[INFO] The sample starts to run"
+running_command="python3 YOLOV5MultiInput.py"
+cd ${ScriptPath}/../src
+${running_command}
+if [ $? -ne 0 ]; then
+    echo "[ERROR] The program failed to run"
+else
+    echo "[INFO] The program ran successfully"
+fi
diff --git a/Samples/YOLOV5MultiInput/python/src/YOLOV5MultiInput.py b/Samples/YOLOV5MultiInput/python/src/YOLOV5MultiInput.py
new file mode 100644
index 0000000..596456d
--- /dev/null
+++ b/Samples/YOLOV5MultiInput/python/src/YOLOV5MultiInput.py
@@ -0,0 +1,158 @@
+import numpy as np
+import videocapture as video
+import acl
+import acllite_utils as utils
+import time
+import cv2
+import constants as const
+
+from acllite_resource import AclLiteResource
+from acllite_model import AclLiteModel
+from acllite_imageproc import AclLiteImageProc
+from acllite_image import AclLiteImage
+from acllite_logger import log_error, log_info
+from multiprocessing import Process, Queue, Pool, Value
+
+Q_PRE_SIZE = 32
+Q_OUT_SIZE = 32
+VIDEO_WIDTH = 1920
+VIDEO_HEIGHT = 1080
+WAIT_TIME = 0.000003
+labels = ["person", "bicycle", "car", "motorbike", "aeroplane",
+          "bus", "train", "truck", "boat", "traffic light",
+          "fire hydrant", "stop sign", "parking meter", "bench", "bird",
+          "cat", "dog", "horse", "sheep", "cow",
+          "elephant", "bear", "zebra", "giraffe", "backpack",
+          "umbrella", "handbag", "tie", "suitcase", "frisbee",
+          "skis", "snowboard", "sports ball", "kite", "baseball bat",
+          "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
+          "wine glass", "cup", "fork", "knife", "spoon",
+          "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog",
+          "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table",
+          "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
+          "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
+          "scissors", "teddy bear", "hair drier", "toothbrush"]
+
+def preprocess(path, q_pre, model_width, model_height, channel):
+    print(f'sub process preprocess {channel} start')
+    VIDEO_WIDTH = 1920
+    VIDEO_HEIGHT = 1080
+    scale_x = VIDEO_WIDTH / model_width
+    scale_y = VIDEO_HEIGHT / model_height
+    # get the scale factor: shrink by the larger of the two ratios
+    if scale_x > scale_y:
+        max_scale = scale_x
+        resize_shape = (model_width, int(VIDEO_HEIGHT / max_scale))
+    else:
+        max_scale = scale_y
+        resize_shape = (int(VIDEO_WIDTH / max_scale), model_height)
+    count = 0
+    cap = cv2.VideoCapture(path)
+    if not cap.isOpened():
+        print('video connect failed')
+        exit(1)
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            print('cap read end! close subprocess cap read')
+            q_pre.put('EOF')
+            break
+        else:
+            # letterbox: paste the resized frame into the top-left corner
+            # of the model-sized canvas
+            img = np.zeros([model_height, model_width, 3], dtype=np.uint8)
+            resize_image = cv2.resize(frame, resize_shape)
+            img[0:resize_shape[1], 0:resize_shape[0]] = resize_image
+            q_pre.put(img)
+            count += 1
+    print(f'pre process end! {channel}')
+
+def infer(model_path, q_pre, q_out, pnums):
+    resource = AclLiteResource()
+    resource.init()
+    model = AclLiteModel(model_path)
+    count = 0
+    # track finished channels so we never block on a drained queue
+    finished = [False] * len(q_pre)
+    image_info = np.array([640, 640,
+                           640, 640],
+                          dtype=np.float32)
+    start = time.time()
+    while pnums > 0:
+        for i, q in enumerate(q_pre):
+            if finished[i]:
+                continue
+            img = q.get()
+            if isinstance(img, str):
+                # 'EOF' from the preprocess worker: channel i is done
+                finished[i] = True
+                pnums -= 1
+                q_out[i].put('EOF')
+                continue
+            output = model.execute([img, image_info])
+            count += 1
+            q_out[i].put(output)
+    end = time.time()
+    print(f'fps: {count / (end - start):.3f}')
+    del model
+    del resource
+    print('infer end! close infer')
+
+def postprocess(q_out, model_width, model_height):
+    VIDEO_WIDTH = 1920
+    VIDEO_HEIGHT = 1080
+    while True:
+        output = q_out.get()
+        if isinstance(output, str):
+            print('postprocess end! close subprocess postprocess')
+            break
+        box_num = output[1][0, 0]
+        box_info = output[0].flatten()
+        scale_x = VIDEO_WIDTH / model_width
+        scale_y = VIDEO_HEIGHT / model_height
+
+        # get the scale factor used during preprocessing
+        if scale_x > scale_y:
+            max_scale = scale_x
+        else:
+            max_scale = scale_y
+        colors = [0, 0, 255]
+
+        # map the boxes back to the original image scale
+        result_msg = ""
+        for n in range(int(box_num)):
+            ids = int(box_info[5 * int(box_num) + n])
+            score = box_info[4 * int(box_num) + n]
+            label = labels[ids] + ":" + str("%.2f" % score)
+            top_left_x = box_info[0 * int(box_num) + n] * max_scale
+            top_left_y = box_info[1 * int(box_num) + n] * max_scale
+            bottom_right_x = box_info[2 * int(box_num) + n] * max_scale
+            bottom_right_y = box_info[3 * int(box_num) + n] * max_scale
+            result_msg += f'label:{label} '
+            # to visualize, uncomment the following lines:
+            # cv2.rectangle(src_image, (int(top_left_x), int(top_left_y)),
+            #               (int(bottom_right_x), int(bottom_right_y)), colors)
+            # p3 = (max(int(top_left_x), 15), max(int(top_left_y), 15))
+            # cv2.putText(src_image, label, p3, cv2.FONT_ITALIC, 0.6, colors, 1)
+            # cv2.imshow('frame', src_image)
+        print(f'results: {result_msg}')
+
+if __name__ == '__main__':
+    stream_path = "../data/test.mp4"
+    model_path = "../model/yolov5s_rgb.om"
+    model_width = 640
+    model_height = 640
+    pnums = 2
+
+    q_pre = [Queue(maxsize=Q_PRE_SIZE) for i in range(pnums)]
+    q_out = [Queue(maxsize=Q_OUT_SIZE) for i in range(pnums)]
+
+    processes = []
+    for i in range(pnums):
+        processes.append(Process(target=preprocess,
+                                 args=(stream_path, q_pre[i], model_width, model_height, i)))
+    processes.append(Process(target=infer, args=(model_path, q_pre, q_out, pnums)))
+    for i in range(pnums):
+        processes.append(Process(target=postprocess,
+                                 args=(q_out[i], model_width, model_height)))
+
+    [process.start() for process in processes]
+    [process.join() for process in processes]
+    print('all subprocesses finished')
-- 
Gitee

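Patch 27 wires one preprocess worker per video channel to a single shared infer worker and per-channel postprocess workers through bounded multiprocessing.Queue objects, with the string 'EOF' as the end-of-stream sentinel that each stage forwards downstream. The following is a minimal, self-contained sketch of that queue pattern; it runs without any Ascend libraries, and the detections string is a stand-in for the real AclLiteModel.execute output:

```python
import multiprocessing as mp

def producer(q, n_frames):
    # Stand-in for the preprocess worker: push frames, then the sentinel
    for i in range(n_frames):
        q.put(('frame', i))
    q.put('EOF')

def consumer(q_in, q_out):
    # Stand-in for the infer worker: forward results until the sentinel arrives
    while True:
        item = q_in.get()
        if item == 'EOF':
            q_out.put('EOF')   # propagate end-of-stream downstream
            break
        _, idx = item
        q_out.put(f'detections for frame {idx}')

if __name__ == '__main__':
    q_pre = mp.Queue(maxsize=32)
    q_out = mp.Queue(maxsize=32)
    workers = [mp.Process(target=producer, args=(q_pre, 5)),
               mp.Process(target=consumer, args=(q_pre, q_out))]
    for w in workers:
        w.start()
    while True:                # stand-in for the postprocess worker
        result = q_out.get()
        if result == 'EOF':
            break
        print(result)
    for w in workers:
        w.join()
```

The bounded queue sizes give the pipeline backpressure: a fast preprocess stage blocks on put() once 32 frames are queued, rather than growing memory without limit.
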
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5Video/src/YOLOV5Video.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py
index b748da9..52cdac1 100644
--- a/Samples/YOLOV5Video/src/YOLOV5Video.py
+++ b/Samples/YOLOV5Video/src/YOLOV5Video.py
@@ -105,7 +105,6 @@ def video_infer(video_path, model):
         else:
             log_info("read frame finish")
             break
-    cv2.release()
 
 def image_infer(image_path, model):
     frame = AclLiteImage(image_path)
-- 
Gitee

From baf89e270d8dd586ed7cd37f6d3955387c70e13a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Wed, 27 Mar 2024 01:55:33 +0000
Subject: [PATCH 29/38] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20Samples/YOL?=
 =?UTF-8?q?OV5USBCamera/python/src/YOLOV7USBCamera.py=20=E4=B8=BA=20Sample?=
 =?UTF-8?q?s/YOLOV5USBCamera/python/src/YOLOV5USBCamera.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../python/src/{YOLOV7USBCamera.py => YOLOV5USBCamera.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename Samples/YOLOV5USBCamera/python/src/{YOLOV7USBCamera.py => YOLOV5USBCamera.py} (100%)

diff --git a/Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py b/Samples/YOLOV5USBCamera/python/src/YOLOV5USBCamera.py
similarity index 100%
rename from Samples/YOLOV5USBCamera/python/src/YOLOV7USBCamera.py
rename to Samples/YOLOV5USBCamera/python/src/YOLOV5USBCamera.py
-- 
Gitee

From 8fcdead3a3dc36938a68317792a8c975e98eeee0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Wed, 27 Mar 2024 01:55:49 +0000
Subject: [PATCH 30/38] update Samples/YOLOV5USBCamera/python/scripts/sample_run.sh.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5USBCamera/python/scripts/sample_run.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh b/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh
index 524e08a..14e9d7e 100644
--- a/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh
+++ b/Samples/YOLOV5USBCamera/python/scripts/sample_run.sh
@@ -4,7 +4,7 @@ ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
 
 echo "[INFO] The sample starts to run"
 cd ${ScriptPath}/../src
-python3 YOLOV7USBCamera.py
+python3 YOLOV5USBCamera.py
 if [ $? -ne 0 ];then
     echo "[INFO] The program runs failed"
 else
-- 
Gitee

From a66dc3147d73610080517ccb44d1a7b6b0305134 Mon Sep 17 00:00:00 2001
From: cajcak <1039183305@qq.com>
Date: Wed, 27 Mar 2024 11:12:07 +0800
Subject: [PATCH 31/38] update

---
 Samples/YOLOV5Video/src/YOLOV5Video.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py
index 52cdac1..2928730 100644
--- a/Samples/YOLOV5Video/src/YOLOV5Video.py
+++ b/Samples/YOLOV5Video/src/YOLOV5Video.py
@@ -61,7 +61,6 @@ class sampleYOLOV7(object):
 
         scale_x = width / self.model_width
         scale_y = height / self.model_height
-        colors = [0, 0, 255]
         text = ""
 
         # draw the boxes in original image
@@ -84,10 +83,10 @@ class sampleYOLOV7(object):
 
     def release_resource(self):
         # release resource includes acl resource, data set and unload model
-        del self._resource
-        del self._dvpp
-        del self._model
         del self.resized_image
+        del self._model
+        del self._dvpp
+        del self._resource
 
 def video_infer(video_path, model):
     cap = video.VideoCapture(video_path)
@@ -97,14 +96,15 @@ def video_infer(video_path, model):
             print('cap read end! close subprocess cap read')
             break
         if frame is not None:
+            print('start preprocess')
             model.preprocess_vis(frame)
             model.infer()
             model.postprocess(video_path)
-            if cv2.waitKey(1) & 0xFF == ord('q'):
-                break
+            cv2.waitKey(1)
         else:
             log_info("read frame finish")
             break
+    del cap
 
 def image_infer(image_path, model):
     frame = AclLiteImage(image_path)
@@ -133,5 +133,5 @@ if __name__ == '__main__':
     else:
         print('input mode is incorrect.')
 
-    model.release_resource()
     cv2.destroyAllWindows()
+    model.release_resource()
-- 
Gitee

From e927f9c00e8bbb3048f1f0565530424d500ce111 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:34:19 +0000
Subject: [PATCH 32/38] update Samples/YOLOV5Video/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5Video/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Samples/YOLOV5Video/README.md b/Samples/YOLOV5Video/README.md
index 72d93fd..b6b0243 100644
--- a/Samples/YOLOV5Video/README.md
+++ b/Samples/YOLOV5Video/README.md
@@ -81,8 +81,8 @@
 
    ```
    cd ../data
-   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.mp4 --no-check-certificate
-   wget wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg --no-check-certificate
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/yolov5s/test.h264 --no-check-certificate
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg --no-check-certificate
    ```
 
 **Note:** to use a different test video, prepare it yourself and put the test video in the data directory.
-- 
Gitee

From 7f92b5ee0c735bbc3ae921526776cca644ef2493 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:34:47 +0000
Subject: [PATCH 33/38] update Samples/YOLOV5Video/src/YOLOV5Video.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5Video/src/YOLOV5Video.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Samples/YOLOV5Video/src/YOLOV5Video.py b/Samples/YOLOV5Video/src/YOLOV5Video.py
index 2928730..5b827ec 100644
--- a/Samples/YOLOV5Video/src/YOLOV5Video.py
+++ b/Samples/YOLOV5Video/src/YOLOV5Video.py
@@ -128,7 +128,7 @@ if __name__ == '__main__':
         path = "../data/dog1_1024_683.jpg"
         image_infer(path, model)
     elif mode == "video":
-        path = "../data/output.h264"
+        path = "../data/test.h264"
         video_infer(path, model)
     else:
         print('input mode is incorrect.')
-- 
Gitee

From 2dd88e26aeccb3185c604f24d316249261bd3a96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:37:58 +0000
Subject: [PATCH 34/38] update Samples/YOLOV5Video/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5Video/README.md | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/Samples/YOLOV5Video/README.md b/Samples/YOLOV5Video/README.md
index b6b0243..ac623ec 100644
--- a/Samples/YOLOV5Video/README.md
+++ b/Samples/YOLOV5Video/README.md
@@ -37,15 +37,7 @@
 
 2. Log in to the development board as the HwHiAiUser user.
 
-3. Set environment variables.
-
-   ```
-   # Configure the header and library file paths that compilation depends on
-   export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
-   export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
-   ```
-
-4. Install the ACLLite library.
+3. Install the ACLLite library.
 
    Install the PyACLLite library by referring to the [ACLLite repository](https://gitee.com/ascend/ACLLite).
-- 
Gitee

From 053c04ba16868ce582fbdcb712b74551a06bbe87 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:38:41 +0000
Subject: [PATCH 35/38] update Samples/YOLOV5USBCamera/python/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5USBCamera/python/README.md | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/Samples/YOLOV5USBCamera/python/README.md b/Samples/YOLOV5USBCamera/python/README.md
index 58cd70a..f7f14b9 100644
--- a/Samples/YOLOV5USBCamera/python/README.md
+++ b/Samples/YOLOV5USBCamera/python/README.md
@@ -37,15 +37,7 @@
 
 2. Log in to the development board as the HwHiAiUser user.
 
-3. Set environment variables.
-
-   ```
-   # Configure the header and library file paths that compilation depends on
-   export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
-   export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
-   ```
-
-4. Install the ACLLite library.
+3. Install the ACLLite library.
 
   Install the PyACLLite library by referring to the [ACLLite repository](https://gitee.com/ascend/ACLLite).
-- 
Gitee

From 61afa31d4365849031202af7c67724a58c588d3e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:40:13 +0000
Subject: [PATCH 36/38] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20model?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Samples/YOLOV5MultiInput/python/model/.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 Samples/YOLOV5MultiInput/python/model/.keep

diff --git a/Samples/YOLOV5MultiInput/python/model/.keep b/Samples/YOLOV5MultiInput/python/model/.keep
new file mode 100644
index 0000000..e69de29
-- 
Gitee

From 86dc540bc826f5dfea0eda991cba798be074e665 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 08:43:28 +0000
Subject: [PATCH 37/38] update Samples/YOLOV5MultiInput/python/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/YOLOV5MultiInput/python/README.md | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/Samples/YOLOV5MultiInput/python/README.md b/Samples/YOLOV5MultiInput/python/README.md
index 48f6bd2..3f4a1b6 100644
--- a/Samples/YOLOV5MultiInput/python/README.md
+++ b/Samples/YOLOV5MultiInput/python/README.md
@@ -36,14 +36,10 @@
 
 1. Log in to the development board as the HwHiAiUser user.
 
-2. Set environment variables.
+2. Install the ACLLite library.
+
+   Install the PyACLLite library by referring to the [ACLLite repository](https://gitee.com/ascend/ACLLite).
 
-   ```
-   # Configure the header and library file paths that compilation depends on
-   export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
-   export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
-   export PYTHONPATH=`pwd`/python:$PYTHONPATH
-   ```
 
 #### Run the Sample
-- 
Gitee

From dd63359ae993e062c71e93cdbc7e83cd91ef218f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=94=90=E7=92=9E?= <1039183305@qq.com>
Date: Thu, 28 Mar 2024 09:02:20 +0000
Subject: [PATCH 38/38] update Samples/ResnetPicture/python/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 唐璞 <1039183305@qq.com>
---
 Samples/ResnetPicture/python/README.md | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/Samples/ResnetPicture/python/README.md b/Samples/ResnetPicture/python/README.md
index 076ba96..85f74b8 100644
--- a/Samples/ResnetPicture/python/README.md
+++ b/Samples/ResnetPicture/python/README.md
@@ -35,15 +35,7 @@
 
 1. Log in to the development board as the HwHiAiUser user.
 
-2. Set environment variables.
-
-   ```
-   # Configure the header and library file paths that compilation depends on
-   export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
-   export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
-   ```
-
-3. Install the ACLLite library.
+2. Install the ACLLite library.
 
     Refer to the [ACLLite repository](https://gitee.com/ascend/ACLLite) to install the Python version of the ACLLite library.
-- 
Gitee
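
For reference, the preprocess and postprocess workers added in patch 27 share one piece of letterbox arithmetic: the frame is shrunk by the larger of the width and height ratios, pasted into the top-left corner of the model canvas, and detection boxes are mapped back by multiplying with the same factor. Below is a small sketch of that round trip with the sample's sizes; the example box coordinates are illustrative.

```python
# Letterbox scale round trip used by the YOLOV5MultiInput sample
# (1920x1080 frames into a 640x640 model input).
VIDEO_WIDTH, VIDEO_HEIGHT = 1920, 1080
MODEL_WIDTH, MODEL_HEIGHT = 640, 640

# Forward: shrink by the larger ratio so the whole frame fits the canvas
max_scale = max(VIDEO_WIDTH / MODEL_WIDTH, VIDEO_HEIGHT / MODEL_HEIGHT)       # 3.0
resize_shape = (int(VIDEO_WIDTH / max_scale), int(VIDEO_HEIGHT / max_scale))  # (640, 360)

# Backward: a box predicted on the model canvas maps back to the frame
top_left_x_model, top_left_y_model = 100.0, 50.0   # illustrative model-space box corner
top_left_x = top_left_x_model * max_scale          # 300.0 in the original frame
top_left_y = top_left_y_model * max_scale          # 150.0 in the original frame
print(resize_shape, (top_left_x, top_left_y))
```

Because only the top-left corner of the canvas is filled, no offset subtraction is needed on the way back; a centered letterbox would additionally have to subtract the padding before scaling.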