From 2615ad3706b4f985845d7c545db5046842ae5d5e Mon Sep 17 00:00:00 2001
From: 吴仪盈 <10542047+wu-yiying@user.noreply.gitee.com>
Date: Tue, 24 May 2022 11:35:27 +0000
Subject: [PATCH 01/58] Create PoseC3D directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep
new file mode 100644
index 0000000000..e69de29bb2
--
Gitee
From 9d0e05225501205bb3bd53f359e9a6c6796c803b Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Tue, 24 May 2022 19:53:56 +0800
Subject: [PATCH 02/58] my first commit
---
.../PoseC3D/poseC3D_hmdb51_preprocess.py | 71 +++++++++++++++++++
.../cv/pose_estimation/PoseC3D/postprocess.py | 71 +++++++++++++++++++
2 files changed, 142 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
new file mode 100644
index 0000000000..6ab53cd9fc
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
@@ -0,0 +1,71 @@
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='/home/wyy/mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+ file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+ for batch in range(imgs.shape[0]):
+ l = label.cpu().numpy()[batch]
+ file.write(str(args.batch_size*i+batch) + ' ' + str(l))
+ file.write('\n')
+
+ if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
+
+ bin = imgs.cpu().numpy()
+ print(bin.shape)
+ preprocess = torch.from_numpy(bin)
+ print(preprocess.shape)
+ bin.tofile(out_path + '/' + str(i) + '.bin')
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
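For reference, a minimal sketch of reading one emitted record back; the float32 dtype and the (1, 20, 17, 48, 56, 56) layout are assumptions taken from the model input shape used elsewhere in this series, and the paths follow the script's defaults (--name out_bin, --ann_file /opt/npu/hmdb51/hmdb51.pkl):

    import numpy as np

    # one .bin per dataloader batch: (batch, clips, keypoints, T, H, W)
    arr = np.fromfile('./out_bin/0.bin', dtype=np.float32)
    arr = arr.reshape(1, 20, 17, 48, 56, 56)

    # hmdb51.info holds one "<sample index> <label>" pair per line
    with open('/opt/npu/hmdb51/hmdb51.info') as f:
        idx, label = f.readline().split()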
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py
new file mode 100644
index 0000000000..3fffc9c9a3
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py
@@ -0,0 +1,71 @@
+import os
+import argparse
+import numpy as np
+from collections import OrderedDict
+from mmaction.core import top_k_accuracy
+import torch
+import torch.nn.functional as F
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+        description='Dataset HMDB51 Postprocessing')
+ parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
+ parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+ # load info file
+ gt_labels = []
+ with open(args.info_path, 'r') as f:
+ for line in f.readlines():
+            t = line.split()[-1]
+ gt_labels.append(int(t))
+
+ # load inference result
+ results = []
+
+ num_file = len(os.listdir(args.result_path))
+ for idx in range(num_file):
+ file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
+ result = np.loadtxt(file)
+ result = torch.from_numpy(result)
+ batch_size = result.shape[0]
+ result = result.view(batch_size // 20, 20, -1) # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
+
+ result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
+ results.extend(result)
+
+
+ metrics = ['top_k_accuracy']
+ metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
+ eval_results = OrderedDict()
+ for metric in metrics:
+ print(f'Evaluating {metric} ...')
+ if metric == 'top_k_accuracy':
+ topk = metric_options.setdefault('top_k_accuracy',
+ {}).setdefault('topk', (1, 5))
+ if not isinstance(topk, (int, tuple)):
+ raise TypeError(
+ f'topk must be int or tuple of int, but got {type(topk)}')
+ if isinstance(topk, int):
+ topk = (topk, )
+
+ top_k_acc = top_k_accuracy(results, gt_labels, topk)
+ log_msg = []
+ for k, acc in zip(topk, top_k_acc):
+ eval_results[f'top{k}_acc'] = acc
+ log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
+ log_msg = ''.join(log_msg)
+ print(log_msg)
+ continue
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--
Gitee
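The view/softmax/mean sequence in postprocess.py reproduces the recognizer head's clip averaging: each video contributes 20 clip scores that are turned into probabilities and averaged. A self-contained sketch of that reduction, assuming the 51-class HMDB51 label set and random scores in place of real model output:

    import torch
    import torch.nn.functional as F

    num_segs, num_classes = 20, 51
    cls_score = torch.randn(num_segs, num_classes)          # rows of one output file
    cls_score = cls_score.view(-1, num_segs, num_classes)   # (videos, clips, classes)
    video_prob = F.softmax(cls_score, dim=2).mean(dim=1)    # (videos, classes)
    pred = video_prob.argmax(dim=1)                         # predicted label per video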
From 1c02d0fcdc99232839bc398c4e96ca694cada4f9 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Tue, 7 Jun 2022 04:06:05 +0000
Subject: [PATCH 03/58] Shell scripts for T4 performance testing and ONNX/OM
 model generation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/perf_t4.sh | 13 +++++
.../cv/pose_estimation/PoseC3D/pth2om.sh | 48 +++++++++++++++++++
2 files changed, 61 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
new file mode 100644
index 0000000000..fa0568b478
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+
+rm -rf perf_bs1.log
+trtexec --onnx=posec3d.onnx --fp16 --threads > perf_bs1.log
+perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
+if [ -n "$perf_str" ]; then
+ perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
+else
+ perf_str=`grep "mean.*ms$" perf_bs1.log`
+ perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
+fi
+awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
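The closing awk line converts trtexec's mean latency into throughput as 1000 * batch_size / mean_ms; with an illustrative mean of 8 ms at batch size 1 it would print 125 fps:

    mean_ms, batch_size = 8.0, 1          # illustrative numbers, not measured values
    print(1000 * batch_size / mean_ms)    # 125.0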
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
new file mode 100644
index 0000000000..ee64a50e7e
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+not_skip_onnx=true
+
+for para in $*
+do
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+ if [[ $para == --not_skip_onnx* ]]; then
+ not_skip_onnx=`echo ${para#*=}`
+ fi
+done
+
+# ======================= convert onnx =======================================
+if [ $not_skip_onnx == true ]; then
+ rm -rf posec3d.onnx
+ python ./posec3d_pytorch2onnx.py \
+ ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py \
+ ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth \
+ --shape 1 20 17 48 56 56 \
+ --verify \
+ --output-file ./posec3d.onnx
+ if [ -f "posec3d.onnx" ]; then
+ echo "==> 1. creating onnx model successfully."
+ else
+ echo "onnx export failed"
+ exit -1
+ fi
+fi
+
+
+# ======================= convert om =========================================
+rm -rf posec3d_bs${batch_size}.om
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d.onnx \
+ --output=./posec3d_bs${batch_size} \
+ --input_format=ND --input_shape="invals:${batch_size},20,17,48,56,56" \
+ --log=debug --soc_version=Ascend710
+if [ -f "posec3d_bs${batch_size}.om" ] ; then
+ echo "==> 2. creating om model successfully."
+else
+    echo "om export failed"
+fi
+echo "==> 3. Done."
\ No newline at end of file
--
Gitee
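Before invoking atc, the exported graph can be checked against the --input_shape string. A minimal sketch with the onnx package, filtering out initializers the same way the export script's verify step does (the input name invals is assigned by posec3d_pytorch2onnx.py):

    import onnx

    model = onnx.load('posec3d.onnx')
    inits = {t.name for t in model.graph.initializer}
    feed = [i for i in model.graph.input if i.name not in inits][0]
    dims = [d.dim_value for d in feed.type.tensor_type.shape.dim]
    print(feed.name, dims)   # expected: invals [1, 20, 17, 48, 56, 56]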
From 1da285b27c385bd0310669cdce8bf1fba0799706 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 08:37:29 +0000
Subject: [PATCH 04/58] Accuracy test commands
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../pose_estimation/PoseC3D/eval_acc_perf.sh | 57 +++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
new file mode 100644
index 0000000000..84243bd40d
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+datasets_path="/opt/npu/hmdb51"
+
+for para in $*
+do
+ if [[ $para == --datasets_path* ]]; then
+ datasets_path=`echo ${para#*=}`
+ fi
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# ======================= generate prep_dataset ==============================
+rm -rf ./prep_hmdb51_bs${batch_size}
+python posec3d_preprocess.py \
+    --batch_size ${batch_size} \
+ --data_root ${datasets_path}/rawframes/ \
+ --ann_file ./hmdb51.pkl \
+ --name ./prep_hmdb51_bs${batch_size}
+if [ $? != 0 ]; then
+ echo "posec3d preprocess fail!"
+ exit -1
+fi
+echo "==> 1. creating ./prep_hmdb51_bs${batch_size} successfully."
+
+# =============================== msame ======================================
+if [ ! -d ./result ]; then
+ mkdir ./result
+fi
+rm -rf ./result/outputs_bs${batch_size}_om
+./msame --model "./posec3d_bs${batch_size}.om" \
+ --input "./prep_hmdb51_bs${batch_size}" \
+ --output "./result/outputs_bs${batch_size}_om" \
+ --outfmt TXT > ./msame_bs${batch_size}.txt
+if [ $? != 0 ]; then
+ echo "msame bs${batch_size} fail!"
+ exit -1
+fi
+echo "==> 2. running posec3d_bs${batch_size}.om with msame successfully."
+
+
+# ============================ evaluate ======================================
+python postprocess.py \
+ --result_path ./result/outputs_bs${batch_size}_om/ \
+    --info_path ./hmdb51.info
+
+if [ $? != 0 ]; then
+    echo "postprocess fail!"
+ exit -1
+fi
+echo "==> 3. evaluating hmdb51 on bs${batch_size} successfully."
+echo '==> 4. Done.'
\ No newline at end of file
--
Gitee
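top_k_accuracy from mmaction.core, called in the evaluate step, is the fraction of samples whose ground-truth label falls among the k highest-scoring classes. A tiny worked example with made-up scores:

    import numpy as np
    from mmaction.core import top_k_accuracy

    scores = [np.array([0.1, 0.7, 0.2]), np.array([0.5, 0.2, 0.3])]
    labels = [1, 2]
    top1, top2 = top_k_accuracy(scores, labels, (1, 2))
    print(top1, top2)   # 0.5 1.0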
From 2646ca5539b7b46b982187ee8076b821271147b6 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 11:23:00 +0000
Subject: [PATCH 05/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/pose_estimation/PoseC3D/perf_t4.sh | 13 -------------
1 file changed, 13 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
deleted file mode 100644
index fa0568b478..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-
-rm -rf perf_bs1.log
-trtexec --onnx=posec3d.onnx --fp16 --threads > perf_bs1.log
-perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
-if [ -n "$perf_str" ]; then
- perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
-else
- perf_str=`grep "mean.*ms$" perf_bs1.log`
- perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
-fi
-awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
--
Gitee
From bf6c29740a03c041324cd81cb5a340be64de4696 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 11:31:15 +0000
Subject: [PATCH 06/58] T4 performance test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/pose_estimation/PoseC3D/perf_t4.sh | 13 +++++++++++++
1 file changed, 13 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
new file mode 100644
index 0000000000..bd3100b7cb
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+rm -rf perf_bs1.log
+trtexec --onnx=posec3d.onnx --fp16 --threads > perf_bs1.log
+perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
+if [ -n "$perf_str" ]; then
+ perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
+else
+ perf_str=`grep "mean.*ms$" perf_bs1.log`
+ perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
+fi
+awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
--
Gitee
From 08a3e5ae17a09c9f0a95d1f83abc3c3cc91aa845 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 11:31:23 +0000
Subject: [PATCH 07/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/.keep
deleted file mode 100644
index e69de29bb2..0000000000
--
Gitee
From eed75cfd4bbe7531ed48745a86f13fb0b9de0450 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 12:33:30 +0000
Subject: [PATCH 08/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/poseC3D_hmdb51_preprocess.py | 71 -------------------
1 file changed, 71 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
deleted file mode 100644
index 6ab53cd9fc..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/poseC3D_hmdb51_preprocess.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='/home/wyy/mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
-    out_path = args.name
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- file.write('\n')
-
- if imgs.shape[0] != args.batch_size:
-            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
- bin = imgs.cpu().numpy()
- print(bin.shape)
- preprocess = torch.from_numpy(bin)
- print(preprocess.shape)
- bin.tofile(out_path + '/' + str(i) + '.bin')
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 10f76f5bfc84336162dfeed6afe84c0da081b42c Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 12:34:35 +0000
Subject: [PATCH 09/58] Preprocessing script: rename the info name that
 clashed with a Python built-in
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/preprocess.py | 73 +++++++++++++++++++
1 file changed, 73 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
new file mode 100644
index 0000000000..d53fbd7dc2
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
@@ -0,0 +1,73 @@
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+ info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+ for batch in range(imgs.shape[0]):
+ l = label.cpu().numpy()[batch]
+ info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
+ info_file.write('\n')
+
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
+
+        bin = imgs.cpu().numpy()
+        print(bin.shape)
+        preprocess = torch.from_numpy(bin)
+        print(preprocess.shape)
+        bin.tofile(out_path + '/' + str(i) + '.bin')
+
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--
Gitee
From e1e0f9791b06cfdcf4db57aec3d4d6fcbb2afefe Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 12:36:01 +0000
Subject: [PATCH 10/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/preprocess.py | 73 -------------------
1 file changed, 73 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
deleted file mode 100644
index d53fbd7dc2..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
-    out_path = args.name
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
-        if imgs.shape[0] != args.batch_size:
-            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
-        bin = imgs.cpu().numpy()
-        print(bin.shape)
-        preprocess = torch.from_numpy(bin)
-        print(preprocess.shape)
-        bin.tofile(out_path + '/' + str(i) + '.bin')
-
-    info_file.close()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
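F.pad consumes (left, right) pairs starting from the last dimension, so padding the batch axis of the 6-D (batch, clips, keypoints, T, H, W) tensor takes twelve entries, which is what the padding branch above relies on. A quick check:

    import torch
    import torch.nn.functional as F

    x = torch.zeros(1, 20, 17, 48, 56, 56)
    y = F.pad(x, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3))   # last pair pads dim 0
    print(y.shape)   # torch.Size([4, 20, 17, 48, 56, 56])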
From 4c19802ce7864476f4f424abbc2bff62c273e19b Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Wed, 8 Jun 2022 12:36:17 +0000
Subject: [PATCH 11/58] Preprocessing script
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/preprocess.py | 73 +++++++++++++++++++
1 file changed, 73 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
new file mode 100644
index 0000000000..93ee919329
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
@@ -0,0 +1,73 @@
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+ info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+ for batch in range(imgs.shape[0]):
+ l = label.cpu().numpy()[batch]
+ info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
+ info_file.write('\n')
+
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
+
+        bin_info = imgs.cpu().numpy()
+        print(bin_info.shape)
+        preprocess = torch.from_numpy(bin_info)
+        print(preprocess.shape)
+        bin_info.tofile(out_path + '/' + str(i) + '.bin')
+
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--
Gitee
From 98328e1ec2b239cdc463024536bf417b103629fe Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:05:06 +0000
Subject: [PATCH 12/58] Convert pth weights to an ONNX model
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_pytorch2onnx.py | 186 ++++++++++++++++++
1 file changed, 186 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
new file mode 100644
index 0000000000..354021740d
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
@@ -0,0 +1,186 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import warnings
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.runner import load_checkpoint
+
+from mmaction.models import build_model
+
+try:
+ import onnx
+ import onnxruntime as rt
+except ImportError as e:
+ raise ImportError(f'Please install onnx and onnxruntime first. {e}')
+
+try:
+ from mmcv.onnx.symbolic import register_extra_symbolics
+except ModuleNotFoundError:
+ raise NotImplementedError('please update mmcv to version>=1.0.4')
+
+
+def _convert_batchnorm(module):
+ """Convert the syncBNs into normal BN3ds."""
+ module_output = module
+ if isinstance(module, torch.nn.SyncBatchNorm):
+ module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
+ module.momentum, module.affine,
+ module.track_running_stats)
+ if module.affine:
+ module_output.weight.data = module.weight.data.clone().detach()
+ module_output.bias.data = module.bias.data.clone().detach()
+ # keep requires_grad unchanged
+ module_output.weight.requires_grad = module.weight.requires_grad
+ module_output.bias.requires_grad = module.bias.requires_grad
+ module_output.running_mean = module.running_mean
+ module_output.running_var = module.running_var
+ module_output.num_batches_tracked = module.num_batches_tracked
+ for name, child in module.named_children():
+ module_output.add_module(name, _convert_batchnorm(child))
+ del module
+ return module_output
+
+
+def pytorch2onnx(model,
+ input_shape,
+ opset_version=11,
+ show=False,
+ output_file='tmp.onnx',
+ verify=False):
+ """Convert pytorch model to onnx model.
+
+ Args:
+ model (:obj:`nn.Module`): The pytorch model to be exported.
+ input_shape (tuple[int]): The input tensor shape of the model.
+ opset_version (int): Opset version of onnx used. Default: 11.
+ show (bool): Determines whether to print the onnx model architecture.
+ Default: False.
+ output_file (str): Output onnx model name. Default: 'tmp.onnx'.
+ verify (bool): Determines whether to verify the onnx model.
+ Default: False.
+ """
+ model.cpu().eval()
+
+ input_tensor = torch.randn(input_shape)
+
+ register_extra_symbolics(opset_version)
+ input_names = ["invals"]
+ torch.onnx.export(
+ model,
+ input_tensor,
+ output_file,
+ export_params=True,
+ keep_initializers_as_inputs=True,
+ verbose=show,
+ opset_version=opset_version,
+ input_names=input_names
+ )
+
+ print(f'Successfully exported ONNX model: {output_file}')
+ if verify:
+ # check by onnx
+ onnx_model = onnx.load(output_file)
+ onnx.checker.check_model(onnx_model)
+
+ # check the numerical value
+ # get pytorch output
+ pytorch_result = model(input_tensor)[0].detach().numpy()
+
+ # get onnx output
+ input_all = [node.name for node in onnx_model.graph.input]
+ input_initializer = [
+ node.name for node in onnx_model.graph.initializer
+ ]
+ net_feed_input = list(set(input_all) - set(input_initializer))
+ assert len(net_feed_input) == 1
+ sess = rt.InferenceSession(output_file)
+ onnx_result = sess.run(
+ None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
+ # only compare part of results
+ random_class = np.random.randint(pytorch_result.shape[1])
+ assert np.allclose(
+ pytorch_result[:, random_class], onnx_result[:, random_class]
+ ), 'The outputs are different between Pytorch and ONNX'
+ print('The numerical values are same between Pytorch and ONNX')
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ description='Convert MMAction2 models to ONNX')
+ parser.add_argument('config', help='test config file path')
+ parser.add_argument('checkpoint', help='checkpoint file')
+ parser.add_argument('--show', action='store_true', help='show onnx graph')
+ parser.add_argument('--output-file', type=str, default='tmp.onnx')
+ parser.add_argument('--opset-version', type=int, default=11)
+ parser.add_argument(
+ '--verify',
+ action='store_true',
+ help='verify the onnx model output against pytorch output')
+ parser.add_argument(
+ '--is-localizer',
+ action='store_true',
+ help='whether it is a localizer')
+ parser.add_argument(
+ '--shape',
+ type=int,
+ nargs='+',
+ default=[1, 3, 8, 224, 224],
+ help='input video size')
+ parser.add_argument(
+ '--softmax',
+ action='store_true',
+        help='whether to add a softmax layer at the end of recognizers')
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+
+ assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
+
+ cfg = mmcv.Config.fromfile(args.config)
+ # import modules from string list.
+
+ if not args.is_localizer:
+ cfg.model.backbone.pretrained = None
+
+ # build the model
+ model = build_model(
+ cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
+ model = _convert_batchnorm(model)
+
+ # onnx.export does not support kwargs
+ if hasattr(model, 'forward_dummy'):
+ from functools import partial
+ model.forward = partial(model.forward_dummy, softmax=args.softmax)
+ elif hasattr(model, '_forward') and args.is_localizer:
+ model.forward = model._forward
+ else:
+ raise NotImplementedError(
+ 'Please implement the forward method for exporting.')
+
+ checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+
+ # convert model to onnx file
+ pytorch2onnx(
+ model,
+ args.shape,
+ opset_version=args.opset_version,
+ show=args.show,
+ output_file=args.output_file,
+ verify=args.verify)
+
+ # Following strings of text style are from colorama package
+ bright_style, reset_style = '\x1b[1m', '\x1b[0m'
+ red_text, blue_text = '\x1b[31m', '\x1b[34m'
+ white_background = '\x1b[107m'
+
+ msg = white_background + bright_style + red_text
+ msg += 'DeprecationWarning: This tool will be deprecated in future. '
+ msg += blue_text + 'Welcome to use the unified model deployment toolbox '
+ msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
+ msg += reset_style
+ warnings.warn(msg)
--
Gitee
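After export, the model can be smoke-tested without the mmaction2 stack. A minimal sketch, assuming the invals input name and the 1 20 17 48 56 56 shape passed via --shape; with this export the output is per-clip scores (20 rows of 51 HMDB51 class scores) that postprocess.py later averages:

    import numpy as np
    import onnxruntime as rt

    sess = rt.InferenceSession('posec3d.onnx')
    dummy = np.random.randn(1, 20, 17, 48, 56, 56).astype(np.float32)
    scores = sess.run(None, {'invals': dummy})[0]
    print(scores.shape)   # expected (20, 51)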
From 6565a88e7cd2897f8999a6209a63c97b80438ec6 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:05:45 +0000
Subject: [PATCH 13/58] Create test directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep
new file mode 100644
index 0000000000..e69de29bb2
--
Gitee
From beb1a1c06e05e42ccfef8688744b55ecb33a1a4a Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:06:04 +0000
Subject: [PATCH 14/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/pth2om.sh | 48 -------------------
1 file changed, 48 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
deleted file mode 100644
index ee64a50e7e..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/pth2om.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-batch_size=1
-not_skip_onnx=true
-
-for para in $*
-do
- if [[ $para == --batch_size* ]]; then
- batch_size=`echo ${para#*=}`
- fi
- if [[ $para == --not_skip_onnx* ]]; then
- not_skip_onnx=`echo ${para#*=}`
- fi
-done
-
-# ======================= convert onnx =======================================
-if [ $not_skip_onnx == true ]; then
- rm -rf posec3d.onnx
- python ./posec3d_pytorch2onnx.py \
- ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py \
- ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth \
- --shape 1 20 17 48 56 56 \
- --verify \
- --output-file ./posec3d.onnx
- if [ -f "posec3d.onnx" ]; then
- echo "==> 1. creating onnx model successfully."
- else
- echo "onnx export failed"
- exit -1
- fi
-fi
-
-
-# ======================= convert om =========================================
-rm -rf posec3d_bs${batch_size}.om
-export TUNE_BANK_PATH="./aoe_result_bs1"
-atc --framework=5 --model=./posec3d.onnx \
- --output=./posec3d_bs${batch_size} \
- --input_format=ND --input_shape="invals:${batch_size},20,17,48,56,56" \
- --log=debug --soc_version=Ascend710
-if [ -f "posec3d_bs${batch_size}.om" ] ; then
- echo "==> 2. creating om model successfully."
-else
-    echo "om export failed"
-fi
-echo "==> 3. Done."
\ No newline at end of file
--
Gitee
From c7a988ef9e70c0dd1972be977f2c8c0dc5b905b5 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:06:47 +0000
Subject: [PATCH 15/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/pose_estimation/PoseC3D/perf_t4.sh | 13 -------------
1 file changed, 13 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
deleted file mode 100644
index bd3100b7cb..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/perf_t4.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-rm -rf perf_bs1.log
-trtexec --onnx=posec3d.onnx --fp16 --threads > perf_bs1.log
-perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
-if [ -n "$perf_str" ]; then
- perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
-else
- perf_str=`grep "mean.*ms$" perf_bs1.log`
- perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
-fi
-awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
--
Gitee
From adf2c4a2e5d4a77c6b681a7de5f28899f519c7d8 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:07:26 +0000
Subject: [PATCH 16/58] T4 performance test and pth-to-ONNX-to-OM conversion
 scripts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../pose_estimation/PoseC3D/test/perf_t4.sh | 13 +++++
.../cv/pose_estimation/PoseC3D/test/pth2om.sh | 48 +++++++++++++++++++
2 files changed, 61 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
new file mode 100644
index 0000000000..a5ad593d36
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+rm -rf perf_bs1.log
+trtexec --onnx=posec3d_bs1.onnx --fp16 --threads > perf_bs1.log
+perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
+if [ -n "$perf_str" ]; then
+ perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
+else
+ perf_str=`grep "mean.*ms$" perf_bs1.log`
+ perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
+fi
+awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
new file mode 100644
index 0000000000..eda7511676
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+not_skip_onnx=true
+
+for para in $*
+do
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+ if [[ $para == --not_skip_onnx* ]]; then
+ not_skip_onnx=`echo ${para#*=}`
+ fi
+done
+
+# ======================= convert onnx =======================================
+if [ $not_skip_onnx == true ]; then
+ rm -rf posec3d.onnx
+ python ./posec3d_pytorch2onnx.py \
+ ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py \
+ ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth \
+ --shape 1 20 17 48 56 56 \
+ --verify \
+ --output-file ./posec3d_bs${batch_size}.onnx
+ if [ -f "posec3d_bs${batch_size}.onnx" ]; then
+ echo "==> 1. creating onnx model successfully."
+ else
+ echo "onnx export failed"
+ exit -1
+ fi
+fi
+
+
+# ======================= convert om =========================================
+rm -rf posec3d_bs${batch_size}.om
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs${batch_size}.onnx \
+ --output=./posec3d_bs${batch_size} \
+ --input_format=ND --input_shape="invals:${batch_size},20,17,48,56,56" \
+ --log=debug --soc_version=Ascend710
+if [ -f "posec3d_bs${batch_size}.om" ] ; then
+ echo "==> 2. creating om model successfully."
+else
+    echo "om export failed"
+fi
+echo "==> 3. Done."
\ No newline at end of file
--
Gitee
From 5fde8c72fc243f0fed735de913db240e144fb9de Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:07:32 +0000
Subject: [PATCH 17/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/.keep
deleted file mode 100644
index e69de29bb2..0000000000
--
Gitee
From 4e39060c82f09a79e8eebe502e40664dd4026102 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:08:11 +0000
Subject: [PATCH 18/58] Rename
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py to
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/{postprocess.py => posec3d_postprocess.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/{postprocess.py => posec3d_postprocess.py} (100%)
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
similarity index 100%
rename from ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/postprocess.py
rename to ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
--
Gitee
From 45d69b26c9a6f6533eb242c92a39158d705de2fc Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:08:23 +0000
Subject: [PATCH 19/58] Rename
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py to
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/{preprocess.py => posec3d_preprocess.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/{preprocess.py => posec3d_preprocess.py} (100%)
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
similarity index 100%
rename from ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/preprocess.py
rename to ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
--
Gitee
From 80b98165afb8b64a5e832152f0c0639ad9f507a0 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:10:05 +0000
Subject: [PATCH 20/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../pose_estimation/PoseC3D/eval_acc_perf.sh | 57 -------------------
1 file changed, 57 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
deleted file mode 100644
index 84243bd40d..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/eval_acc_perf.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-batch_size=1
-datasets_path="/opt/npu/hmdb51"
-
-for para in $*
-do
- if [[ $para == --datasets_path* ]]; then
- datasets_path=`echo ${para#*=}`
- fi
- if [[ $para == --batch_size* ]]; then
- batch_size=`echo ${para#*=}`
- fi
-done
-
-# ======================= generate prep_dataset ==============================
-rm -rf ./prep_hmdb51_bs${batch_size}
-python posec3d_preprocess.py \
-    --batch_size ${batch_size} \
- --data_root ${datasets_path}/rawframes/ \
- --ann_file ./hmdb51.pkl \
- --name ./prep_hmdb51_bs${batch_size}
-if [ $? != 0 ]; then
- echo "posec3d preprocess fail!"
- exit -1
-fi
-echo "==> 1. creating ./prep_hmdb51_bs${batch_size} successfully."
-
-# =============================== msame ======================================
-if [ ! -d ./result ]; then
- mkdir ./result
-fi
-rm -rf ./result/outputs_bs${batch_size}_om
-./msame --model "./posec3d_bs${batch_size}.om" \
- --input "./prep_hmdb51_bs${batch_size}" \
- --output "./result/outputs_bs${batch_size}_om" \
- --outfmt TXT > ./msame_bs${batch_size}.txt
-if [ $? != 0 ]; then
- echo "msame bs${batch_size} fail!"
- exit -1
-fi
-echo "==> 2. running posec3d_bs${batch_size}.om with msame successfully."
-
-
-# ============================ evaluate ======================================
-python postprocess.py \
- --result_path ./result/outputs_bs${batch_size}_om/ \
-    --info_path ./hmdb51.info
-
-if [ $? != 0 ]; then
-    echo "postprocess fail!"
- exit -1
-fi
-echo "==> 3. evaluating hmdb51 on bs${batch_size} successfully."
-echo '==> 4. Done.'
\ No newline at end of file
--
Gitee
From 6d29198a9c58bd08fb26833abe2455478bdc4468 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:18:03 +0000
Subject: [PATCH 21/58] Add requirements.txt
---
.../contrib/cv/pose_estimation/PoseC3D/requirements.txt | 7 +++++++
1 file changed, 7 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
new file mode 100644
index 0000000000..0fc693ca80
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
@@ -0,0 +1,7 @@
+torch == 1.11.0
+torchvision == 0.12.0
+onnx == 1.10.2
+numpy == 1.21.2
+Pillow == 9.0.1
+opencv-python == 4.5.5.64
+mmcv-full == 1.4.8
\ No newline at end of file
--
Gitee
From baa08a9c17d10de73dcd1b92eb370c300a775b2b Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:21:46 +0000
Subject: [PATCH 22/58] Delete
 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 73 -------------------
1 file changed, 73 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
deleted file mode 100644
index 93ee919329..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
-    out_path = args.name
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
-        if imgs.shape[0] != args.batch_size:
-            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
-        bin_info = imgs.cpu().numpy()
-        print(bin_info.shape)
-        preprocess = torch.from_numpy(bin_info)
-        print(preprocess.shape)
-        bin_info.tofile(out_path + '/' + str(i) + '.bin')
-
-    info_file.close()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 200737867e7657604931e342938567cb2a79207b Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Fri, 10 Jun 2022 17:22:05 +0000
Subject: [PATCH 23/58] Rename variable
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 73 +++++++++++++++++++
1 file changed, 73 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
new file mode 100644
index 0000000000..93ee919329
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
@@ -0,0 +1,73 @@
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+ info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+ for batch in range(imgs.shape[0]):
+ l = label.cpu().numpy()[batch]
+ info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
+ info_file.write('\n')
+
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
+
+        bin_info = imgs.cpu().numpy()
+        print(bin_info.shape)
+        preprocess = torch.from_numpy(bin_info)
+        print(preprocess.shape)
+        bin_info.tofile(out_path + '/' + str(i) + '.bin')
+
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--
Gitee
From ccb17f344445ae671ac160ad7c5adbe64fe5e170 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 14:55:12 +0000
Subject: [PATCH 24/58] Add LICENSE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/LICENSE | 204 ++++++++++++++++++
1 file changed, 204 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
new file mode 100644
index 0000000000..7da074284e
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
@@ -0,0 +1,204 @@
+Copyright 2018-2019 Open-MMLab. All rights reserved.
+Copyright 2022 Huawei Technologies Co., Ltd
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2019 Open-MMLab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--
Gitee
From 82f4f6061fa6397daca4e0dddaa037ae5987f6c1 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:15:31 +0000
Subject: [PATCH 25/58] Delete file ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/LICENSE | 204 ------------------
1 file changed, 204 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
deleted file mode 100644
index 7da074284e..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
+++ /dev/null
@@ -1,204 +0,0 @@
-Copyright 2018-2019 Open-MMLab. All rights reserved.
-Copyright 2022 Huawei Technologies Co., Ltd
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2018-2019 Open-MMLab.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--
Gitee
From e3f59f9e73dea3291e57b9ef967d6038a08c9e26 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:15:45 +0000
Subject: [PATCH 26/58] License
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/LICENSE | 201 ++++++++++++++++++
1 file changed, 201 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
new file mode 100644
index 0000000000..6cd71007de
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2019 Open-MMLab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--
Gitee
From 48a5f3ba6c3938d5e36c915781119bf3675e824d Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:48:30 +0000
Subject: [PATCH 27/58] Delete file ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_pytorch2onnx.py | 186 ------------------
1 file changed, 186 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
deleted file mode 100644
index 354021740d..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import argparse
-import warnings
-
-import mmcv
-import numpy as np
-import torch
-from mmcv.runner import load_checkpoint
-
-from mmaction.models import build_model
-
-try:
- import onnx
- import onnxruntime as rt
-except ImportError as e:
- raise ImportError(f'Please install onnx and onnxruntime first. {e}')
-
-try:
- from mmcv.onnx.symbolic import register_extra_symbolics
-except ModuleNotFoundError:
- raise NotImplementedError('please update mmcv to version>=1.0.4')
-
-
-def _convert_batchnorm(module):
- """Convert the syncBNs into normal BN3ds."""
- module_output = module
- if isinstance(module, torch.nn.SyncBatchNorm):
- module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
- module.momentum, module.affine,
- module.track_running_stats)
- if module.affine:
- module_output.weight.data = module.weight.data.clone().detach()
- module_output.bias.data = module.bias.data.clone().detach()
- # keep requires_grad unchanged
- module_output.weight.requires_grad = module.weight.requires_grad
- module_output.bias.requires_grad = module.bias.requires_grad
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- for name, child in module.named_children():
- module_output.add_module(name, _convert_batchnorm(child))
- del module
- return module_output
-
-
-def pytorch2onnx(model,
- input_shape,
- opset_version=11,
- show=False,
- output_file='tmp.onnx',
- verify=False):
- """Convert pytorch model to onnx model.
-
- Args:
- model (:obj:`nn.Module`): The pytorch model to be exported.
- input_shape (tuple[int]): The input tensor shape of the model.
- opset_version (int): Opset version of onnx used. Default: 11.
- show (bool): Determines whether to print the onnx model architecture.
- Default: False.
- output_file (str): Output onnx model name. Default: 'tmp.onnx'.
- verify (bool): Determines whether to verify the onnx model.
- Default: False.
- """
- model.cpu().eval()
-
- input_tensor = torch.randn(input_shape)
-
- register_extra_symbolics(opset_version)
- input_names = ["invals"]
- torch.onnx.export(
- model,
- input_tensor,
- output_file,
- export_params=True,
- keep_initializers_as_inputs=True,
- verbose=show,
- opset_version=opset_version,
- input_names=input_names
- )
-
- print(f'Successfully exported ONNX model: {output_file}')
- if verify:
- # check by onnx
- onnx_model = onnx.load(output_file)
- onnx.checker.check_model(onnx_model)
-
- # check the numerical value
- # get pytorch output
- pytorch_result = model(input_tensor)[0].detach().numpy()
-
- # get onnx output
- input_all = [node.name for node in onnx_model.graph.input]
- input_initializer = [
- node.name for node in onnx_model.graph.initializer
- ]
- net_feed_input = list(set(input_all) - set(input_initializer))
- assert len(net_feed_input) == 1
- sess = rt.InferenceSession(output_file)
- onnx_result = sess.run(
- None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
- # only compare part of results
- random_class = np.random.randint(pytorch_result.shape[1])
- assert np.allclose(
- pytorch_result[:, random_class], onnx_result[:, random_class]
- ), 'The outputs are different between Pytorch and ONNX'
- print('The numerical values are same between Pytorch and ONNX')
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Convert MMAction2 models to ONNX')
- parser.add_argument('config', help='test config file path')
- parser.add_argument('checkpoint', help='checkpoint file')
- parser.add_argument('--show', action='store_true', help='show onnx graph')
- parser.add_argument('--output-file', type=str, default='tmp.onnx')
- parser.add_argument('--opset-version', type=int, default=11)
- parser.add_argument(
- '--verify',
- action='store_true',
- help='verify the onnx model output against pytorch output')
- parser.add_argument(
- '--is-localizer',
- action='store_true',
- help='whether it is a localizer')
- parser.add_argument(
- '--shape',
- type=int,
- nargs='+',
- default=[1, 3, 8, 224, 224],
- help='input video size')
- parser.add_argument(
- '--softmax',
- action='store_true',
- help='wheter to add softmax layer at the end of recognizers')
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- args = parse_args()
-
- assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
-
- cfg = mmcv.Config.fromfile(args.config)
- # import modules from string list.
-
- if not args.is_localizer:
- cfg.model.backbone.pretrained = None
-
- # build the model
- model = build_model(
- cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
- model = _convert_batchnorm(model)
-
- # onnx.export does not support kwargs
- if hasattr(model, 'forward_dummy'):
- from functools import partial
- model.forward = partial(model.forward_dummy, softmax=args.softmax)
- elif hasattr(model, '_forward') and args.is_localizer:
- model.forward = model._forward
- else:
- raise NotImplementedError(
- 'Please implement the forward method for exporting.')
-
- checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
-
- # convert model to onnx file
- pytorch2onnx(
- model,
- args.shape,
- opset_version=args.opset_version,
- show=args.show,
- output_file=args.output_file,
- verify=args.verify)
-
- # Following strings of text style are from colorama package
- bright_style, reset_style = '\x1b[1m', '\x1b[0m'
- red_text, blue_text = '\x1b[31m', '\x1b[34m'
- white_background = '\x1b[107m'
-
- msg = white_background + bright_style + red_text
- msg += 'DeprecationWarning: This tool will be deprecated in future. '
- msg += blue_text + 'Welcome to use the unified model deployment toolbox '
- msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
- msg += reset_style
- warnings.warn(msg)
--
Gitee
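
For reference, the --verify path of the converter deleted above checks that
PyTorch and onnxruntime produce matching outputs (the script compares one
randomly chosen class column). A self-contained sketch of the same check,
with a toy model standing in for the PoseC3D recognizer (the toy model,
shapes, and tolerance are illustrative assumptions):

    import numpy as np
    import torch
    import onnxruntime as rt

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(16, 4)).eval()
    x = torch.randn(2, 16)
    torch.onnx.export(model, x, 'tmp.onnx', input_names=['invals'], opset_version=11)

    pytorch_result = model(x).detach().numpy()
    sess = rt.InferenceSession('tmp.onnx')
    onnx_result = sess.run(None, {'invals': x.numpy()})[0]

    # the deleted script samples one random class; comparing everything is stricter
    assert np.allclose(pytorch_result, onnx_result, atol=1e-5)
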
From 8fb33c8ef878770bd2d3cbac1e2b884b838cdd32 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:48:37 +0000
Subject: [PATCH 28/58] Delete file ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 73 -------------------
1 file changed, 73 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
deleted file mode 100644
index 93ee919329..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
- out_path = '/home/wyy/hmdb51/out_bin_1'
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
- info_file.close()
-
- if imgs.shape[0] != args.batch_size:
- imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
- bin_info = imgs.cpu().numpy()
- print(bin_info.shape)
- preprocess = torch.from_numpy(bin_info)
- print(preprocess.shape)
- bin_info.tofile(out_path + '/' + str(i) + '.bin')
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 931f236a5edfbe86f83446ceb458dbf494638be7 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:48:44 +0000
Subject: [PATCH 29/58] Delete file ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py | 71 -------------------
1 file changed, 71 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
deleted file mode 100644
index 3fffc9c9a3..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import os
-import argparse
-import numpy as np
-from collections import OrderedDict
-from mmaction.core import top_k_accuracy
-import torch
-import pdb
-import torch.nn.functional as F
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Dataset K400 Postprocessing')
- parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
- parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
-
- # load info file
- gt_labels = []
- with open(args.info_path, 'r') as f:
- for line in f.readlines():
- t = line.split( )[-1]
- gt_labels.append(int(t))
-
- # load inference result
- results = []
-
- num_file = len(os.listdir(args.result_path))
- for idx in range(num_file):
- file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
- result = np.loadtxt(file)
- result = torch.from_numpy(result)
- batch_size = result.shape[0]
-# pdb.set_trace()
- result = result.view(batch_size // 20, 20, -1) # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
-
- result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
- results.extend(result)
-
-
- metrics = ['top_k_accuracy']
- metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
- eval_results = OrderedDict()
- for metric in metrics:
- print(f'Evaluating {metric} ...')
- if metric == 'top_k_accuracy':
- topk = metric_options.setdefault('top_k_accuracy',
- {}).setdefault('topk', (1, 5))
- if not isinstance(topk, (int, tuple)):
- raise TypeError(
- f'topk must be int or tuple of int, but got {type(topk)}')
- if isinstance(topk, int):
- topk = (topk, )
-
- top_k_acc = top_k_accuracy(results, gt_labels, topk)
- log_msg = []
- for k, acc in zip(topk, top_k_acc):
- eval_results[f'top{k}_acc'] = acc
- log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
- log_msg = ''.join(log_msg)
- print(log_msg)
- continue
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
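
The postprocess script deleted above (re-added with a license header in the
next patch) turns flat per-clip scores into per-video predictions: it regroups
them as [videos, 20, classes], softmaxes each clip, and averages over the 20
clips, mirroring the cls_score averaging referenced in its comments. A toy
sketch of that reduction (num_segs = 20 comes from the script; the batch of
two videos is illustrative):

    import torch
    import torch.nn.functional as F

    num_segs, num_classes = 20, 51                   # HMDB51 has 51 action classes
    scores = torch.randn(2 * num_segs, num_classes)  # flat clip scores, 2 videos

    per_video = scores.view(-1, num_segs, num_classes)
    avg = F.softmax(per_video, dim=2).mean(dim=1)    # [2, 51]; each row sums to 1
    print(avg.shape, avg.sum(dim=1))
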
From 919089c045fa6612b79a72379bd642832335d2b5 Mon Sep 17 00:00:00 2001
From: 吴仪盈 <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:49:29 +0000
Subject: [PATCH 30/58] Added LICENSE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py |  96 ++++++++
.../PoseC3D/posec3d_preprocess.py | 101 +++++++++
.../PoseC3D/posec3d_pytorch2onnx.py | 214 ++++++++++++++++++
3 files changed, 411 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
new file mode 100644
index 0000000000..e79fc12c13
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
@@ -0,0 +1,96 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+
+import os
+import argparse
+import numpy as np
+from collections import OrderedDict
+from mmaction.core import top_k_accuracy
+import torch
+import torch.nn.functional as F
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+        description='Dataset HMDB51 Postprocessing')
+ parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
+ parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+    # load ground-truth labels ("<index> <label>" per line) from the info file
+    gt_labels = []
+    with open(args.info_path, 'r') as f:
+        for line in f:
+            gt_labels.append(int(line.split()[-1]))
+
+ # load inference result
+ results = []
+
+    num_segs = 20  # clips sampled per video by the test pipeline
+    num_file = len(os.listdir(args.result_path))
+    for idx in range(num_file):
+        file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
+        result = torch.from_numpy(np.loadtxt(file))
+        batch_size = result.shape[0]
+        # regroup flat clip scores into [videos, num_segs, classes], then average
+        # the per-clip softmax scores, as in mmaction2's cls_score averaging
+        result = result.view(batch_size // num_segs, num_segs, -1)
+        result = F.softmax(result, dim=2).mean(dim=1).numpy()
+        results.extend(result)
+
+ metrics = ['top_k_accuracy']
+ metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
+ eval_results = OrderedDict()
+ for metric in metrics:
+ print(f'Evaluating {metric} ...')
+ if metric == 'top_k_accuracy':
+ topk = metric_options.setdefault('top_k_accuracy',
+ {}).setdefault('topk', (1, 5))
+ if not isinstance(topk, (int, tuple)):
+ raise TypeError(
+ f'topk must be int or tuple of int, but got {type(topk)}')
+ if isinstance(topk, int):
+ topk = (topk, )
+
+ top_k_acc = top_k_accuracy(results, gt_labels, topk)
+ log_msg = []
+ for k, acc in zip(topk, top_k_acc):
+ eval_results[f'top{k}_acc'] = acc
+ log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
+ log_msg = ''.join(log_msg)
+ print(log_msg)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
new file mode 100644
index 0000000000..8b49a20ab8
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
@@ -0,0 +1,101 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+    root_path = os.path.dirname(args.ann_file)
+    out_path = os.path.join(root_path, args.name)
+    if not os.path.exists(out_path):
+        os.mkdir(out_path)
+    info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+    for i, data in enumerate(data_loader):
+        imgs = data['imgs']
+        label = data['label']
+        print('Preprocessing video {}/{}, input shape {}'.format(
+            i + 1, len(data_loader), tuple(imgs.shape)))
+
+        # record "<sample index> <ground-truth label>" for every sample in the batch
+        for batch in range(imgs.shape[0]):
+            gt_label = label.cpu().numpy()[batch]
+            info_file.write(str(args.batch_size * i + batch) + ' ' + str(gt_label))
+            info_file.write('\n')
+
+        # pad an incomplete final batch up to batch_size so every .bin has a fixed shape
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0, 0, 0, 0, 0, 0, 0, 0, 0, args.batch_size - imgs.shape[0]))
+
+        imgs.cpu().numpy().tofile(os.path.join(out_path, str(i) + '.bin'))
+
+    # close only after all batches have been written
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
new file mode 100644
index 0000000000..4f8bbeada0
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
@@ -0,0 +1,214 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+
+import argparse
+import warnings
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.runner import load_checkpoint
+
+from mmaction.models import build_model
+
+try:
+ import onnx
+ import onnxruntime as rt
+except ImportError as e:
+ raise ImportError(f'Please install onnx and onnxruntime first. {e}')
+
+try:
+ from mmcv.onnx.symbolic import register_extra_symbolics
+except ModuleNotFoundError:
+ raise NotImplementedError('please update mmcv to version>=1.0.4')
+
+
+def _convert_batchnorm(module):
+ """Convert the syncBNs into normal BN3ds."""
+ module_output = module
+ if isinstance(module, torch.nn.SyncBatchNorm):
+ module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
+ module.momentum, module.affine,
+ module.track_running_stats)
+ if module.affine:
+ module_output.weight.data = module.weight.data.clone().detach()
+ module_output.bias.data = module.bias.data.clone().detach()
+ # keep requires_grad unchanged
+ module_output.weight.requires_grad = module.weight.requires_grad
+ module_output.bias.requires_grad = module.bias.requires_grad
+ module_output.running_mean = module.running_mean
+ module_output.running_var = module.running_var
+ module_output.num_batches_tracked = module.num_batches_tracked
+ for name, child in module.named_children():
+ module_output.add_module(name, _convert_batchnorm(child))
+ del module
+ return module_output
+
+
+def pytorch2onnx(model,
+ input_shape,
+ opset_version=11,
+ show=False,
+ output_file='tmp.onnx',
+ verify=False):
+ """Convert pytorch model to onnx model.
+
+ Args:
+ model (:obj:`nn.Module`): The pytorch model to be exported.
+ input_shape (tuple[int]): The input tensor shape of the model.
+ opset_version (int): Opset version of onnx used. Default: 11.
+ show (bool): Determines whether to print the onnx model architecture.
+ Default: False.
+ output_file (str): Output onnx model name. Default: 'tmp.onnx'.
+ verify (bool): Determines whether to verify the onnx model.
+ Default: False.
+ """
+ model.cpu().eval()
+
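+    # a random tensor is enough here: ONNX export traces the graph, so only the shape matters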
+ input_tensor = torch.randn(input_shape)
+
+ register_extra_symbolics(opset_version)
+ input_names = ["invals"]
+ torch.onnx.export(
+ model,
+ input_tensor,
+ output_file,
+ export_params=True,
+ keep_initializers_as_inputs=True,
+ verbose=show,
+ opset_version=opset_version,
+ input_names=input_names
+ )
+
+ print(f'Successfully exported ONNX model: {output_file}')
+ if verify:
+ # check by onnx
+ onnx_model = onnx.load(output_file)
+ onnx.checker.check_model(onnx_model)
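+        # the checker validates graph structure only; numerical parity is compared below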
+
+ # check the numerical value
+ # get pytorch output
+ pytorch_result = model(input_tensor)[0].detach().numpy()
+
+ # get onnx output
+ input_all = [node.name for node in onnx_model.graph.input]
+ input_initializer = [
+ node.name for node in onnx_model.graph.initializer
+ ]
+ net_feed_input = list(set(input_all) - set(input_initializer))
+ assert len(net_feed_input) == 1
+ sess = rt.InferenceSession(output_file)
+ onnx_result = sess.run(
+ None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
+ # only compare part of results
+ random_class = np.random.randint(pytorch_result.shape[1])
+        assert np.allclose(
+            pytorch_result[:, random_class], onnx_result[:, random_class]
+        ), 'The outputs are different between PyTorch and ONNX'
+        print('The numerical values are the same between PyTorch and ONNX')
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ description='Convert MMAction2 models to ONNX')
+ parser.add_argument('config', help='test config file path')
+ parser.add_argument('checkpoint', help='checkpoint file')
+ parser.add_argument('--show', action='store_true', help='show onnx graph')
+ parser.add_argument('--output-file', type=str, default='tmp.onnx')
+ parser.add_argument('--opset-version', type=int, default=11)
+ parser.add_argument(
+ '--verify',
+ action='store_true',
+ help='verify the onnx model output against pytorch output')
+ parser.add_argument(
+ '--is-localizer',
+ action='store_true',
+ help='whether it is a localizer')
+ parser.add_argument(
+ '--shape',
+ type=int,
+ nargs='+',
+ default=[1, 3, 8, 224, 224],
+ help='input video size')
+ parser.add_argument(
+ '--softmax',
+ action='store_true',
+        help='whether to add a softmax layer at the end of recognizers')
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+
+ assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
+
+ cfg = mmcv.Config.fromfile(args.config)
+
+ if not args.is_localizer:
+ cfg.model.backbone.pretrained = None
+
+ # build the model
+ model = build_model(
+ cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
+ model = _convert_batchnorm(model)
+
+ # onnx.export does not support kwargs
+ if hasattr(model, 'forward_dummy'):
+ from functools import partial
+ model.forward = partial(model.forward_dummy, softmax=args.softmax)
+ elif hasattr(model, '_forward') and args.is_localizer:
+ model.forward = model._forward
+ else:
+ raise NotImplementedError(
+ 'Please implement the forward method for exporting.')
+
+ checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+
+ # convert model to onnx file
+ pytorch2onnx(
+ model,
+ args.shape,
+ opset_version=args.opset_version,
+ show=args.show,
+ output_file=args.output_file,
+ verify=args.verify)
+
+ # Following strings of text style are from colorama package
+ bright_style, reset_style = '\x1b[1m', '\x1b[0m'
+ red_text, blue_text = '\x1b[31m', '\x1b[34m'
+ white_background = '\x1b[107m'
+
+ msg = white_background + bright_style + red_text
+ msg += 'DeprecationWarning: This tool will be deprecated in future. '
+ msg += blue_text + 'Welcome to use the unified model deployment toolbox '
+ msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
+ msg += reset_style
+ warnings.warn(msg)
--
Gitee
From 301bf66ba582984d88810bf67725c1d96dd6a184 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:52:39 +0000
Subject: [PATCH 31/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/requirements.t?=
=?UTF-8?q?xt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/pose_estimation/PoseC3D/requirements.txt | 7 -------
1 file changed, 7 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
deleted file mode 100644
index 0fc693ca80..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-torch == 1.11.0
-torchvision == 0.12.0
-onnx == 1.10.2
-numpy == 1.21.2
-Pillow == 9.0.1
-opencv-python == 4.5.5.64
-mmcv-full == 1.4.8
\ No newline at end of file
--
Gitee
From 6bf1e0b1de8bf114f032c870bd518899ac40c7e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 15:53:27 +0000
Subject: [PATCH 32/58] requirements
---
.../contrib/cv/pose_estimation/PoseC3D/requirements.txt | 8 ++++++++
1 file changed, 8 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
new file mode 100644
index 0000000000..1aad32e539
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
@@ -0,0 +1,8 @@
+torch == 1.11.0
+torchvision == 0.12.0
+onnx == 1.10.2
+numpy == 1.21.2
+Pillow == 9.0.1
+opencv-python == 4.5.5.64
+mmcv-full == 1.4.8
+git==2.17.7
\ No newline at end of file
--
Gitee
From 1747a93549fcdd325f31d28bfaedb4242719cff7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:09:36 +0000
Subject: [PATCH 33/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/requirements.t?=
=?UTF-8?q?xt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/pose_estimation/PoseC3D/requirements.txt | 8 --------
1 file changed, 8 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
deleted file mode 100644
index 1aad32e539..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-torch == 1.11.0
-torchvision == 0.12.0
-onnx == 1.10.2
-numpy == 1.21.2
-Pillow == 9.0.1
-opencv-python == 4.5.5.64
-mmcv-full == 1.4.8
-git==2.17.7
\ No newline at end of file
--
Gitee
From 25a2cdd6a0d0b36cbf141ede24f02cd23fff6688 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:09:50 +0000
Subject: [PATCH 34/58] requirements
---
.../cv/pose_estimation/PoseC3D/requirements.txt | 11 +++++++++++
1 file changed, 11 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
new file mode 100644
index 0000000000..b5981002bf
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
@@ -0,0 +1,11 @@
+decord == 0.6.0
+einops == 0.4.1
+matplotlib == 3.5.1
+numpy == 1.21.2
+onnx == 1.10.2
+onnxruntime == 1.11.0
+opencv-contrib-python == 4.5.5.64
+Pillow == 9.0.1
+scipy == 1.7.3
+torch == 1.11.0
+yapf == 0.32.0
\ No newline at end of file
--
Gitee
From f508e13363e6c6e3f74e4982c2cc86f7bd9efc8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:34:30 +0000
Subject: [PATCH 35/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/posec3d=5Fpost?=
=?UTF-8?q?process.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py | 100 ------------------
1 file changed, 100 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
deleted file mode 100644
index e79fc12c13..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# ============================================================================
-
-import os
-import argparse
-import numpy as np
-from collections import OrderedDict
-from mmaction.core import top_k_accuracy
-import torch
-import pdb
-import torch.nn.functional as F
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Dataset K400 Postprocessing')
- parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
- parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
-
- # load info file
- gt_labels = []
- with open(args.info_path, 'r') as f:
- for line in f.readlines():
- t = line.split( )[-1]
- gt_labels.append(int(t))
-
- # load inference result
- results = []
-
- num_file = len(os.listdir(args.result_path))
- for idx in range(num_file):
- file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
- result = np.loadtxt(file)
- result = torch.from_numpy(result)
- batch_size = result.shape[0]
-# pdb.set_trace()
- result = result.view(batch_size // 20, 20, -1) # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
-
- result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
- results.extend(result)
-
-
- metrics = ['top_k_accuracy']
- metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
- eval_results = OrderedDict()
- for metric in metrics:
- print(f'Evaluating {metric} ...')
- if metric == 'top_k_accuracy':
- topk = metric_options.setdefault('top_k_accuracy',
- {}).setdefault('topk', (1, 5))
- if not isinstance(topk, (int, tuple)):
- raise TypeError(
- f'topk must be int or tuple of int, but got {type(topk)}')
- if isinstance(topk, int):
- topk = (topk, )
-
- top_k_acc = top_k_accuracy(results, gt_labels, topk)
- log_msg = []
- for k, acc in zip(topk, top_k_acc):
- eval_results[f'top{k}_acc'] = acc
- log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
- log_msg = ''.join(log_msg)
- print(log_msg)
- continue
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 56b3e0c592cfa7a783e9fa76a867c7f295f29a97 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:34:36 +0000
Subject: [PATCH 36/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/posec3d=5Fprep?=
=?UTF-8?q?rocess.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 102 ------------------
1 file changed, 102 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
deleted file mode 100644
index 8b49a20ab8..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# ============================================================================
-
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
- out_path = '/home/wyy/hmdb51/out_bin_1'
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
- info_file.close()
-
- if imgs.shape[0] != args.batch_size:
- imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
- bin_info = imgs.cpu().numpy()
- print(bin_info.shape)
- preprocess = torch.from_numpy(bin_info)
- print(preprocess.shape)
- bin_info.tofile(out_path + '/' + str(i) + '.bin')
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 7f540a1a76997c6b6c537beca3311933f00942dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:34:42 +0000
Subject: [PATCH 37/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/posec3d=5Fpyto?=
=?UTF-8?q?rch2onnx.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_pytorch2onnx.py | 214 ------------------
1 file changed, 214 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
deleted file mode 100644
index 4f8bbeada0..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# ============================================================================
-
-import argparse
-import warnings
-
-import mmcv
-import numpy as np
-import torch
-from mmcv.runner import load_checkpoint
-
-from mmaction.models import build_model
-
-try:
- import onnx
- import onnxruntime as rt
-except ImportError as e:
- raise ImportError(f'Please install onnx and onnxruntime first. {e}')
-
-try:
- from mmcv.onnx.symbolic import register_extra_symbolics
-except ModuleNotFoundError:
- raise NotImplementedError('please update mmcv to version>=1.0.4')
-
-
-def _convert_batchnorm(module):
- """Convert the syncBNs into normal BN3ds."""
- module_output = module
- if isinstance(module, torch.nn.SyncBatchNorm):
- module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
- module.momentum, module.affine,
- module.track_running_stats)
- if module.affine:
- module_output.weight.data = module.weight.data.clone().detach()
- module_output.bias.data = module.bias.data.clone().detach()
- # keep requires_grad unchanged
- module_output.weight.requires_grad = module.weight.requires_grad
- module_output.bias.requires_grad = module.bias.requires_grad
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- for name, child in module.named_children():
- module_output.add_module(name, _convert_batchnorm(child))
- del module
- return module_output
-
-
-def pytorch2onnx(model,
- input_shape,
- opset_version=11,
- show=False,
- output_file='tmp.onnx',
- verify=False):
- """Convert pytorch model to onnx model.
-
- Args:
- model (:obj:`nn.Module`): The pytorch model to be exported.
- input_shape (tuple[int]): The input tensor shape of the model.
- opset_version (int): Opset version of onnx used. Default: 11.
- show (bool): Determines whether to print the onnx model architecture.
- Default: False.
- output_file (str): Output onnx model name. Default: 'tmp.onnx'.
- verify (bool): Determines whether to verify the onnx model.
- Default: False.
- """
- model.cpu().eval()
-
- input_tensor = torch.randn(input_shape)
-
- register_extra_symbolics(opset_version)
- input_names = ["invals"]
- torch.onnx.export(
- model,
- input_tensor,
- output_file,
- export_params=True,
- keep_initializers_as_inputs=True,
- verbose=show,
- opset_version=opset_version,
- input_names=input_names
- )
-
- print(f'Successfully exported ONNX model: {output_file}')
- if verify:
- # check by onnx
- onnx_model = onnx.load(output_file)
- onnx.checker.check_model(onnx_model)
-
- # check the numerical value
- # get pytorch output
- pytorch_result = model(input_tensor)[0].detach().numpy()
-
- # get onnx output
- input_all = [node.name for node in onnx_model.graph.input]
- input_initializer = [
- node.name for node in onnx_model.graph.initializer
- ]
- net_feed_input = list(set(input_all) - set(input_initializer))
- assert len(net_feed_input) == 1
- sess = rt.InferenceSession(output_file)
- onnx_result = sess.run(
- None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
- # only compare part of results
- random_class = np.random.randint(pytorch_result.shape[1])
- assert np.allclose(
- pytorch_result[:, random_class], onnx_result[:, random_class]
- ), 'The outputs are different between Pytorch and ONNX'
- print('The numerical values are same between Pytorch and ONNX')
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Convert MMAction2 models to ONNX')
- parser.add_argument('config', help='test config file path')
- parser.add_argument('checkpoint', help='checkpoint file')
- parser.add_argument('--show', action='store_true', help='show onnx graph')
- parser.add_argument('--output-file', type=str, default='tmp.onnx')
- parser.add_argument('--opset-version', type=int, default=11)
- parser.add_argument(
- '--verify',
- action='store_true',
- help='verify the onnx model output against pytorch output')
- parser.add_argument(
- '--is-localizer',
- action='store_true',
- help='whether it is a localizer')
- parser.add_argument(
- '--shape',
- type=int,
- nargs='+',
- default=[1, 3, 8, 224, 224],
- help='input video size')
- parser.add_argument(
- '--softmax',
- action='store_true',
- help='wheter to add softmax layer at the end of recognizers')
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- args = parse_args()
-
- assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
-
- cfg = mmcv.Config.fromfile(args.config)
- # import modules from string list.
-
- if not args.is_localizer:
- cfg.model.backbone.pretrained = None
-
- # build the model
- model = build_model(
- cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
- model = _convert_batchnorm(model)
-
- # onnx.export does not support kwargs
- if hasattr(model, 'forward_dummy'):
- from functools import partial
- model.forward = partial(model.forward_dummy, softmax=args.softmax)
- elif hasattr(model, '_forward') and args.is_localizer:
- model.forward = model._forward
- else:
- raise NotImplementedError(
- 'Please implement the forward method for exporting.')
-
- checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
-
- # convert model to onnx file
- pytorch2onnx(
- model,
- args.shape,
- opset_version=args.opset_version,
- show=args.show,
- output_file=args.output_file,
- verify=args.verify)
-
- # Following strings of text style are from colorama package
- bright_style, reset_style = '\x1b[1m', '\x1b[0m'
- red_text, blue_text = '\x1b[31m', '\x1b[34m'
- white_background = '\x1b[107m'
-
- msg = white_background + bright_style + red_text
- msg += 'DeprecationWarning: This tool will be deprecated in future. '
- msg += blue_text + 'Welcome to use the unified model deployment toolbox '
- msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
- msg += reset_style
- warnings.warn(msg)
--
Gitee
From 338a76de237f9e5b6c942f7ef34bf5563f25e7b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sat, 11 Jun 2022 16:35:10 +0000
Subject: [PATCH 38/58] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=A4=B4=E9=83=A8?=
=?UTF-8?q?=E8=AE=B8=E5=8F=AF?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py | 87 ++++++++
.../PoseC3D/posec3d_preprocess.py | 89 ++++++++
.../PoseC3D/posec3d_pytorch2onnx.py | 201 ++++++++++++++++++
3 files changed, 377 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
new file mode 100644
index 0000000000..01b244d8d4
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
@@ -0,0 +1,87 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+import argparse
+import numpy as np
+from collections import OrderedDict
+from mmaction.core import top_k_accuracy
+import torch
+import torch.nn.functional as F
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+        description='Dataset HMDB51 Postprocessing')
+ parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
+ parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+ # load info file
+ gt_labels = []
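+    # each .info line is "<index> <label>", as written by the preprocess script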
+ with open(args.info_path, 'r') as f:
+ for line in f.readlines():
+            t = line.split()[-1]
+ gt_labels.append(int(t))
+
+ # load inference result
+ results = []
+
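+    # expect one "<idx>_output_0.txt" result file per input bin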
+ num_file = len(os.listdir(args.result_path))
+ for idx in range(num_file):
+ file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
+ result = np.loadtxt(file)
+ result = torch.from_numpy(result)
+        batch_size = result.shape[0]
+        # the script assumes 20 test segments per video (num_segs = 20)
+        result = result.view(batch_size // 20, 20, -1)  # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
+
+ result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
+ results.extend(result)
+
+
+ metrics = ['top_k_accuracy']
+ metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
+ eval_results = OrderedDict()
+ for metric in metrics:
+ print(f'Evaluating {metric} ...')
+ if metric == 'top_k_accuracy':
+ topk = metric_options.setdefault('top_k_accuracy',
+ {}).setdefault('topk', (1, 5))
+ if not isinstance(topk, (int, tuple)):
+ raise TypeError(
+ f'topk must be int or tuple of int, but got {type(topk)}')
+ if isinstance(topk, int):
+ topk = (topk, )
+
+ top_k_acc = top_k_accuracy(results, gt_labels, topk)
+ log_msg = []
+ for k, acc in zip(topk, top_k_acc):
+ eval_results[f'top{k}_acc'] = acc
+ log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
+ log_msg = ''.join(log_msg)
+ print(log_msg)
+ continue
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
new file mode 100644
index 0000000000..d894f30d29
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='out_bin', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name  # use the --name argument instead of a hard-coded user path
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
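+    # the .info file gets one line per clip: "<sample index> <ground-truth label>"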
+ info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+        for batch in range(imgs.shape[0]):
+            gt_label = label.cpu().numpy()[batch]
+            info_file.write(str(args.batch_size * i + batch) + ' ' + str(gt_label))
+            info_file.write('\n')
+
+        # pad the last incomplete batch so every .bin keeps a fixed batch dimension
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0, 0, 0, 0, 0, 0, 0, 0, 0, args.batch_size - imgs.shape[0]))
+
+        bin_info = imgs.cpu().numpy()
+        print(bin_info.shape)
+        bin_info.tofile(os.path.join(out_path, str(i) + '.bin'))
+
+    # close after the loop; closing inside it would break the next iteration's write
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
new file mode 100644
index 0000000000..9b9cd71d48
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
@@ -0,0 +1,201 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import argparse
+import warnings
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.runner import load_checkpoint
+
+from mmaction.models import build_model
+
+try:
+ import onnx
+ import onnxruntime as rt
+except ImportError as e:
+ raise ImportError(f'Please install onnx and onnxruntime first. {e}')
+
+try:
+ from mmcv.onnx.symbolic import register_extra_symbolics
+except ModuleNotFoundError:
+ raise NotImplementedError('please update mmcv to version>=1.0.4')
+
+
+def _convert_batchnorm(module):
+ """Convert the syncBNs into normal BN3ds."""
+ module_output = module
+ if isinstance(module, torch.nn.SyncBatchNorm):
+ module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
+ module.momentum, module.affine,
+ module.track_running_stats)
+ if module.affine:
+ module_output.weight.data = module.weight.data.clone().detach()
+ module_output.bias.data = module.bias.data.clone().detach()
+ # keep requires_grad unchanged
+ module_output.weight.requires_grad = module.weight.requires_grad
+ module_output.bias.requires_grad = module.bias.requires_grad
+ module_output.running_mean = module.running_mean
+ module_output.running_var = module.running_var
+ module_output.num_batches_tracked = module.num_batches_tracked
+ for name, child in module.named_children():
+ module_output.add_module(name, _convert_batchnorm(child))
+ del module
+ return module_output
+
+
+def pytorch2onnx(model,
+ input_shape,
+ opset_version=11,
+ show=False,
+ output_file='tmp.onnx',
+ verify=False):
+ """Convert pytorch model to onnx model.
+
+ Args:
+ model (:obj:`nn.Module`): The pytorch model to be exported.
+ input_shape (tuple[int]): The input tensor shape of the model.
+ opset_version (int): Opset version of onnx used. Default: 11.
+ show (bool): Determines whether to print the onnx model architecture.
+ Default: False.
+ output_file (str): Output onnx model name. Default: 'tmp.onnx'.
+ verify (bool): Determines whether to verify the onnx model.
+ Default: False.
+ """
+ model.cpu().eval()
+
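+    # a random tensor is enough here: ONNX export traces the graph, so only the shape matters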
+ input_tensor = torch.randn(input_shape)
+
+ register_extra_symbolics(opset_version)
+ input_names = ["invals"]
+ torch.onnx.export(
+ model,
+ input_tensor,
+ output_file,
+ export_params=True,
+ keep_initializers_as_inputs=True,
+ verbose=show,
+ opset_version=opset_version,
+ input_names=input_names
+ )
+
+ print(f'Successfully exported ONNX model: {output_file}')
+ if verify:
+ # check by onnx
+ onnx_model = onnx.load(output_file)
+ onnx.checker.check_model(onnx_model)
+
+ # check the numerical value
+ # get pytorch output
+ pytorch_result = model(input_tensor)[0].detach().numpy()
+
+ # get onnx output
+ input_all = [node.name for node in onnx_model.graph.input]
+ input_initializer = [
+ node.name for node in onnx_model.graph.initializer
+ ]
+ net_feed_input = list(set(input_all) - set(input_initializer))
+ assert len(net_feed_input) == 1
+ sess = rt.InferenceSession(output_file)
+ onnx_result = sess.run(
+ None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
+ # only compare part of results
+ random_class = np.random.randint(pytorch_result.shape[1])
+        assert np.allclose(
+            pytorch_result[:, random_class], onnx_result[:, random_class]
+        ), 'The outputs are different between PyTorch and ONNX'
+        print('The numerical values are the same between PyTorch and ONNX')
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ description='Convert MMAction2 models to ONNX')
+ parser.add_argument('config', help='test config file path')
+ parser.add_argument('checkpoint', help='checkpoint file')
+ parser.add_argument('--show', action='store_true', help='show onnx graph')
+ parser.add_argument('--output-file', type=str, default='tmp.onnx')
+ parser.add_argument('--opset-version', type=int, default=11)
+ parser.add_argument(
+ '--verify',
+ action='store_true',
+ help='verify the onnx model output against pytorch output')
+ parser.add_argument(
+ '--is-localizer',
+ action='store_true',
+ help='whether it is a localizer')
+ parser.add_argument(
+ '--shape',
+ type=int,
+ nargs='+',
+ default=[1, 3, 8, 224, 224],
+ help='input video size')
+ parser.add_argument(
+ '--softmax',
+ action='store_true',
+        help='whether to add a softmax layer at the end of recognizers')
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+
+ assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
+
+ cfg = mmcv.Config.fromfile(args.config)
+
+ if not args.is_localizer:
+ cfg.model.backbone.pretrained = None
+
+ # build the model
+ model = build_model(
+ cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
+ model = _convert_batchnorm(model)
+
+ # onnx.export does not support kwargs
+ if hasattr(model, 'forward_dummy'):
+ from functools import partial
+ model.forward = partial(model.forward_dummy, softmax=args.softmax)
+ elif hasattr(model, '_forward') and args.is_localizer:
+ model.forward = model._forward
+ else:
+ raise NotImplementedError(
+ 'Please implement the forward method for exporting.')
+
+ checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+
+ # convert model to onnx file
+ pytorch2onnx(
+ model,
+ args.shape,
+ opset_version=args.opset_version,
+ show=args.show,
+ output_file=args.output_file,
+ verify=args.verify)
+
+ # Following strings of text style are from colorama package
+ bright_style, reset_style = '\x1b[1m', '\x1b[0m'
+ red_text, blue_text = '\x1b[31m', '\x1b[34m'
+ white_background = '\x1b[107m'
+
+ msg = white_background + bright_style + red_text
+ msg += 'DeprecationWarning: This tool will be deprecated in future. '
+ msg += blue_text + 'Welcome to use the unified model deployment toolbox '
+ msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
+ msg += reset_style
+ warnings.warn(msg)
--
Gitee
From 0006e7d48fced4763917f263815402643be719e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 12:58:56 +0000
Subject: [PATCH 39/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/posec3d=5Fprep?=
=?UTF-8?q?rocess.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 89 -------------------
1 file changed, 89 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
deleted file mode 100644
index d894f30d29..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='out_bin', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
- out_path = '/home/wyy/hmdb51/out_bin_1'
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
- info_file.close()
-
- if imgs.shape[0] != args.batch_size:
- imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
- bin_info = imgs.cpu().numpy()
- print(bin_info.shape)
- preprocess = torch.from_numpy(bin_info)
- print(preprocess.shape)
- bin_info.tofile(out_path + '/' + str(i) + '.bin')
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From 5ab6ab50ccdc2e35640ca7dfe10ce843b7c27dd3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 12:59:29 +0000
Subject: [PATCH 40/58] =?UTF-8?q?=E6=9B=B4=E6=AD=A3=E4=BA=86=E4=B8=80?=
=?UTF-8?q?=E4=BA=9B=E9=94=99=E8=AF=AF?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_preprocess.py | 87 +++++++++++++++++++
1 file changed, 87 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
new file mode 100644
index 0000000000..079fa358df
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
@@ -0,0 +1,87 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+import torch
+import numpy as np
+import argparse
+from mmcv import Config
+import torch.nn.functional as F
+from mmaction.datasets import build_dataloader, build_dataset
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
+ parser.add_argument('--config',
+ default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
+ help='config file path')
+ parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
+ parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
+ parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
+ parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
+ parser.add_argument('--name', default='prep_hmdb51_bs', type=str)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+ cfg = Config.fromfile(args.config)
+
+ cfg.data.test.ann_file = args.ann_file
+ cfg.data.test.data_prefix = args.data_root
+
+ # build the dataloader
+ dataset = build_dataset(cfg.data.test, dict(test_mode=True))
+ dataloader_setting = dict(
+ videos_per_gpu=args.batch_size,
+ workers_per_gpu=args.num_worker,
+ dist=False,
+ shuffle=False)
+ dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
+ data_loader = build_dataloader(dataset, **dataloader_setting)
+
+ root_path = os.path.dirname(args.ann_file)
+    out_path = args.name  # eval_acc_perf.sh passes --name ./prep_hmdb51_bs${batch_size}
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
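+    # the .info file gets one line per clip: "<sample index> <ground-truth label>"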
+ info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
+
+ for i, data in enumerate(data_loader):
+ print('Preprocessing video {}/{}'.format(i, len(data_loader)))
+ imgs = data['imgs']
+ label = data['label']
+ print(imgs.shape)
+
+        for batch in range(imgs.shape[0]):
+            gt_label = label.cpu().numpy()[batch]
+            info_file.write(str(args.batch_size * i + batch) + ' ' + str(gt_label))
+            info_file.write('\n')
+
+        # pad the last incomplete batch so every .bin keeps a fixed batch dimension
+        if imgs.shape[0] != args.batch_size:
+            imgs = F.pad(imgs, (0, 0, 0, 0, 0, 0, 0, 0, 0, args.batch_size - imgs.shape[0]))
+
+        bin_info = imgs.cpu().numpy()
+        print(bin_info.shape)
+        bin_info.tofile(os.path.join(out_path, str(i) + '.bin'))
+
+    # close explicitly so the .info file is fully flushed before inference reads it
+    info_file.close()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--
Gitee
From bbffff692bd8cf44e429f61ed4f4e4203e5ff043 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:54:12 +0000
Subject: [PATCH 41/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/posec3d=5Fpost?=
=?UTF-8?q?process.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py | 87 -------------------
1 file changed, 87 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
deleted file mode 100644
index 01b244d8d4..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import os
-import argparse
-import numpy as np
-from collections import OrderedDict
-from mmaction.core import top_k_accuracy
-import torch
-import pdb
-import torch.nn.functional as F
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Dataset K400 Postprocessing')
- parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
- parser.add_argument('--info_path', default='/home/wyy/data/hmdb51.info', type=str)
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
-
- # load info file
- gt_labels = []
- with open(args.info_path, 'r') as f:
- for line in f.readlines():
- t = line.split( )[-1]
- gt_labels.append(int(t))
-
- # load inference result
- results = []
-
- num_file = len(os.listdir(args.result_path))
- for idx in range(num_file):
- file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
- result = np.loadtxt(file)
- result = torch.from_numpy(result)
- batch_size = result.shape[0]
-# pdb.set_trace()
- result = result.view(batch_size // 20, 20, -1) # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
-
- result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
- results.extend(result)
-
-
- metrics = ['top_k_accuracy']
- metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
- eval_results = OrderedDict()
- for metric in metrics:
- print(f'Evaluating {metric} ...')
- if metric == 'top_k_accuracy':
- topk = metric_options.setdefault('top_k_accuracy',
- {}).setdefault('topk', (1, 5))
- if not isinstance(topk, (int, tuple)):
- raise TypeError(
- f'topk must be int or tuple of int, but got {type(topk)}')
- if isinstance(topk, int):
- topk = (topk, )
-
- top_k_acc = top_k_accuracy(results, gt_labels, topk)
- log_msg = []
- for k, acc in zip(topk, top_k_acc):
- eval_results[f'top{k}_acc'] = acc
- log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
- log_msg = ''.join(log_msg)
- print(log_msg)
- continue
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
--
Gitee
From c14d22aee964b10bf59d419508ef97381d87718b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:54:52 +0000
Subject: [PATCH 42/58] =?UTF-8?q?=E5=90=8E=E5=A4=84=E7=90=86=E6=96=87?=
=?UTF-8?q?=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/posec3d_postprocess.py | 87 +++++++++++++++++++
1 file changed, 87 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
new file mode 100644
index 0000000000..03982676ab
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
@@ -0,0 +1,87 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+import argparse
+import numpy as np
+from collections import OrderedDict
+from mmaction.core import top_k_accuracy
+import torch
+import torch.nn.functional as F
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+        description='Dataset HMDB51 Postprocessing')
+ parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
+ parser.add_argument('--info_path', default='./hmdb51.info', type=str)
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+ # load info file
+ gt_labels = []
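+    # each .info line is "<index> <label>", as written by the preprocess script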
+ with open(args.info_path, 'r') as f:
+ for line in f.readlines():
+            t = line.split()[-1]
+ gt_labels.append(int(t))
+
+ # load inference result
+ results = []
+
+ num_file = len(os.listdir(args.result_path))
+ for idx in range(num_file):
+ file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
+ result = np.loadtxt(file)
+ result = torch.from_numpy(result)
+        batch_size = result.shape[0]
+        # each video was tested as 20 clips; fold them back into (videos, clips, classes)
+        num_segs = 20
+        result = result.view(batch_size // num_segs, num_segs, -1)
+        # per-clip softmax averaged over clips, as in mmaction2's average_clips='prob'
+        result = F.softmax(result, dim=2).mean(dim=1).numpy()
+ results.extend(result)
+
+
+ metrics = ['top_k_accuracy']
+ metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
+ eval_results = OrderedDict()
+ for metric in metrics:
+ print(f'Evaluating {metric} ...')
+ if metric == 'top_k_accuracy':
+ topk = metric_options.setdefault('top_k_accuracy',
+ {}).setdefault('topk', (1, 5))
+ if not isinstance(topk, (int, tuple)):
+ raise TypeError(
+ f'topk must be int or tuple of int, but got {type(topk)}')
+ if isinstance(topk, int):
+ topk = (topk, )
+
+ top_k_acc = top_k_accuracy(results, gt_labels, topk)
+ log_msg = []
+ for k, acc in zip(topk, top_k_acc):
+ eval_results[f'top{k}_acc'] = acc
+ log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
+ log_msg = ''.join(log_msg)
+ print(log_msg)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
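For illustration, a minimal sketch of the clip-score averaging performed above, with made-up scores (the real script reads them from msame TXT outputs):

```python
import torch
import torch.nn.functional as F

num_segs, num_classes = 20, 51   # HMDB51 has 51 classes; each video is tested as 20 clips
scores = torch.randn(num_segs, num_classes)   # one video's per-clip logits

# per-clip softmax over classes, then average over the clips
video_score = F.softmax(scores.view(1, num_segs, -1), dim=2).mean(dim=1)
print(video_score.shape)          # torch.Size([1, 51])
print(int(video_score.argmax()))  # predicted class index
```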
--
Gitee
From 705ccc92f10f5babb07fb30276f79499440f6463 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:55:41 +0000
Subject: [PATCH 43/58] =?UTF-8?q?=E6=80=A7=E8=83=BD=E5=92=8C=E7=B2=BE?=
=?UTF-8?q?=E5=BA=A6=E6=B5=8B=E8=AF=95=E6=8C=87=E4=BB=A4=E6=96=87=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/test/eval_acc_perf.sh | 68 +++++++++++++++++++
1 file changed, 68 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
new file mode 100644
index 0000000000..97a659c1bd
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+datasets_path="/opt/npu/hmdb51"
+
+for para in $*
+do
+ if [[ $para == --datasets_path* ]]; then
+ datasets_path=`echo ${para#*=}`
+ fi
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# ============================= prepare dataset ==============================
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+
+# ======================= generate prep_dataset ==============================
+rm -rf ./prep_hmdb51_bs${batch_size}
+chmod u+x msame
+python posec3d_preprocess.py \
+ --batch_size ${batch_size} \
+ --data_root ${datasets_path}/rawframes/ \
+ --ann_file ./hmdb51.pkl \
+ --name ./prep_hmdb51_bs${batch_size}
+if [ $? != 0 ]; then
+ echo "posec3d preprocess fail!"
+ exit -1
+fi
+echo "==> 1. creating ./prep_hmdb51_bs${batch_size} successfully."
+
+# =============================== msame ======================================
+if [ ! -d ./result ]; then
+ mkdir ./result
+fi
+rm -rf ./result/outputs_bs${batch_size}_om
+./msame --model "./posec3d_bs${batch_size}.om" \
+ --input "./prep_hmdb51_bs${batch_size}" \
+ --output "./result/outputs_bs${batch_size}_om" \
+ --outfmt TXT > ./msame_bs${batch_size}.txt
+if [ $? != 0 ]; then
+ echo "msame bs${batch_size} fail!"
+ exit -1
+fi
+echo "==> 2. conducting hmdb51_bs${batch_size}.om successfully."
+
+
+# ============================ evaluate ======================================
+# msame writes its outputs into a timestamped subdirectory; evaluate the newest one
+output_dir=$(ls -td ./result/outputs_bs${batch_size}_om/*/ | head -1)
+python posec3d_postprocess.py \
+    --result_path ${output_dir} \
+    --info_path ./hmdb51.info
+
+if [ $? != 0 ]; then
+ echo "fail!"
+ exit -1
+fi
+echo "==> 3. evaluating hmda51 on bs${batch_size} successfully."
+echo '==> 4. Done.'
\ No newline at end of file
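The script chains three stages (preprocess, msame inference, postprocess) and stops at the first failure. A rough Python equivalent of that control flow, assuming the same file layout as the script above:

```python
import subprocess
import sys

def run(cmd):
    """Run one pipeline stage and abort if it fails, mirroring the $? checks above."""
    print('==>', ' '.join(cmd))
    if subprocess.call(cmd) != 0:
        sys.exit('stage failed: ' + cmd[0])

batch_size = '1'
run(['python', 'posec3d_preprocess.py', '--batch_size', batch_size,
     '--data_root', '/opt/npu/hmdb51/rawframes/',
     '--ann_file', './hmdb51.pkl', '--name', './prep_hmdb51_bs' + batch_size])
run(['./msame', '--model', './posec3d_bs%s.om' % batch_size,
     '--input', './prep_hmdb51_bs' + batch_size,
     '--output', './result/outputs_bs%s_om' % batch_size, '--outfmt', 'TXT'])
run(['python', 'posec3d_postprocess.py',
     '--result_path', './result/outputs_bs%s_om' % batch_size,
     '--info_path', './hmdb51.info'])
```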
--
Gitee
From 7a904fef4d6b46e42d7668e8a643f15c3b329bad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:56:07 +0000
Subject: [PATCH 44/58] =?UTF-8?q?=E8=8E=B7=E5=8F=96=E6=80=A7=E8=83=BD?=
=?UTF-8?q?=E6=B5=8B=E8=AF=95=E7=BB=93=E6=9E=9C=E6=96=87=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/test/parse.py | 40 +++++++++++++++++++
1 file changed, 40 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
new file mode 100644
index 0000000000..da50748128
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
@@ -0,0 +1,40 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import re
+import json
+import argparse
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--result-file', type=str, default="./msame_bs1.txt")
+ parser.add_argument('--batch-size', type=int, default=1)
+ args = parser.parse_args()
+
+ if args.result_file.endswith('.json'):
+ result_json = args.result_file
+ with open(result_json, 'r') as f:
+ content = f.read()
+ tops = [i.get('value') for i in json.loads(content).get('value') if 'Top' in i.get('key')]
+ print('om {} top1:{}'.format(result_json.split('_')[1].split('.')[0], tops[0]))
+ elif args.result_file.endswith('.txt'):
+ result_txt = args.result_file
+ with open(result_txt, 'r') as f:
+ content = f.read()
+ txt_data_list = re.findall(r'Inference average time without first time:.*ms', content.replace('\n', ',') + ',')[-1]
+ avg_time = txt_data_list.split(' ')[-2]
+ fps = args.batch_size * 1000 / float(avg_time)
+ print('310P bs{} fps:{:.3f}'.format(args.batch_size, fps))
\ No newline at end of file
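A small usage illustration of the txt branch above; the sample log line mimics what the regex expects (the exact msame wording is assumed here, not verified):

```python
import re

content = 'Inference average time without first time: 25.86 ms'  # sample msame log line

match = re.findall(r'Inference average time without first time:.*ms',
                   content.replace('\n', ',') + ',')[-1]
avg_time = match.split(' ')[-2]               # '25.86' (milliseconds per batch)
batch_size = 1
fps = batch_size * 1000 / float(avg_time)     # batches per second * batch size
print('310P bs{} fps:{:.3f}'.format(batch_size, fps))   # 310P bs1 fps:38.670
```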
--
Gitee
From 51377e247d38be0fd18ab55ea61a313c56002dc3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:56:34 +0000
Subject: [PATCH 45/58] README
---
.../cv/pose_estimation/PoseC3D/README.md | 172 ++++++++++++++++++
1 file changed, 172 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
new file mode 100644
index 0000000000..14daef0e8e
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
@@ -0,0 +1,172 @@
+# convmixer ONNX模型端到端推理指导
+- [1. 模型概述](#1)
+ - [论文地址](#11)
+ - [代码地址](#12)
+- [2. 环境说明](#2)
+ - [深度学习框架](#21)
+ - [python第三方库](#22)
+- [3. 模型转换](#3)
+ - [pth转onnx模型](#31)
+ - [onnx转om模型](#32)
+- [4. 数据预处理](#4)
+ - [数据处理](#41)
+- [5. 离线推理](#5)
+ - [msame工具概述](#51)
+ - [离线推理](#52)
+ - [精度和性能比较](#53)
+
+## 1. 模型概述
+### 1.1 论文地址
+[posec3d论文](https://arxiv.org/abs/2104.13586)
+### 1.2 代码地址
+[posec3d代码](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+```bash
+git clone https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d.git
+mv posec3d_patch.patch mmaction2//pth2onnx.py→posec3d_pytorch2onnx.py
+```
+> **说明:**
+> 本离线推理项目中posec3d模型对应论文中PoseConv3D,以下说明中将PoseConv3D简称为posec3d
+
+## 2. 环境说明
+### 2.1 深度学习框架
+```
+CANN 5.1.RC1
+torch==1.11.0
+torchvision==0.12.0
+onnx==1.11.0
+```
+
+### 2.2 python第三方库
+```
+numpy==1.21.6
+pillow==9.1.1
+mmcv==1.4.0
+```
+> **说明:**
+> pytorch,torchvision和onnx:(X86架构)可以通过pip方式安装或官方下载whl包安装; (Arm架构)可以通过源码编译安装
+> 其他第三方库: 可以通过 pip3.7 install -r requirements.txt 进行安装
+
+## 3. 模型转换
+一步式从pth权重文件转om模型的脚本,能够由pth权重文件生成bacth为1的om模型:
+```bash
+bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
+```
+运行后会生成如下文件:
+```bash
+├── posec3d_bs1.onnx
+├── posec3d_bs1.om
+```
+
+### 3.1 pth转onnx模型
+1. 设置环境变量
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. 下载hmdb51的权重文件:
+[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth)
+
+3. 执行posec3d_pth2onnx.py脚本,生成onnx模型文件
+```bash
+python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
+```
+其中"shape"表示输入节点的shape,"output-file"表示转换后生成的onnx模型的存储地址和名称
+
+### 3.2 onnx转om模型
+1. 使用atc将onnx模型转换为om模型文件,posec3d模型需要借助aoe优化获得的知识库,工具使用方法可以参考[CANN V100R020C10 开发辅助工具指南 (推理) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
+
+```bash
+# 将知识库文件夹通过scp -r指令复制到当前目录
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
+```
+
+## 4. 数据预处理
+在当前目录下载hmdb51.pkl注释文件[hmdb51.pkl](https://download.openmmlab.com/mmaction/posec3d/hmdb51.pkl)
+
+数据预处理过程包含在 test/eval_acc_perf.sh 的脚本中
+### 4.1 数据处理
+1. 设置环境变量
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. 下载hmdb51数据集。posec3d模型使用hmdb51中的1530个视频数据进行测试,具体来说参考posec3d的源码仓中的测试过程对验证集视频进行提取帧,对预处理后的图像进行缩放,中心裁剪以及归一化,并将图像数据转换为二进制文件(.bin)
+```bash
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+../../../data/hmdb51 /opt/npu
+cd ../../../..
+```
+> **说明:**
+> 本项目使用的推理工具为msame,需要针对不同的batch_size生成不同的输入数据
+
+3. 执行输入数据的生成脚本,生成模型输入的bin文件
+```bash
+python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name ./prep_hmdb51_bs1
+```
+其中"batch_size"表示生成数据集对应的batch size,data_root表示处理前原数据集的地址,ann_file表示对应的注解文件,"name"表示生成数据集的文件夹名称。
+运行后,将会得到如下形式的文件夹:
+```
+├── prep_image_bs1
+│ ├──0.bin
+│ ├──......
+```
+
+## 5. 离线推理
+执行一步式推理前,请先按照5.1节准备msame离线推理工具
+一步式进行输入数据的准备,模型离线推理和NPU性能数据的获取:
+```bash
+bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
+```
+运行后会生成如下文件/文件夹:
+```bash
+├── prep_hmdb51_bs1 # 模型的标签输入(文件夹)
+├── msame_bs1.txt # msame推理过程的输出
+├── result
+│ ├── outputs_bs1_om # 模型的输出(文件夹)
+│ ├── result_bs1.json # 模型的精度输出
+```
+
+### 5.1 msame工具概述
+msame模型推理工具,其输入是om模型以及模型所需要的输入bin文件,其输出是模型根据相应输入产生的输出文件。获取工具及使用方法可以参考[msame模型推理工具指南](https://gitee.com/ascend/tools/tree/master/msame)
+### 5.2 离线推理
+1. 设置环境变量
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. 执行离线推理
+运行如下命令进行离线推理:
+```bash
+./msame --model "./posec3d_bs1.om" --input "./prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
+```
+模型输出格式是txt,输出保存在"output"参数指定的文件夹中,同时会生成推理的日志文件msame_bs1.txt
+
+### 5.3 精度和性能比较
+1. 性能数据的获取
+通过给test/parser.py指定推理后的日志文件,可以得到离线推理的性能数据
+```bash
+python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
+```
+其中"result-file"表示性能数据的地址和名称,"batch_size"表示性能测试时模型对应的batch size
+
+2. 精度数据的计算
+精度计算利用posec3d_postprocess.py脚本
+```
+ python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{实际文件夹名}
+```
+其中result_path表示离线推理输出所在的文件夹,info_path(默认为"./hmdb51.info")表示hmdb51验证集标签的地址和名称。
+
+| 模型 | 参考精度 | 310P精度 | 性能基准 | 310P性能 |
+| :------: | :------: | :------: | :------: | :------: |
+| convmixer_1536_20 bs1 | top1:81.37% | top1:81.35% | 44.445fps | 115.790fps |
+| convmixer_1536_20 bs4 | top1:81.37% | top1:81.35% |59.353fps | 117.252fps |
+
+> **说明:**
+> Top1表示预测结果中概率最大的类别与真实类别一致的概率,其值越大说明分类模型的效果越优
\ No newline at end of file
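For reference, a hedged sketch of the .bin round trip implied by section 4.1: the float32 dtype is an assumption, the 1x20x17x48x56x56 layout follows the input_shape passed to atc, and the file name is illustrative:

```python
import numpy as np

shape = (1, 20, 17, 48, 56, 56)   # batch, clips, keypoint channels, T, H, W
sample = np.random.rand(*shape).astype(np.float32)

# write one preprocessed sample the way a 0.bin model input could be stored
sample.tofile('0.bin')

# read it back; .bin files are raw buffers, so dtype and shape must be re-supplied
restored = np.fromfile('0.bin', dtype=np.float32).reshape(shape)
assert np.array_equal(sample, restored)
```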
--
Gitee
From 25c2fd67d3f6ba91da2d81197e85cf7e11f35b0e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 15:57:08 +0000
Subject: [PATCH 46/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 172 ------------------
1 file changed, 172 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
deleted file mode 100644
index 14daef0e8e..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
+++ /dev/null
--
Gitee
From d59f2564e744b6803f4b0a1cd6a722f11ab9380a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Sun, 12 Jun 2022 16:07:34 +0000
Subject: [PATCH 47/58] README
---
.../cv/pose_estimation/PoseC3D/README.md | 169 ++++++++++++++++++
1 file changed, 169 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
new file mode 100644
index 0000000000..414eaac71e
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
@@ -0,0 +1,169 @@
+# PoseC3D ONNX model end-to-end inference guide
+- [1. Model overview](#1)
+  - [Paper](#11)
+  - [Code](#12)
+- [2. Environment](#2)
+  - [Deep learning frameworks](#21)
+  - [Python third-party libraries](#22)
+- [3. Model conversion](#3)
+  - [pth to ONNX](#31)
+  - [ONNX to OM](#32)
+- [4. Data preprocessing](#4)
+  - [Data processing](#41)
+- [5. Offline inference](#5)
+  - [msame overview](#51)
+  - [Offline inference](#52)
+  - [Accuracy and performance comparison](#53)
+
+## 1. Model overview
+### 1.1 Paper
+[posec3d paper](https://arxiv.org/abs/2104.13586)
+### 1.2 Code
+[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+```bash
+git clone https://github.com/open-mmlab/mmaction2.git
+```
+> **Note:**
+> In this offline-inference project the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below.
+
+## 2. Environment
+### 2.1 Deep learning frameworks
+```
+CANN 5.1.RC1
+torch==1.11.0
+torchvision==0.12.0
+onnx==1.11.0
+```
+
+### 2.2 Python third-party libraries
+```
+numpy==1.21.6
+pillow==9.1.1
+mmcv==1.4.0
+```
+> **Note:**
+> pytorch, torchvision and onnx can be installed with pip or from the official wheel packages on x86, and built from source on Arm.
+> The other third-party libraries can be installed with pip3.7 install -r requirements.txt.
+
+## 3. Model conversion
+A one-step script converts the pth weight file into a batch-size-1 OM model:
+```bash
+bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
+```
+Running it generates the following files:
+```bash
+├── posec3d_bs1.onnx
+├── posec3d_bs1.om
+```
+
+### 3.1 pth to ONNX
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 weight file:
+[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth)
+
+3. Run the posec3d_pytorch2onnx.py script to generate the ONNX model file
+```bash
+python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
+```
+Here "shape" is the shape of the input node and "output-file" is the path and file name of the generated ONNX model.
+
+### 3.2 ONNX to OM
+1. Use atc to convert the ONNX model into an OM model file. posec3d needs the knowledge bank obtained from aoe tuning; for tool usage see [CANN V100R020C10 开发辅助工具指南 (推理) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
+
+```bash
+# copy the knowledge-bank folder into the current directory, e.g. with scp -r
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
+```
+
+## 4. Data preprocessing
+Download the annotation file [hmdb51.pkl](https://download.openmmlab.com/mmaction/posec3d/hmdb51.pkl) into the current directory.
+
+Data preprocessing is part of the test/eval_acc_perf.sh script.
+### 4.1 Data processing
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 dataset. posec3d is tested on 1530 videos from hmdb51: following the test procedure of the posec3d repository, frames are extracted from the validation videos, the images are resized, center-cropped and normalized, and the image data is written out as binary files (.bin)
+```bash
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+```
+> **Note:**
+> The inference tool used in this project is msame, so separate input data has to be generated for every batch_size.
+
+3. Run the input-generation script to produce the model's input bin files
+```bash
+python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name ./prep_hmdb51_bs1
+```
+Here "batch_size" is the batch size of the generated data, data_root is the path of the raw dataset, ann_file is the matching annotation file, and "name" is the output folder for the generated data.
+Afterwards you will have a folder of the following form:
+```
+├── prep_image_bs1
+│   ├──0.bin
+│   ├──......
+```
+
+## 5. Offline inference
+Before running the one-step script, prepare the msame offline-inference tool as described in section 5.1.
+A single step then prepares the input data, runs offline inference and collects the NPU performance data:
+```bash
+bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
+```
+Running it generates the following files/folders:
+```bash
+├── prep_hmdb51_bs1       # preprocessed model inputs (folder)
+├── msame_bs1.txt         # output of the msame inference run
+├── result
+│   ├── outputs_bs1_om    # model outputs (folder)
+```
+
+### 5.1 msame overview
+msame is a model-inference tool: it takes an OM model plus the input bin files the model needs, and produces the output files the model generates for those inputs. See the [msame guide](https://gitee.com/ascend/tools/tree/master/msame) for how to obtain and use the tool.
+### 5.2 Offline inference
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Run offline inference
+Run the following command:
+```bash
+./msame --model "./posec3d_bs1.om" --input "./prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
+```
+The model output format is txt; the outputs are saved in the folder given by the "output" argument, and the inference log is written to msame_bs1.txt
+
+### 5.3 Accuracy and performance comparison
+1. Collecting performance data
+Point test/parse.py at the inference log to obtain the offline-inference performance data
+```bash
+python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
+```
+Here "result-file" is the path and name of the performance log and "batch-size" is the batch size the model was tested with
+
+2. Computing accuracy
+Accuracy is computed with the posec3d_postprocess.py script
+```
+python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual folder name}
+```
+Here result_path is the folder that holds the offline-inference outputs and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
+
+| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
+| :------: | :------: | :------: | :------: | :------: |
+| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
+
+> **Note:**
+> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model.
\ No newline at end of file
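The --verify flag in section 3.1 compares PyTorch and ONNX outputs during export. A standalone sanity check along the same lines with onnxruntime (a sketch: the input name and shape come from the atc command above; the single-output assumption is illustrative):

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('./posec3d_bs1.onnx')
inp = sess.get_inputs()[0]
print(inp.name, inp.shape)   # expected: invals [1, 20, 17, 48, 56, 56]

dummy = np.random.rand(1, 20, 17, 48, 56, 56).astype(np.float32)
outputs = sess.run(None, {inp.name: dummy})
print(outputs[0].shape)      # per-clip class scores; exact shape depends on the exported head
```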
--
Gitee
From 78fe620f4fa79107faa28f35304cb373d69b6e4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:20:09 +0000
Subject: [PATCH 48/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 169 ------------------
1 file changed, 169 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
deleted file mode 100644
index 414eaac71e..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
+++ /dev/null
--
Gitee
From 1496fc93ebe00f67f1de8bca0c6bfee753a789ce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:20:36 +0000
Subject: [PATCH 49/58] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BA=86=E9=A2=84?=
=?UTF-8?q?=E5=A4=84=E7=90=86=E4=BA=A7=E7=94=9F=E6=95=B0=E6=8D=AE=E7=9A=84?=
=?UTF-8?q?=E5=9C=B0=E5=9D=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 175 ++++++++++++++++++
1 file changed, 175 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
new file mode 100644
index 0000000000..25073f3321
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
@@ -0,0 +1,175 @@
+# PoseC3D ONNX model end-to-end inference guide
+- [1. Model overview](#1)
+  - [Paper](#11)
+  - [Code](#12)
+- [2. Environment](#2)
+  - [Deep learning frameworks](#21)
+  - [Python third-party libraries](#22)
+- [3. Model conversion](#3)
+  - [pth to ONNX](#31)
+  - [ONNX to OM](#32)
+- [4. Data preprocessing](#4)
+  - [Data processing](#41)
+- [5. Offline inference](#5)
+  - [msame overview](#51)
+  - [Offline inference](#52)
+  - [Accuracy and performance comparison](#53)
+
+## 1. Model overview
+### 1.1 Paper
+[posec3d paper](https://arxiv.org/abs/2104.13586)
+### 1.2 Code
+[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+```bash
+git clone https://github.com/open-mmlab/mmaction2.git
+```
+> **Note:**
+> In this offline-inference project the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below.
+
+## 2. Environment
+### 2.1 Deep learning frameworks
+```
+CANN 5.1.RC1
+torch==1.11.0
+torchvision==0.12.0
+onnx==1.11.0
+```
+
+### 2.2 Python third-party libraries
+```
+numpy==1.21.6
+pillow==9.1.1
+mmcv==1.4.0
+```
+> **Note:**
+> pytorch, torchvision and onnx can be installed with pip or from the official wheel packages on x86, and built from source on Arm.
+> The other third-party libraries can be installed with pip3.7 install -r requirements.txt.
+
+## 3. Model conversion
+A one-step script converts the pth weight file into a batch-size-1 OM model:
+```bash
+bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
+```
+Running it generates the following files:
+```bash
+├── posec3d_bs1.onnx
+├── posec3d_bs1.om
+```
+
+### 3.1 pth to ONNX
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 weight file:
+[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth)
+
+3. Run the posec3d_pytorch2onnx.py script to generate the ONNX model file
+```bash
+python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
+```
+Here "shape" is the shape of the input node and "output-file" is the path and file name of the generated ONNX model.
+
+### 3.2 ONNX to OM
+1. Use atc to convert the ONNX model into an OM model file. posec3d needs the knowledge bank obtained from aoe tuning; for tool usage see [CANN V100R020C10 开发辅助工具指南 (推理) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
+
+```bash
+# copy the knowledge-bank folder into the current directory, e.g. with scp -r
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
+```
+
+## 4. Data preprocessing
+Download the annotation file [hmdb51.pkl](https://download.openmmlab.com/mmaction/posec3d/hmdb51.pkl) into the current directory.
+
+Data preprocessing is part of the test/eval_acc_perf.sh script.
+### 4.1 Data processing
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 dataset. posec3d is tested on 1530 videos from hmdb51: following the test procedure of the posec3d repository, frames are extracted from the validation videos, the images are resized, center-cropped and normalized, and the image data is written out as binary files (.bin)
+```bash
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+```
+> **Note:**
+> The inference tool used in this project is msame, so separate input data has to be generated for every batch_size.
+
+3. Run the input-generation script to produce the model's input bin files
+```bash
+python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name /opt/npu/hmdb51/prep_hmdb51_bs1
+```
+Here "batch_size" is the batch size of the generated data, data_root is the path of the raw dataset, ann_file is the matching annotation file, and "name" is the output folder for the generated data.
+Afterwards you will have a folder of the following form:
+```
+├── prep_image_bs1
+│   ├──0.bin
+│   ├──......
+```
+
+## 5. Offline inference
+Before running the one-step script, prepare the msame offline-inference tool as described in section 5.1.
+A single step then prepares the input data, runs offline inference and collects the NPU performance data:
+```bash
+bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
+```
+Running it generates the following files/folders:
+```bash
+├── hmdb51                   # the dataset under /opt/npu (folder)
+│   ├── annotations          # annotation files (folder)
+│   ├── prep_hmdb51_bs1      # binary files produced by preprocessing (folder)
+│   ├── rawframes            # raw frames (folder)
+│   ├── videos               # video data (folder)
+```
+```bash
+├── msame_bs1.txt            # output of the msame inference run
+├── result
+│   ├── outputs_bs1_om       # model outputs (folder)
+```
+
+### 5.1 msame overview
+msame is a model-inference tool: it takes an OM model plus the input bin files the model needs, and produces the output files the model generates for those inputs. See the [msame guide](https://gitee.com/ascend/tools/tree/master/msame) for how to obtain and use the tool.
+### 5.2 Offline inference
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Run offline inference
+Run the following command:
+```bash
+./msame --model "./posec3d_bs1.om" --input "/opt/npu/hmdb51/prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
+```
+The model output format is txt; the outputs are saved in the folder given by the "output" argument, and the inference log is written to msame_bs1.txt
+
+### 5.3 Accuracy and performance comparison
+1. Collecting performance data
+Point test/parse.py at the inference log to obtain the offline-inference performance data
+```bash
+python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
+```
+Here "result-file" is the path and name of the performance log and "batch-size" is the batch size the model was tested with
+
+2. Computing accuracy
+Accuracy is computed with the posec3d_postprocess.py script
+```
+python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual folder name}
+```
+Here result_path is the folder that holds the offline-inference outputs and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
+
+| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
+| :------: | :------: | :------: | :------: | :------: |
+| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
+
+> **Note:**
+> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model.
\ No newline at end of file
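posec3d_postprocess.py delegates the metric to mmaction's top_k_accuracy; a minimal NumPy re-implementation, for illustration of what section 5.3 reports:

```python
import numpy as np

def top_k_accuracy(scores, labels, topk=(1, 5)):
    """Fraction of samples whose true label is among the k highest-scored classes."""
    res = []
    for k in topk:
        top_k_preds = np.argsort(scores, axis=1)[:, -k:]   # k best class ids per sample
        hits = [label in preds for preds, label in zip(top_k_preds, labels)]
        res.append(float(np.mean(hits)))
    return res

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])
print(top_k_accuracy(scores, [1, 2]))   # [0.5, 1.0]: top-1 hits sample 0 only
```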
--
Gitee
From f5e5b8b763adaadff40a1379dad468fef7f02228 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:20:46 +0000
Subject: [PATCH 50/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/test/eval=5Fac?=
=?UTF-8?q?c=5Fperf.sh?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/test/eval_acc_perf.sh | 68 -------------------
1 file changed, 68 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
deleted file mode 100644
index 97a659c1bd..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
+++ /dev/null
--
Gitee
From 2730aed40d3f087f1a8d74fa8be74664334e1f21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:21:17 +0000
Subject: [PATCH 51/58] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BA=86=E9=A2=84?=
=?UTF-8?q?=E5=A4=84=E7=90=86=E4=BA=A7=E7=94=9F=E6=95=B0=E6=8D=AE=E7=9A=84?=
=?UTF-8?q?=E5=9C=B0=E5=9D=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/test/eval_acc_perf.sh | 68 +++++++++++++++++++
1 file changed, 68 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
new file mode 100644
index 0000000000..f9d1cc83a0
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+datasets_path="/opt/npu/hmdb51"
+
+for para in $*
+do
+ if [[ $para == --datasets_path* ]]; then
+ datasets_path=`echo ${para#*=}`
+ fi
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# ============================= prepare dataset ==============================
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+
+# ======================= generate prep_dataset ==============================
+rm -rf /opt/npu/hmdb51/prep_hmdb51_bs${batch_size}
+chmod u+x msame
+python posec3d_preprocess.py \
+ --batch_size ${batch_size} \
+ --data_root ${datasets_path}/rawframes/ \
+ --ann_file ./hmdb51.pkl \
+ --name /opt/npu/hmdb51/prep_hmdb51_bs${batch_size}
+if [ $? != 0 ]; then
+ echo "posec3d preprocess fail!"
+ exit -1
+fi
+echo "==> 1. creating /opt/npu/hmdb51/prep_hmdb51_bs${batch_size} successfully."
+
+# =============================== msame ======================================
+if [ ! -d ./result ]; then
+ mkdir ./result
+fi
+rm -rf ./result/outputs_bs${batch_size}_om
+./msame --model "./posec3d_bs${batch_size}.om" \
+ --input "/opt/npu/hmdb51/prep_hmdb51_bs${batch_size}" \
+ --output "./result/outputs_bs${batch_size}_om" \
+ --outfmt TXT > ./msame_bs${batch_size}.txt
+if [ $? != 0 ]; then
+ echo "msame bs${batch_size} fail!"
+ exit -1
+fi
+echo "==> 2. conducting hmdb51_bs${batch_size}.om successfully."
+
+
+# ============================ evaluate ======================================
+# msame writes its outputs into a timestamped subdirectory; evaluate the newest one
+output_dir=$(ls -td ./result/outputs_bs${batch_size}_om/*/ | head -1)
+python posec3d_postprocess.py \
+    --result_path ${output_dir} \
+    --info_path ./hmdb51.info
+
+if [ $? != 0 ]; then
+ echo "fail!"
+ exit -1
+fi
+echo "==> 3. evaluating hmda51 on bs${batch_size} successfully."
+echo '==> 4. Done.'
\ No newline at end of file
--
Gitee
From ab27fdfae721fccc13f84f0145cdab80a3f9e53e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:31:20 +0000
Subject: [PATCH 52/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 175 ------------------
1 file changed, 175 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
deleted file mode 100644
index 25073f3321..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
+++ /dev/null
--
Gitee
From b30af714ccec12c69f3462e8fcb845dc146199f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 05:32:32 +0000
Subject: [PATCH 53/58] README
---
.../cv/pose_estimation/PoseC3D/README.md | 175 ++++++++++++++++++
1 file changed, 175 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
new file mode 100644
index 0000000000..6aacb4aec5
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
@@ -0,0 +1,175 @@
+# PoseC3D ONNX model end-to-end inference guide
+- [1. Model overview](#1)
+  - [Paper](#11)
+  - [Code](#12)
+- [2. Environment](#2)
+  - [Deep learning frameworks](#21)
+  - [Python third-party libraries](#22)
+- [3. Model conversion](#3)
+  - [pth to ONNX](#31)
+  - [ONNX to OM](#32)
+- [4. Data preprocessing](#4)
+  - [Data processing](#41)
+- [5. Offline inference](#5)
+  - [msame overview](#51)
+  - [Offline inference](#52)
+  - [Accuracy and performance comparison](#53)
+
+## 1. Model overview
+### 1.1 Paper
+[posec3d paper](https://arxiv.org/abs/2104.13586)
+### 1.2 Code
+[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+```bash
+git clone https://github.com/open-mmlab/mmaction2.git
+```
+> **Note:**
+> In this offline-inference project the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below.
+
+## 2. Environment
+### 2.1 Deep learning frameworks
+```
+CANN 5.1.RC1
+torch==1.11.0
+torchvision==0.12.0
+onnx==1.11.0
+```
+
+### 2.2 Python third-party libraries
+```
+numpy==1.21.6
+pillow==9.1.1
+mmcv==1.4.0
+```
+> **Note:**
+> pytorch, torchvision and onnx can be installed with pip or from the official wheel packages on x86, and built from source on Arm.
+> The other third-party libraries can be installed with pip3.7 install -r requirements.txt.
+
+## 3. Model conversion
+A one-step script converts the pth weight file into a batch-size-1 OM model:
+```bash
+bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
+```
+Running it generates the following files:
+```bash
+├── posec3d_bs1.onnx
+├── posec3d_bs1.om
+```
+
+### 3.1 pth to ONNX
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 weight file:
+[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth)
+
+3. Run the posec3d_pytorch2onnx.py script to generate the ONNX model file
+```bash
+python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
+```
+Here "shape" is the shape of the input node and "output-file" is the path and file name of the generated ONNX model.
+
+### 3.2 ONNX to OM
+1. Use atc to convert the ONNX model into an OM model file. posec3d needs the knowledge bank obtained from aoe tuning; for tool usage see [CANN V100R020C10 开发辅助工具指南 (推理) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
+
+```bash
+# copy the knowledge-bank folder into the current directory, e.g. with scp -r
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
+```
+
+## 4. Data preprocessing
+Download the annotation file [hmdb51.pkl](https://download.openmmlab.com/mmaction/posec3d/hmdb51.pkl) into the current directory.
+
+Data preprocessing is part of the test/eval_acc_perf.sh script.
+### 4.1 Data processing
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 dataset. posec3d is tested on 1530 videos from hmdb51: following the test procedure of the posec3d repository, frames are extracted from the validation videos, the images are resized, center-cropped and normalized, and the image data is written out as binary files (.bin)
+```bash
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+```
+> **Note:**
+> The inference tool used in this project is msame, so separate input data has to be generated for every batch_size.
+
+3. Run the input-generation script to produce the model's input bin files
+```bash
+python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name /opt/npu/hmdb51/prep_hmdb51_bs1
+```
+Here "batch_size" is the batch size of the generated data, data_root is the path of the raw dataset, ann_file is the matching annotation file, and "name" is the output folder for the generated data.
+Afterwards you will have a folder of the following form:
+```
+├── prep_image_bs1
+│   ├──0.bin
+│   ├──......
+```
+
+## 5. Offline inference
+Before running the one-step script, prepare the msame offline-inference tool as described in section 5.1.
+A single step then prepares the input data, runs offline inference and collects the NPU performance data:
+```bash
+bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
+```
+Running it generates the following files/folders:
+```bash
+├── hmdb51                   # the dataset under /opt/npu (folder)
+│   ├── annotations          # annotation files (folder)
+│   ├── prep_hmdb51_bs1      # binary files produced by preprocessing (folder)
+│   ├── rawframes            # raw frames (folder)
+│   ├── videos               # video data (folder)
+```
+```bash
+├── msame_bs1.txt            # output of the msame inference run
+├── result
+│   ├── outputs_bs1_om       # model outputs (folder)
+```
+
+### 5.1 msame overview
+msame is a model-inference tool: it takes an OM model plus the input bin files the model needs, and produces the output files the model generates for those inputs. See the [msame guide](https://gitee.com/ascend/tools/tree/master/msame) for how to obtain and use the tool.
+### 5.2 Offline inference
+1. Set the environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Run offline inference
+Run the following command:
+```bash
+./msame --model "./posec3d_bs1.om" --input "/opt/npu/hmdb51/prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
+```
+The model output format is txt; the outputs are saved in the folder given by the "output" argument, and the inference log is written to msame_bs1.txt
+
+### 5.3 Accuracy and performance comparison
+1. Collecting performance data
+Point test/parse.py at the inference log to obtain the offline-inference performance data
+```bash
+python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
+```
+Here "result-file" is the path and name of the performance log and "batch-size" is the batch size the model was tested with
+
+2. Computing accuracy
+Accuracy is computed with the posec3d_postprocess.py script
+```
+python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual folder name}
+```
+Here result_path is the folder that holds the offline-inference outputs and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
+
+| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
+| :------: | :------: | :------: | :------: | :------: |
+| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
+
+> **Note:**
+> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model.
\ No newline at end of file
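posec3d_postprocess.py recovers the ground-truth labels by taking the last whitespace-separated field of every hmdb51.info line; a tiny illustration with made-up file contents:

```python
# each hmdb51.info line ends with the ground-truth class id (format assumed)
lines = ['0 12', '1 3', '2 47']

gt_labels = [int(line.split()[-1]) for line in lines]
print(gt_labels)   # [12, 3, 47]
```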
--
Gitee
From 6f351eaed808ed52f1f418135e49c29908cf86c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 07:37:45 +0000
Subject: [PATCH 54/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/test/eval=5Fac?=
=?UTF-8?q?c=5Fperf.sh?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/test/eval_acc_perf.sh | 68 -------------------
1 file changed, 68 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
deleted file mode 100644
index f9d1cc83a0..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-batch_size=1
-datasets_path="/opt/npu/hmdb51"
-
-for para in $*
-do
- if [[ $para == --datasets_path* ]]; then
- datasets_path=`echo ${para#*=}`
- fi
- if [[ $para == --batch_size* ]]; then
- batch_size=`echo ${para#*=}`
- fi
-done
-
-# ============================= prepare dataset ==============================
-cd ./mmaction2/tools/data/hmdb51
-bash download_annotations.sh
-bash download_videos.sh
-bash extract_rgb_frames_opencv.sh
-bash generate_rawframes_filelist.sh
-bash generate_videos_filelist.sh
-../../../data/hmdb51 /opt/npu
-cd ../../../..
-
-# ======================= generate prep_dataset ==============================
-rm -rf ./prep_hmdb51_bs${batch_size}
-chmod u+x msame
-python posec3d_preprocess.py \
- --batch_size ${batch_size} \
- --data_root ${datasets_path}/rawframes/ \
- --ann_file ./hmdb51.pkl \
- --name /opt/npu/hmdb51/prep_hmdb51_bs${batch_size}
-if [ $? != 0 ]; then
- echo "posec3d preprocess fail!"
- exit -1
-fi
-echo "==> 1. creating /opt/npu/hmdb51/prep_hmdb51_bs${batch_size} successfully."
-
-# =============================== msame ======================================
-if [ ! -d ./result ]; then
- mkdir ./result
-fi
-rm -rf ./result/outputs_bs${batch_size}_om
-./msame --model "./posec3d_bs${batch_size}.om" \
- --input "/opt/npu/hmdb51/prep_hmdb51_bs${batch_size}" \
- --output "./result/outputs_bs${batch_size}_om" \
- --outfmt TXT > ./msame_bs${batch_size}.txt
-if [ $? != 0 ]; then
- echo "msame bs${batch_size} fail!"
- exit -1
-fi
-echo "==> 2. conducting hmdb51_bs${batch_size}.om successfully."
-
-
-# ============================ evaluate ======================================
-python posec3d_postprocess.py \
- --result_path ./result/outputs_bs${batch_size}_om/20220612_142755 \
- --info_path ./hmdb51.info
-
-if [ $? != 0 ]; then
- echo "fail!"
- exit -1
-fi
-echo "==> 3. evaluating hmda51 on bs${batch_size} successfully."
-echo '==> 4. Done.'
\ No newline at end of file
--
Gitee
From 2afa6a1281dcda1f54c8bea7e3c90167a2777103 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 07:39:48 +0000
Subject: [PATCH 55/58] =?UTF-8?q?=E6=B5=8B=E8=AF=95=E7=B2=BE=E5=BA=A6?=
=?UTF-8?q?=E5=92=8C=E6=80=A7=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../PoseC3D/test/eval_acc_perf.sh | 68 +++++++++++++++++++
1 file changed, 68 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
new file mode 100644
index 0000000000..87e2a14b62
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+
+batch_size=1
+datasets_path="/opt/npu/hmdb51"
+
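+# parse optional overrides: --datasets_path=... and --batch_size=...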
+for para in $*
+do
+ if [[ $para == --datasets_path* ]]; then
+ datasets_path=`echo ${para#*=}`
+ fi
+ if [[ $para == --batch_size* ]]; then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# ============================= prepare dataset ==============================
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+
+# ======================= generate prep_dataset ==============================
+rm -rf ./prep_hmdb51_bs${batch_size}
+chmod u+x msame
+python posec3d_preprocess.py \
+ --batch_size ${batch_size} \
+ --data_root ${datasets_path}/rawframes/ \
+ --ann_file ./hmdb51.pkl \
+ --name /opt/npu/hmdb51/prep_hmdb51_bs${batch_size}
+if [ $? != 0 ]; then
+ echo "posec3d preprocess fail!"
+ exit -1
+fi
+echo "==> 1. creating /opt/npu/hmdb51/prep_hmdb51_bs${batch_size} successfully."
+
+# =============================== msame ======================================
+if [ ! -d ./result ]; then
+ mkdir ./result
+fi
+rm -rf ./result/outputs_bs${batch_size}_om
+./msame --model "./posec3d_bs${batch_size}.om" \
+ --input "/opt/npu/hmdb51/prep_hmdb51_bs${batch_size}" \
+ --output "./result/outputs_bs${batch_size}_om" \
+ --outfmt TXT > ./msame_bs${batch_size}.txt
+if [ $? != 0 ]; then
+ echo "msame bs${batch_size} fail!"
+ exit -1
+fi
+echo "==> 2. conducting hmdb51_bs${batch_size}.om successfully."
+
+
+# ============================ evaluate ======================================
+python posec3d_postprocess.py \
+ --result_path ./result/outputs_bs${batch_size}_om/20220612_142755 \
+ --info_path ./hmdb51.info
+
+if [ $? != 0 ]; then
+ echo "fail!"
+ exit -1
+fi
+echo "==> 3. evaluating hmda51 on bs${batch_size} successfully."
+echo '==> 4. Done.'
\ No newline at end of file
--
Gitee
From 20394b3316c5f9314a9db58ab67b06876d142d6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 07:53:38 +0000
Subject: [PATCH 56/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 175 ------------------
1 file changed, 175 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
deleted file mode 100644
index 6aacb4aec5..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# PoseC3D ONNX model end-to-end inference guide
-- [1. Model overview](#1)
-  - [Paper](#11)
-  - [Code](#12)
-- [2. Environment](#2)
-  - [Deep learning frameworks](#21)
-  - [Python third-party libraries](#22)
-- [3. Model conversion](#3)
-  - [pth to onnx model](#31)
-  - [onnx to om model](#32)
-- [4. Data preprocessing](#4)
-  - [Data processing](#41)
-- [5. Offline inference](#5)
-  - [msame tool overview](#51)
-  - [Offline inference](#52)
-  - [Accuracy and performance comparison](#53)
-
-## 1. Model overview
-### 1.1 Paper
-[posec3d paper](https://arxiv.org/abs/2104.13586)
-### 1.2 Code
-[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
-
-```bash
-git clone https://github.com/open-mmlab/mmaction2.git
-```
-> **Note:**
-> In this offline inference project, the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below
-
-## 2. Environment
-### 2.1 Deep learning frameworks
-```
-CANN 5.1.RC1
-torch==1.11.0
-torchvision==0.12.0
-onnx==1.11.0
-```
-
-### 2.2 Python third-party libraries
-```
-numpy==1.21.6
-pillow==9.1.1
-mmcv==1.4.0
-```
-> **Note:**
-> pytorch, torchvision and onnx: on x86 they can be installed via pip or from officially downloaded whl packages; on Arm they can be compiled and installed from source
-> Other third-party libraries can be installed with pip3.7 install -r requirements.txt
-
-## 3. Model conversion
-A one-step script converts the pth weight file to an om model, producing a batch-size-1 om model from the pth weights:
-```bash
-bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
-```
-Running it generates the following files:
-```bash
-├── posec3d_bs1.onnx
-├── posec3d_bs1.om
-```
-
-### 3.1 pth to onnx model
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Download the hmdb51 weight file:
-[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth)
-
-3. Run the posec3d_pytorch2onnx.py script to generate the onnx model file
-```bash
-python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
-```
-Here "shape" is the shape of the model's input node, and "output-file" is the path and name of the generated onnx model
-
-### 3.2 onnx to om model
-1. Use atc to convert the onnx model into an om model file. The posec3d model relies on the knowledge bank produced by aoe tuning; for tool usage see [CANN V100R020C10 Development Auxiliary Tool Guide (Inference) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
-
-```bash
-# copy the knowledge-bank folder into the current directory beforehand, e.g. with scp -r
-export TUNE_BANK_PATH="./aoe_result_bs1"
-atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
-```
-
-## 4. Data preprocessing
-Download the hmdb51.pkl annotation file [hmdb51.pkl](https://download.openmmlab.com/mmaction/posec3d/hmdb51.pkl) into the current directory
-
-The data preprocessing steps are included in the test/eval_acc_perf.sh script
-### 4.1 Data processing
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Download the hmdb51 dataset. The posec3d model is tested on 1530 videos from hmdb51: following the test procedure of the posec3d source repository, frames are extracted from the validation videos, and the preprocessed images are resized, center-cropped, normalized and converted to binary files (.bin)
-```bash
-cd ./mmaction2/tools/data/hmdb51
-bash download_annotations.sh
-bash download_videos.sh
-bash extract_rgb_frames_opencv.sh
-bash generate_rawframes_filelist.sh
-bash generate_videos_filelist.sh
-../../../data/hmdb51 /opt/npu
-cd ../../../..
-```
-> **Note:**
-> This project uses msame as the inference tool, so separate input data must be generated for each batch_size
-
-3. Run the input-data generation script to produce the model's input bin files
-```bash
-python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name /opt/npu/hmdb51/prep_hmdb51_bs1
-```
-Here "batch_size" is the batch size of the generated dataset, data_root is the path of the raw dataset, ann_file is the corresponding annotation file, and "name" is the folder name of the generated dataset.
-After running, a folder of the following form is produced:
-```
-├── prep_image_bs1
-│ ├──0.bin
-│ ├──......
-```
-
-## 5. Offline inference
-Before running the one-step flow, prepare the msame offline inference tool as described in section 5.1
-One step prepares the input data, runs offline model inference and collects the NPU performance data:
-```bash
-bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
-```
-Running it generates the following files/folders:
-```bash
-├── hmdb51                # dataset under /opt/npu (folder)
-│    ├── annotations      # annotation files (folder)
-│    ├── prep_hmdb51_bs1  # preprocessed binary files (folder)
-│    ├── rawframes        # raw frames (folder)
-│    ├── videos           # video data (folder)
-```
-```bash
-├── msame_bs1.txt         # log of the msame inference run
-├── result
-│    ├── outputs_bs1_om   # model outputs (folder)
-```
-
-### 5.1 msame tool overview
-msame is a model inference tool: it takes an om model and the input bin files the model requires, and writes the output files the model produces for those inputs. For obtaining the tool and usage instructions, see the [msame model inference tool guide](https://gitee.com/ascend/tools/tree/master/msame)
-### 5.2 Offline inference
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Run offline inference
-Run the following command:
-```bash
-./msame --model "./posec3d_bs1.om" --input "/opt/npu/hmdb51/prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
-```
-The model outputs are txt files saved in the folder given by the "output" argument; an msame_bs1.txt inference log is generated at the same time
-
-### 5.3 Accuracy and performance comparison
-1. Obtaining performance data
-Pointing test/parse.py at the inference log file yields the offline inference performance data
-```bash
-python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
-```
-Here "result-file" is the path and name of the performance log, and "batch-size" is the batch size the model was tested with
-
-2. Computing accuracy data
-Accuracy is computed with the posec3d_postprocess.py script
-```bash
-python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual output folder}
-```
-Here result_path is the folder holding the offline inference outputs (msame writes them into a subfolder named after the run timestamp), and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
-
-| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
-| :------: | :------: | :------: | :------: | :------: |
-| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
-
-> **Note:**
-> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model
\ No newline at end of file
--
Gitee
From 409b233ed1d7cd901c5239fc28793392195a6581 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Mon, 13 Jun 2022 07:54:23 +0000
Subject: [PATCH 57/58] =?UTF-8?q?README=20=E5=88=A0=E9=99=A4=E4=BA=86?=
=?UTF-8?q?=E7=9B=B4=E6=8E=A5=E4=B8=8B=E8=BD=BD=E6=95=B0=E6=8D=AE=E9=9B=86?=
=?UTF-8?q?=E7=9A=84=E9=93=BE=E6=8E=A5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/README.md | 175 ++++++++++++++++++
1 file changed, 175 insertions(+)
create mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
new file mode 100644
index 0000000000..1603e2c2e3
--- /dev/null
+++ b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
@@ -0,0 +1,175 @@
+# PoseC3D ONNX model end-to-end inference guide
+- [1. Model overview](#1)
+  - [Paper](#11)
+  - [Code](#12)
+- [2. Environment](#2)
+  - [Deep learning frameworks](#21)
+  - [Python third-party libraries](#22)
+- [3. Model conversion](#3)
+  - [pth to onnx model](#31)
+  - [onnx to om model](#32)
+- [4. Data preprocessing](#4)
+  - [Data processing](#41)
+- [5. Offline inference](#5)
+  - [msame tool overview](#51)
+  - [Offline inference](#52)
+  - [Accuracy and performance comparison](#53)
+
+## 1. Model overview
+### 1.1 Paper
+[posec3d paper](https://arxiv.org/abs/2104.13586)
+### 1.2 Code
+[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+```bash
+git clone https://github.com/open-mmlab/mmaction2.git
+```
+> **Note:**
+> In this offline inference project, the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below
+
+## 2. Environment
+### 2.1 Deep learning frameworks
+```
+CANN 5.1.RC1
+torch==1.11.0
+torchvision==0.12.0
+onnx==1.11.0
+```
+
+### 2.2 Python third-party libraries
+```
+numpy==1.21.6
+pillow==9.1.1
+mmcv==1.4.0
+```
+> **Note:**
+> pytorch, torchvision and onnx: on x86 they can be installed via pip or from officially downloaded whl packages; on Arm they can be compiled and installed from source
+> Other third-party libraries can be installed with pip3.7 install -r requirements.txt
+
+## 3. Model conversion
+A one-step script converts the pth weight file to an om model, producing a batch-size-1 om model from the pth weights:
+```bash
+bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
+```
+Running it generates the following files:
+```bash
+├── posec3d_bs1.onnx
+├── posec3d_bs1.om
+```
+
+### 3.1 pth to onnx model
+1. Set environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 weight file:
+[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
+
+3. Run the posec3d_pytorch2onnx.py script to generate the onnx model file
+```bash
+python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
+```
+Here "shape" is the shape of the model's input node, and "output-file" is the path and name of the generated onnx model
+
+### 3.2 onnx to om model
+1. Use atc to convert the onnx model into an om model file. The posec3d model relies on the knowledge bank produced by aoe tuning; for tool usage see [CANN V100R020C10 Development Auxiliary Tool Guide (Inference) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
+
+```bash
+# copy the knowledge-bank folder into the current directory beforehand, e.g. with scp -r
+export TUNE_BANK_PATH="./aoe_result_bs1"
+atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
+```
+
+## 4. Data preprocessing
+Download the hmdb51.pkl annotation file [hmdb51.pkl](https://github.com/open-mmlab/mmaction2/tree/master/tools/data/skeleton) into the current directory
+
+The data preprocessing steps are included in the test/eval_acc_perf.sh script
+### 4.1 Data processing
+1. Set environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Download the hmdb51 dataset. The posec3d model is tested on 1530 videos from hmdb51: following the test procedure of the posec3d source repository, frames are extracted from the validation videos, and the preprocessed images are resized, center-cropped, normalized and converted to binary files (.bin)
+```bash
+cd ./mmaction2/tools/data/hmdb51
+bash download_annotations.sh
+bash download_videos.sh
+bash extract_rgb_frames_opencv.sh
+bash generate_rawframes_filelist.sh
+bash generate_videos_filelist.sh
+mv ../../../data/hmdb51 /opt/npu
+cd ../../../..
+```
+> **Note:**
+> This project uses msame as the inference tool, so separate input data must be generated for each batch_size
+
+3. Run the input-data generation script to produce the model's input bin files
+```bash
+python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name /opt/npu/hmdb51/prep_hmdb51_bs1
+```
+Here "batch_size" is the batch size of the generated dataset, data_root is the path of the raw dataset, ann_file is the corresponding annotation file, and "name" is the folder name of the generated dataset.
+After running, a folder of the following form is produced:
+```
+├── prep_image_bs1
+│ ├──0.bin
+│ ├──......
+```
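+
+To spot-check a generated bin file, it can be read back with numpy; this assumes the default float32 layout in the model's input shape (bs1):
+```python
+import numpy as np
+
+# one preprocessed sample, stored flat as float32
+arr = np.fromfile('/opt/npu/hmdb51/prep_hmdb51_bs1/0.bin', dtype=np.float32)
+print(arr.reshape(1, 20, 17, 48, 56, 56).shape)  # matches the atc input_shape
+```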
+
+## 5. Offline inference
+Before running the one-step flow, prepare the msame offline inference tool as described in section 5.1
+One step prepares the input data, runs offline model inference and collects the NPU performance data:
+```bash
+bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
+```
+Running it generates the following files/folders:
+```bash
+├── hmdb51                # dataset under /opt/npu (folder)
+│    ├── annotations      # annotation files (folder)
+│    ├── prep_hmdb51_bs1  # preprocessed binary files (folder)
+│    ├── rawframes        # raw frames (folder)
+│    ├── videos           # video data (folder)
+```
+```bash
+├── msame_bs1.txt         # log of the msame inference run
+├── result
+│    ├── outputs_bs1_om   # model outputs (folder)
+```
+
+### 5.1 msame tool overview
+msame is a model inference tool: it takes an om model and the input bin files the model requires, and writes the output files the model produces for those inputs. For obtaining the tool and usage instructions, see the [msame model inference tool guide](https://gitee.com/ascend/tools/tree/master/msame)
+### 5.2 Offline inference
+1. Set environment variables
+```bash
+source /usr/local/Ascend/ascend-toolkit/set_env.sh
+```
+
+2. Run offline inference
+Run the following command:
+```bash
+./msame --model "./posec3d_bs1.om" --input "/opt/npu/hmdb51/prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
+```
+The model outputs are txt files saved in the folder given by the "output" argument; an msame_bs1.txt inference log is generated at the same time
+
+### 5.3 Accuracy and performance comparison
+1. Obtaining performance data
+Pointing test/parse.py at the inference log file yields the offline inference performance data
+```bash
+python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
+```
+Here "result-file" is the path and name of the performance log, and "batch-size" is the batch size the model was tested with
+
+2. Computing accuracy data
+Accuracy is computed with the posec3d_postprocess.py script
+```bash
+python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual output folder}
+```
+Here result_path is the folder holding the offline inference outputs (msame writes them into a subfolder named after the run timestamp), and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
+
+| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
+| :------: | :------: | :------: | :------: | :------: |
+| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
+
+> **Note:**
+> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model
\ No newline at end of file
--
Gitee
From 97f405795b7501495c2cac7e2e11669bdf159545 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=90=B4=E4=BB=AA=E7=9B=88?= <850763196@qq.com>
Date: Tue, 14 Jun 2022 06:42:18 +0000
Subject: [PATCH 58/58] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20AC?=
=?UTF-8?q?L=5FPyTorch/contrib/cv/pose=5Festimation/PoseC3D?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/pose_estimation/PoseC3D/LICENSE | 201 ------------------
.../cv/pose_estimation/PoseC3D/README.md | 175 ---------------
.../PoseC3D/posec3d_postprocess.py | 87 --------
.../PoseC3D/posec3d_preprocess.py | 87 --------
.../PoseC3D/posec3d_pytorch2onnx.py | 201 ------------------
.../pose_estimation/PoseC3D/requirements.txt | 11 -
.../PoseC3D/test/eval_acc_perf.sh | 68 ------
.../cv/pose_estimation/PoseC3D/test/parse.py | 40 ----
.../pose_estimation/PoseC3D/test/perf_t4.sh | 13 --
.../cv/pose_estimation/PoseC3D/test/pth2om.sh | 48 -----
10 files changed, 931 deletions(-)
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
delete mode 100644 ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
deleted file mode 100644
index 6cd71007de..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2018-2019 Open-MMLab.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
deleted file mode 100644
index 1603e2c2e3..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# PoseC3D ONNX model end-to-end inference guide
-- [1. Model overview](#1)
-  - [Paper](#11)
-  - [Code](#12)
-- [2. Environment](#2)
-  - [Deep learning frameworks](#21)
-  - [Python third-party libraries](#22)
-- [3. Model conversion](#3)
-  - [pth to onnx model](#31)
-  - [onnx to om model](#32)
-- [4. Data preprocessing](#4)
-  - [Data processing](#41)
-- [5. Offline inference](#5)
-  - [msame tool overview](#51)
-  - [Offline inference](#52)
-  - [Accuracy and performance comparison](#53)
-
-## 1. Model overview
-### 1.1 Paper
-[posec3d paper](https://arxiv.org/abs/2104.13586)
-### 1.2 Code
-[posec3d code](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
-
-```bash
-git clone https://github.com/open-mmlab/mmaction2.git
-```
-> **Note:**
-> In this offline inference project, the posec3d model corresponds to PoseConv3D in the paper; PoseConv3D is abbreviated as posec3d below
-
-## 2. Environment
-### 2.1 Deep learning frameworks
-```
-CANN 5.1.RC1
-torch==1.11.0
-torchvision==0.12.0
-onnx==1.11.0
-```
-
-### 2.2 Python third-party libraries
-```
-numpy==1.21.6
-pillow==9.1.1
-mmcv==1.4.0
-```
-> **Note:**
-> pytorch, torchvision and onnx: on x86 they can be installed via pip or from officially downloaded whl packages; on Arm they can be compiled and installed from source
-> Other third-party libraries can be installed with pip3.7 install -r requirements.txt
-
-## 3. Model conversion
-A one-step script converts the pth weight file to an om model, producing a batch-size-1 om model from the pth weights:
-```bash
-bash ./test/pth2om.sh --batch_size=1 --not_skip_onnx=true
-```
-Running it generates the following files:
-```bash
-├── posec3d_bs1.onnx
-├── posec3d_bs1.om
-```
-
-### 3.1 pth to onnx model
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Download the hmdb51 weight file:
-[slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth](https://github.com/open-mmlab/mmaction2/tree/master/configs/skeleton/posec3d)
-
-3. Run the posec3d_pytorch2onnx.py script to generate the onnx model file
-```bash
-python ./posec3d_pytorch2onnx.py ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth --shape 1 20 17 48 56 56 --verify --output-file ./posec3d_bs1.onnx
-```
-Here "shape" is the shape of the model's input node, and "output-file" is the path and name of the generated onnx model
-
-### 3.2 onnx to om model
-1. Use atc to convert the onnx model into an om model file. The posec3d model relies on the knowledge bank produced by aoe tuning; for tool usage see [CANN V100R020C10 Development Auxiliary Tool Guide (Inference) 01](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/51RC2alpha002/infacldevg/atctool)
-
-```bash
-# copy the knowledge-bank folder into the current directory beforehand, e.g. with scp -r
-export TUNE_BANK_PATH="./aoe_result_bs1"
-atc --framework=5 --model=./posec3d_bs1.onnx --output=./posec3d_bs1 --input_format=ND --input_shape="invals:1,20,17,48,56,56" --log=debug --soc_version=Ascend710
-```
-
-## 4. Data preprocessing
-Download the hmdb51.pkl annotation file [hmdb51.pkl](https://github.com/open-mmlab/mmaction2/tree/master/tools/data/skeleton) into the current directory
-
-The data preprocessing steps are included in the test/eval_acc_perf.sh script
-### 4.1 Data processing
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Download the hmdb51 dataset. The posec3d model is tested on 1530 videos from hmdb51: following the test procedure of the posec3d source repository, frames are extracted from the validation videos, and the preprocessed images are resized, center-cropped, normalized and converted to binary files (.bin)
-```bash
-cd ./mmaction2/tools/data/hmdb51
-bash download_annotations.sh
-bash download_videos.sh
-bash extract_rgb_frames_opencv.sh
-bash generate_rawframes_filelist.sh
-bash generate_videos_filelist.sh
-mv ../../../data/hmdb51 /opt/npu
-cd ../../../..
-```
-> **Note:**
-> This project uses msame as the inference tool, so separate input data must be generated for each batch_size
-
-3. Run the input-data generation script to produce the model's input bin files
-```bash
-python3.7 posec3d_preprocess.py --batch_size 1 --data_root /opt/npu/hmdb51/rawframes/ --ann_file hmdb51.pkl --name /opt/npu/hmdb51/prep_hmdb51_bs1
-```
-Here "batch_size" is the batch size of the generated dataset, data_root is the path of the raw dataset, ann_file is the corresponding annotation file, and "name" is the folder name of the generated dataset.
-After running, a folder of the following form is produced:
-```
-├── prep_image_bs1
-│ ├──0.bin
-│ ├──......
-```
-
-## 5. Offline inference
-Before running the one-step flow, prepare the msame offline inference tool as described in section 5.1
-One step prepares the input data, runs offline model inference and collects the NPU performance data:
-```bash
-bash ./test/eval_acc_perf.sh --batch_size=1 --datasets_path=/opt/npu/hmdb51
-```
-Running it generates the following files/folders:
-```bash
-├── hmdb51                # dataset under /opt/npu (folder)
-│    ├── annotations      # annotation files (folder)
-│    ├── prep_hmdb51_bs1  # preprocessed binary files (folder)
-│    ├── rawframes        # raw frames (folder)
-│    ├── videos           # video data (folder)
-```
-```bash
-├── msame_bs1.txt         # log of the msame inference run
-├── result
-│    ├── outputs_bs1_om   # model outputs (folder)
-```
-
-### 5.1 msame tool overview
-msame is a model inference tool: it takes an om model and the input bin files the model requires, and writes the output files the model produces for those inputs. For obtaining the tool and usage instructions, see the [msame model inference tool guide](https://gitee.com/ascend/tools/tree/master/msame)
-### 5.2 Offline inference
-1. Set environment variables
-```bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-```
-
-2. Run offline inference
-Run the following command:
-```bash
-./msame --model "./posec3d_bs1.om" --input "/opt/npu/hmdb51/prep_hmdb51_bs1" --output "./result/outputs_bs1_om" --outfmt TXT > msame_bs1.txt
-```
-The model outputs are txt files saved in the folder given by the "output" argument; an msame_bs1.txt inference log is generated at the same time
-
-### 5.3 Accuracy and performance comparison
-1. Obtaining performance data
-Pointing test/parse.py at the inference log file yields the offline inference performance data
-```bash
-python3.7 test/parse.py --result-file ./msame_bs1.txt --batch-size 1
-```
-Here "result-file" is the path and name of the performance log, and "batch-size" is the batch size the model was tested with
-
-2. Computing accuracy data
-Accuracy is computed with the posec3d_postprocess.py script
-```bash
-python3.7 posec3d_postprocess.py --result_path ./result/outputs_bs1_om/{actual output folder}
-```
-Here result_path is the folder holding the offline inference outputs (msame writes them into a subfolder named after the run timestamp), and info_path (default "./hmdb51.info") is the path and name of the hmdb51 validation-set label file.
-
-| Model | Reference accuracy | 310P accuracy | Baseline performance | 310P performance |
-| :------: | :------: | :------: | :------: | :------: |
-| posec3d_hmdb51_bs1 | top1:69.3% | top1:69.2% | 15.668fps | 24.461fps |
-
-> **Note:**
-> Top1 is the probability that the class with the highest predicted score matches the ground-truth class; the higher it is, the better the classification model
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
deleted file mode 100644
index 03982676ab..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_postprocess.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import os
-import argparse
-import numpy as np
-from collections import OrderedDict
-from mmaction.core import top_k_accuracy
-import torch
-import pdb
-import torch.nn.functional as F
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Dataset K400 Postprocessing')
- parser.add_argument('--result_path', default='/home/wyy/output/out_bs1/20220414_113751', type=str)
- parser.add_argument('--info_path', default='./hmdb51.info', type=str)
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
-
- # load info file
- gt_labels = []
- with open(args.info_path, 'r') as f:
- for line in f.readlines():
- t = line.split( )[-1]
- gt_labels.append(int(t))
-
- # load inference result
- results = []
-
- num_file = len(os.listdir(args.result_path))
- for idx in range(num_file):
- file = os.path.join(args.result_path, str(idx) + '_output_0.txt')
- result = np.loadtxt(file)
- result = torch.from_numpy(result)
- batch_size = result.shape[0]
-# pdb.set_trace()
- result = result.view(batch_size // 20, 20, -1) # cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
-
- result = F.softmax(result, dim=2).mean(dim=1).numpy() # cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
- results.extend(result)
-
-
- metrics = ['top_k_accuracy']
- metric_options = dict(top_k_accuracy=dict(topk=(1, 5)))
- eval_results = OrderedDict()
- for metric in metrics:
- print(f'Evaluating {metric} ...')
- if metric == 'top_k_accuracy':
- topk = metric_options.setdefault('top_k_accuracy',
- {}).setdefault('topk', (1, 5))
- if not isinstance(topk, (int, tuple)):
- raise TypeError(
- f'topk must be int or tuple of int, but got {type(topk)}')
- if isinstance(topk, int):
- topk = (topk, )
-
- top_k_acc = top_k_accuracy(results, gt_labels, topk)
- log_msg = []
- for k, acc in zip(topk, top_k_acc):
- eval_results[f'top{k}_acc'] = acc
- log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
- log_msg = ''.join(log_msg)
- print(log_msg)
- continue
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
deleted file mode 100644
index 079fa358df..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_preprocess.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import os
-import torch
-import numpy as np
-import argparse
-from mmcv import Config
-import torch.nn.functional as F
-from mmaction.datasets import build_dataloader, build_dataset
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Dataset HMDB51 Preprocessing')
- parser.add_argument('--config',
- default='./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py',
- help='config file path')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
- parser.add_argument('--num_worker', default=8, type=int, help='Number of workers for inference')
- parser.add_argument('--data_root', default='/opt/npu/hmdb51/rawframes/', type=str)
- parser.add_argument('--ann_file', default='/opt/npu/hmdb51/hmdb51.pkl', type=str)
- parser.add_argument('--name', default='prep_hmdb51_bs', type=str)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = parse_args()
- cfg = Config.fromfile(args.config)
-
- cfg.data.test.ann_file = args.ann_file
- cfg.data.test.data_prefix = args.data_root
-
- # build the dataloader
- dataset = build_dataset(cfg.data.test, dict(test_mode=True))
- dataloader_setting = dict(
- videos_per_gpu=args.batch_size,
- workers_per_gpu=args.num_worker,
- dist=False,
- shuffle=False)
- dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
- data_loader = build_dataloader(dataset, **dataloader_setting)
-
- root_path = os.path.dirname(args.ann_file)
- out_path = './prep_hmdb51_bs1'
- if not os.path.exists(out_path):
- os.mkdir(out_path)
- info_file = open(os.path.join(root_path, 'hmdb51.info'), 'w')
-
- for i, data in enumerate(data_loader):
- print('Preprocessing video {}/{}'.format(i, len(data_loader)))
- imgs = data['imgs']
- label = data['label']
- print(imgs.shape)
-
- for batch in range(imgs.shape[0]):
- l = label.cpu().numpy()[batch]
- info_file.write(str(args.batch_size*i+batch) + ' ' + str(l))
- info_file.write('\n')
-
- if imgs.shape[0] != args.batch_size:
- imgs = F.pad(imgs, (0,0,0,0,0,0,0,0,0,args.batch_size-imgs.shape[0]))
-
- bin_info = imgs.cpu().numpy()
- print(bin_info.shape)
- preprocess = torch.from_numpy(bin_info)
- print(preprocess.shape)
- bin_info.tofile(out_path + '/' + str(i) + '.bin')
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
deleted file mode 100644
index 9b9cd71d48..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/posec3d_pytorch2onnx.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import argparse
-import warnings
-
-import mmcv
-import numpy as np
-import torch
-from mmcv.runner import load_checkpoint
-
-from mmaction.models import build_model
-
-try:
- import onnx
- import onnxruntime as rt
-except ImportError as e:
- raise ImportError(f'Please install onnx and onnxruntime first. {e}')
-
-try:
- from mmcv.onnx.symbolic import register_extra_symbolics
-except ModuleNotFoundError:
- raise NotImplementedError('please update mmcv to version>=1.0.4')
-
-
-def _convert_batchnorm(module):
- """Convert the syncBNs into normal BN3ds."""
- module_output = module
- if isinstance(module, torch.nn.SyncBatchNorm):
- module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
- module.momentum, module.affine,
- module.track_running_stats)
- if module.affine:
- module_output.weight.data = module.weight.data.clone().detach()
- module_output.bias.data = module.bias.data.clone().detach()
- # keep requires_grad unchanged
- module_output.weight.requires_grad = module.weight.requires_grad
- module_output.bias.requires_grad = module.bias.requires_grad
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- for name, child in module.named_children():
- module_output.add_module(name, _convert_batchnorm(child))
- del module
- return module_output
-
-
-def pytorch2onnx(model,
- input_shape,
- opset_version=11,
- show=False,
- output_file='tmp.onnx',
- verify=False):
- """Convert pytorch model to onnx model.
-
- Args:
- model (:obj:`nn.Module`): The pytorch model to be exported.
- input_shape (tuple[int]): The input tensor shape of the model.
- opset_version (int): Opset version of onnx used. Default: 11.
- show (bool): Determines whether to print the onnx model architecture.
- Default: False.
- output_file (str): Output onnx model name. Default: 'tmp.onnx'.
- verify (bool): Determines whether to verify the onnx model.
- Default: False.
- """
- model.cpu().eval()
-
- input_tensor = torch.randn(input_shape)
-
- register_extra_symbolics(opset_version)
- input_names = ["invals"]
- torch.onnx.export(
- model,
- input_tensor,
- output_file,
- export_params=True,
- keep_initializers_as_inputs=True,
- verbose=show,
- opset_version=opset_version,
- input_names=input_names
- )
-
- print(f'Successfully exported ONNX model: {output_file}')
- if verify:
- # check by onnx
- onnx_model = onnx.load(output_file)
- onnx.checker.check_model(onnx_model)
-
- # check the numerical value
- # get pytorch output
- pytorch_result = model(input_tensor)[0].detach().numpy()
-
- # get onnx output
- input_all = [node.name for node in onnx_model.graph.input]
- input_initializer = [
- node.name for node in onnx_model.graph.initializer
- ]
- net_feed_input = list(set(input_all) - set(input_initializer))
- assert len(net_feed_input) == 1
- sess = rt.InferenceSession(output_file)
- onnx_result = sess.run(
- None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
- # only compare part of results
- random_class = np.random.randint(pytorch_result.shape[1])
- assert np.allclose(
- pytorch_result[:, random_class], onnx_result[:, random_class]
- ), 'The outputs are different between Pytorch and ONNX'
- print('The numerical values are same between Pytorch and ONNX')
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Convert MMAction2 models to ONNX')
- parser.add_argument('config', help='test config file path')
- parser.add_argument('checkpoint', help='checkpoint file')
- parser.add_argument('--show', action='store_true', help='show onnx graph')
- parser.add_argument('--output-file', type=str, default='tmp.onnx')
- parser.add_argument('--opset-version', type=int, default=11)
- parser.add_argument(
- '--verify',
- action='store_true',
- help='verify the onnx model output against pytorch output')
- parser.add_argument(
- '--is-localizer',
- action='store_true',
- help='whether it is a localizer')
- parser.add_argument(
- '--shape',
- type=int,
- nargs='+',
- default=[1, 3, 8, 224, 224],
- help='input video size')
- parser.add_argument(
- '--softmax',
- action='store_true',
-        help='whether to add softmax layer at the end of recognizers')
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- args = parse_args()
-
- assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
-
- cfg = mmcv.Config.fromfile(args.config)
- # import modules from string list.
-
- if not args.is_localizer:
- cfg.model.backbone.pretrained = None
-
- # build the model
- model = build_model(
- cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
- model = _convert_batchnorm(model)
-
- # onnx.export does not support kwargs
- if hasattr(model, 'forward_dummy'):
- from functools import partial
- model.forward = partial(model.forward_dummy, softmax=args.softmax)
- elif hasattr(model, '_forward') and args.is_localizer:
- model.forward = model._forward
- else:
- raise NotImplementedError(
- 'Please implement the forward method for exporting.')
-
- checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
-
- # convert model to onnx file
- pytorch2onnx(
- model,
- args.shape,
- opset_version=args.opset_version,
- show=args.show,
- output_file=args.output_file,
- verify=args.verify)
-
- # Following strings of text style are from colorama package
- bright_style, reset_style = '\x1b[1m', '\x1b[0m'
- red_text, blue_text = '\x1b[31m', '\x1b[34m'
- white_background = '\x1b[107m'
-
- msg = white_background + bright_style + red_text
- msg += 'DeprecationWarning: This tool will be deprecated in future. '
- msg += blue_text + 'Welcome to use the unified model deployment toolbox '
- msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
- msg += reset_style
- warnings.warn(msg)
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
deleted file mode 100644
index b5981002bf..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-decord == 0.6.0
-einops == 0.4.1
-matplotlib == 3.5.1
-numpy == 1.21.2
-onnx == 1.10.2
-onnxruntime == 1.11.0
-opencv-contrib-python == 4.5.5.64
-Pillow == 9.0.1
-scipy == 1.7.3
-torch == 1.11.0
-yapf == 0.32.0
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
deleted file mode 100644
index 87e2a14b62..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/eval_acc_perf.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-batch_size=1
-datasets_path="/opt/npu/hmdb51"
-
-for para in $*
-do
- if [[ $para == --datasets_path* ]]; then
- datasets_path=`echo ${para#*=}`
- fi
- if [[ $para == --batch_size* ]]; then
- batch_size=`echo ${para#*=}`
- fi
-done
-
-# ============================= prepare dataset ==============================
-cd ./mmaction2/tools/data/hmdb51
-bash download_annotations.sh
-bash download_videos.sh
-bash extract_rgb_frames_opencv.sh
-bash generate_rawframes_filelist.sh
-bash generate_videos_filelist.sh
-mv ../../../data/hmdb51 /opt/npu
-cd ../../../..
-
-# ======================= generate prep_dataset ==============================
-rm -rf ./prep_hmdb51_bs${batch_size}
-chmod u+x msame
-python posec3d_preprocess.py \
- --batch_size ${batch_size} \
- --data_root ${datasets_path}/rawframes/ \
- --ann_file ./hmdb51.pkl \
- --name /opt/npu/hmdb51/prep_hmdb51_bs${batch_size}
-if [ $? != 0 ]; then
- echo "posec3d preprocess fail!"
- exit -1
-fi
-echo "==> 1. creating /opt/npu/hmdb51/prep_hmdb51_bs${batch_size} successfully."
-
-# =============================== msame ======================================
-if [ ! -d ./result ]; then
- mkdir ./result
-fi
-rm -rf ./result/outputs_bs${batch_size}_om
-./msame --model "./posec3d_bs${batch_size}.om" \
- --input "/opt/npu/hmdb51/prep_hmdb51_bs${batch_size}" \
- --output "./result/outputs_bs${batch_size}_om" \
- --outfmt TXT > ./msame_bs${batch_size}.txt
-if [ $? != 0 ]; then
- echo "msame bs${batch_size} fail!"
- exit -1
-fi
-echo "==> 2. conducting hmdb51_bs${batch_size}.om successfully."
-
-
-# ============================ evaluate ======================================
-python posec3d_postprocess.py \
- --result_path ./result/outputs_bs${batch_size}_om/20220612_142755 \
- --info_path ./hmdb51.info
-
-if [ $? != 0 ]; then
- echo "fail!"
- exit -1
-fi
-echo "==> 3. evaluating hmda51 on bs${batch_size} successfully."
-echo '==> 4. Done.'
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
deleted file mode 100644
index da50748128..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/parse.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import re
-import sys
-import json
-import argparse
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--result-file', type=str, default="./msame_bs1.txt")
- parser.add_argument('--batch-size', type=int, default=1)
- args = parser.parse_args()
-
- if args.result_file.endswith('.json'):
- result_json = args.result_file
- with open(result_json, 'r') as f:
- content = f.read()
- tops = [i.get('value') for i in json.loads(content).get('value') if 'Top' in i.get('key')]
- print('om {} top1:{}'.format(result_json.split('_')[1].split('.')[0], tops[0]))
- elif args.result_file.endswith('.txt'):
- result_txt = args.result_file
- with open(result_txt, 'r') as f:
- content = f.read()
- txt_data_list = re.findall(r'Inference average time without first time:.*ms', content.replace('\n', ',') + ',')[-1]
- avg_time = txt_data_list.split(' ')[-2]
- fps = args.batch_size * 1000 / float(avg_time)
- print('310P bs{} fps:{:.3f}'.format(args.batch_size, fps))
\ No newline at end of file
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
deleted file mode 100644
index a5ad593d36..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/perf_t4.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-rm -rf perf_bs1.log
-trtexec --onnx=posec3d_bs1.onnx --fp16 --threads > perf_bs1.log
-perf_str=`grep "GPU.* mean.*ms$" perf_bs1.log`
-if [ -n "$perf_str" ]; then
- perf_num=`echo $perf_str | awk -F' ' '{print $16}'`
-else
- perf_str=`grep "mean.*ms$" perf_bs1.log`
- perf_num=`echo $perf_str | awk -F' ' '{print $4}'`
-fi
-awk 'BEGIN{printf "t4 bs1 fps:%.3f\n", 1000*1/('$perf_num'/1)}'
diff --git a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh b/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
deleted file mode 100644
index eda7511676..0000000000
--- a/ACL_PyTorch/contrib/cv/pose_estimation/PoseC3D/test/pth2om.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-source /usr/local/Ascend/ascend-toolkit/set_env.sh
-
-batch_size=1
-not_skip_onnx=true
-
-for para in $*
-do
- if [[ $para == --batch_size* ]]; then
- batch_size=`echo ${para#*=}`
- fi
- if [[ $para == --not_skip_onnx* ]]; then
- not_skip_onnx=`echo ${para#*=}`
- fi
-done
-
-# ======================= convert onnx =======================================
-if [ $not_skip_onnx == true ]; then
- rm -rf posec3d.onnx
- python ./posec3d_pytorch2onnx.py \
- ./mmaction2/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py \
- ./slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth \
- --shape 1 20 17 48 56 56 \
- --verify \
- --output-file ./posec3d_bs${batch_size}.onnx
- if [ -f "posec3d_bs${batch_size}.onnx" ]; then
- echo "==> 1. creating onnx model successfully."
- else
- echo "onnx export failed"
- exit -1
- fi
-fi
-
-
-# ======================= convert om =========================================
-rm -rf posec3d_bs${batch_size}.om
-export TUNE_BANK_PATH="./aoe_result_bs1"
-atc --framework=5 --model=./posec3d_bs${batch_size}.onnx \
- --output=./posec3d_bs${batch_size} \
- --input_format=ND --input_shape="invals:${batch_size},20,17,48,56,56" \
- --log=debug --soc_version=Ascend710
-if [ -f "posec3d_bs${batch_size}.om" ] ; then
- echo "==> 2. creating om model successfully."
-else
- echo "sim_om export failed"
-fi
-echo "==> 3. Done."
\ No newline at end of file
--
Gitee