diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4246e35a2d0b6c4d6fa2939d57cb4a689f62e336
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE
@@ -0,0 +1,251 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+## Some of TensorFlow's code is derived from Caffe, which is subject to the following copyright notice:
+
+COPYRIGHT
+
+All contributions by the University of California:
+
+Copyright (c) 2014, The Regents of the University of California (Regents)
+All rights reserved.
+
+All other contributions:
+
+Copyright (c) 2014, the respective contributors
+All rights reserved.
+
+Caffe uses a shared copyright model: each contributor holds copyright over
+their contributions to Caffe. The project versioning records all such
+contribution and copyright details. If a contributor wants to further mark
+their specific copyright on a particular contribution, they should indicate
+their copyright solely in the commit message of the change when it is
+committed.
+
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+CONTRIBUTION AGREEMENT
+
+By contributing to the BVLC/caffe repository through pull-request, comment,
+or otherwise, the contributor releases their content to the
+license and copyright terms herein.
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..734516deed6a3263ba22605388011fc05e6f02d9
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md
@@ -0,0 +1,177 @@
+## Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Computer Vision**
+
+**Framework: TensorFlow 1.15.0**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: Training code, based on the TensorFlow framework, for detecting human poses in images**
+
+## Overview
+
+ LiftingFromTheDeep
+
+- Reference paper:
+
+ https://openaccess.thecvf.com/content_cvpr_2017/papers/Tome_Lifting_From_the_CVPR_2017_paper.pdf
+
+- Reference implementation:
+
+ https://github.com/DenisTome/Lifting-from-the-Deep-release
+
+
+## Default Configuration
+
+- Training data preprocessing:
+
+  - Input image size: 1080*720
+  - Input image format: jpg
+
+- Training hyperparameters
+
+ - Batch size: 4
+ - Train epoch: 30
+
+## Quick Start
+
+Dataset preparation:
+model training uses the MPII dataset; please obtain the dataset yourself.
+OBS bucket address:
+>obs://cann-id0891/npu/
+
+## Model Training
+Single-card (1p) training:
+
+1. Configure the training parameters
+2. Launch the training
+```
+bash train_full_1p.sh \
+ --data_path="./dataset/MPII" \
+ --output_path="./checkpoint/model.ckpt"
+```
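+
+`--data_path` points to the MPII dataset root, and `--output_path` is the checkpoint path (prefix) under which the trained model is saved.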
+
+
+## Training Results
+
+- Accuracy comparison
+
+|Accuracy metric|GPU (measured)|NPU (measured)|
+|---|---|---|
+|PCKh@0.5|0.819410|0.814496|
+
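+PCKh@0.5 counts a prediction as correct when its distance to the ground-truth
+point, normalised by the head size, is below 0.5. Below is a minimal sketch of
+the per-sample check, mirroring `calCKh` in `evaluate.py` (function and
+argument names here are illustrative):
+
+```python
+import numpy as np
+
+def is_correct_keypoint(pred, label, head_size, threshold=0.5):
+    """Return 1 if the prediction lies within `threshold` head sizes of the label."""
+    dist = np.linalg.norm(np.asarray(pred) - np.asarray(label)) / head_size
+    return 1 if dist < threshold else 0
+
+# PCKh@0.5 over the dataset is then the mean of this indicator across samples.
+```
+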
+- Performance comparison
+
+|Performance metric|GPU (measured)|NPU (measured)|
+|---|---|---|
+|FPS|10.04|10.56|
+
+
+## Directory Structure
+
+```
+├── README.md //documentation for this code
+├── train.py //network training
+├── online_inference.py //online inference on a single image
+├── evaluate.py //measures model accuracy on the dataset
+├── requirements.txt //dependency list
+├── LICENSE
+├── packages
+│ ├──lifting
+│ ├──__init__.py
+│ ├──_pose_estimator.py
+│ ├──utils
+│ ├──__init__.py
+│ ├──config.py
+│ ├──cpm.py
+│ ├──draw.py
+│ ├──prob_model.py
+│ ├──process.py
+│ ├──upright_fast.py
+├── checkpoint //directory where trained checkpoints are saved
+├── data
+│ ├──prob_model
+│ ├──prob_model_params.mat
+│ ├──init_session
+│ ├──checkpoint
+│ ├──init.data-00000-of-00001
+│ ├──init.index
+│ ├──init.meta
+├── dataset //dataset directory
+│ ├──MPII
+│ ├──images
+│ ├──000001163.jpg
+│ ├──000003072.jpg
+│ ├──...
+│ ├──mpii_human_pose_v1_u12_2
+│ ├──bsd.txt
+│ ├──mpii_human_pose_v1_u12_1.mat
+│ ├──README.md
+├──result //single-image inference results
+│ ├──result2d.jpg
+│ ├──result3d_0.jpg
+├── test
+│ ├──train_performance_1p.sh //launch script for the single-card performance test
+│ ├──train_full_1p.sh //launch script for single-card full training
+
+```
+
+
+## Launch Scripts
+The test folder contains the train_performance_1p.sh and train_full_1p.sh scripts,
+which measure training performance and training accuracy, respectively.
+
+### Performance Test
+Command:
+```
+bash train_performance_1p.sh \
+ --data_path="./dataset/MPII" \
+ --output_path="./checkpoint/model.ckpt"
+```
+Console output:
+
+> awk: cmd. line:1: fatal: division by zero attempted
+>------------------ INFO NOTICE START------------------
+>INFO, your task have used Ascend NPU, please check your result.
+>------------------ INFO NOTICE END------------------
+>------------------ Final result ------------------
+>Final Performance images/sec : 10.56
+>Final Performance sec/step : 0.38
+>E2E Training Duration sec : 754
+>Final Train Accuracy :
+
+**Note**: Final Train Accuracy is empty here because the performance run does not include a validation pass, so accuracy cannot be measured.
+The awk error on the first line of the console output is caused by this empty value.
+
+### Accuracy Test
+Command:
+```
+bash train_full_1p.sh \
+ --data_path="./dataset/MPII" \
+ --output_path="./checkpoint/model.ckpt"
+```
+
+Console output:
+>------------------ INFO NOTICE START------------------
+>INFO, your task have used Ascend NPU, please check your result.
+>------------------ INFO NOTICE END------------------
+>------------------ Final result ------------------
+>Final Performance images/sec : 10.19
+>Final Performance sec/step : 0.39
+>E2E Training Duration sec : 21154
+>Final Train Accuracy : 0.814496
+
+## Online Inference Results
+### Output images:
+
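+These images are produced by `online_inference.py`: by default it runs on `./dataset/MPII/images/099363014.jpg` and writes `result2d.jpg` and `result3d_*.jpg` into `./result`; the input image and output directory can be changed with `--test_image` and `--result_path`.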
+
+
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/prob_model/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/prob_model/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/images/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/images/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py
new file mode 100644
index 0000000000000000000000000000000000000000..cff5a53ceb33f3f5f71fc459bc903751f26e24da
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py
@@ -0,0 +1,215 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+
+from packages.lifting import PoseEstimator
+from packages.lifting.utils import gaussian_heatmap, config
+import cv2
+import argparse
+import os
+import numpy as np
+from scipy.io import loadmat
+from tqdm import tqdm
+
+
+# set up argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--data_path', type=str, default='./dataset/MPII') # MPII dataset root
+parser.add_argument('--label_path', type=str,
+ default='./dataset/MPII/mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat') #label path
+parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/model.ckpt') # checkpoint path
+parser.add_argument('--prob_model_path', type=str,
+ default='./data/prob_model/prob_model_params.mat') # 3d model path
+
+args = parser.parse_args()
+input_width = 654
+input_height = 368
+
+
+def save_joints():  # read the MPII annotation .mat file and build a per-image label dict
+ mat = loadmat(args.label_path)
+ d = {}
+ fd = []
+ for i, (anno, train_flag) in enumerate(
+ zip(mat['RELEASE']['annolist'][0, 0][0],
+ mat['RELEASE']['img_train'][0, 0][0],
+ )):
+ img_fn = anno['image']['name'][0, 0][0]
+ train_flag = int(train_flag)
+
+ if 'annopoints' in str(anno['annorect'].dtype):
+ # only one person
+ annopoints = anno['annorect']['annopoints'][0]
+ head_x1s = anno['annorect']['x1'][0]
+ head_y1s = anno['annorect']['y1'][0]
+ head_x2s = anno['annorect']['x2'][0]
+ head_y2s = anno['annorect']['y2'][0]
+ datas = []
+ for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(
+ annopoints, head_x1s, head_y1s, head_x2s, head_y2s):
+ if annopoint != []:
+ head_rect = [float(head_x1[0, 0]),
+ float(head_y1[0, 0]),
+ float(head_x2[0, 0]),
+ float(head_y2[0, 0])]
+ # build feed_dict
+ feed_dict = {}
+ feed_dict['width'] = int(abs(float(head_x2[0, 0]) - float(head_x1[0, 0])))
+ feed_dict['height'] = int(abs(float(head_y2[0, 0]) - float(head_y1[0, 0])))
+
+ # joint coordinates
+ annopoint = annopoint['point'][0, 0]
+ j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
+ x = [x[0, 0] for x in annopoint['x'][0]]
+ y = [y[0, 0] for y in annopoint['y'][0]]
+ joint_pos = {}
+ for _j_id, (_x, _y) in zip(j_id, zip(x, y)):
+ joint_pos[str(_j_id)] = [float(_x), float(_y)]
+
+                    # visibility list
+ if 'is_visible' in str(annopoint.dtype):
+ vis = [v[0] if v else [0] for v in annopoint['is_visible'][0]]
+ vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
+ else:
+ vis = None
+ feed_dict['x'] = x
+ feed_dict['y'] = y
+ feed_dict['vis'] = vis
+ feed_dict['filename'] = img_fn
+ if len(joint_pos) == 16:
+ data = {
+ 'filename': img_fn,
+ 'train': train_flag,
+ 'head_rect': head_rect,
+ 'is_visible': vis,
+ 'joint_pos': joint_pos
+ }
+ datas.append(data)
+
+ for data in datas:
+ head_center = [(data['head_rect'][0] + data['head_rect'][2]) / 2, (data['head_rect'][1] + data['head_rect'][3]) / 2]
+ if d.get(data['filename']):
+ d.get(data['filename']).append(data)
+ else:
+ d[data['filename']] = [data]
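+    # keep only images that contain exactly one annotated person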
+ filt = []
+ for key, value in d.items():
+ if len(value) != 1:
+ filt.append(key)
+ for key in filt:
+ del d[key]
+ return d
+
+
+def generate_center_map(center_poses, img_shape):  # build the person-centre heat-map from the label position
+    """
+    Given the position of the person and the size of the input image,
+    generate a heat-map in which a Gaussian distribution is centred on the
+    position of the person in the image.
+    """
+ img_height = img_shape[1]
+ img_width = img_shape[0]
+    # generate a heat-map with a Gaussian kernel
+ center_map = [gaussian_heatmap(
+ img_height, img_width, center_poses[1], center_poses[0],
+ config.SIGMA_CENTER, config.SIGMA_CENTER)]
+
+ out = np.zeros_like(center_map[0])
+    # compose multiple maps into one
+ for map in center_map:
+ out += map
+ out[out > 1] = 1
+ return out
+
+
+def preprocess(k, input_width=654, input_height=368):  # read and preprocess one image
+ # read image
+ image = cv2.imread(os.path.join(args.data_path, 'images', k))
+ ratio = (input_width / image.shape[1], input_height / image.shape[0])
+ image = cv2.resize(image, (input_width, input_height))
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb
+    # obtain label: midpoint of MPII joints 6 (pelvis) and 7 (thorax), i.e. the person centre
+ labels = [d[k][0]['joint_pos']['7'][0] * 0.5 + d[k][0]['joint_pos']['6'][0] * 0.5, d[k][0]['joint_pos']['7'][1] * 0.5 + d[k][0]['joint_pos']['6'][1] * 0.5]
+ labels[0] *= ratio[0]
+ labels[1] *= ratio[1]
+ # obtain headSize
+ headSize = d[k][0]['head_rect']
+ headSize = (headSize[2] - headSize[0]) * 0.5 + (headSize[3] - headSize[1]) * 0.5
+    heatmap_gt = generate_center_map(labels, (input_width, input_height))  # generate the ground-truth centre heat-map
+ return image, labels, heatmap_gt, headSize
+
+def get_batch(idxs): # read batch data
+ name_lst = np.array(list(d.keys()))[idxs]
+ images = []
+ labels = []
+ heatmap_gts = []
+ headSizes = []
+ for name in name_lst:
+ image, label, heatmap_gt, headSize = preprocess(name)
+ images.append(image)
+ labels.append(label)
+ heatmap_gts.append(heatmap_gt)
+ headSizes.append(headSize)
+ images = np.stack(images, 0)
+ labels = np.stack(labels, 0)
+ heatmap_gts = np.stack(heatmap_gts, 0)
+ headSizes = np.stack(headSizes, 0)
+ return images, labels, heatmap_gts, headSizes
+
+
+def calCKh(pred, label, headSize):
+    # a keypoint is counted as correct when its distance to the label,
+    # normalised by the head size, is below 0.5 (the PCKh@0.5 criterion)
+    dist = np.sqrt(np.sum((np.array(pred) - np.array(label)) ** 2)) / headSize
+    CKh = 1 if dist < 0.5 else 0
+    return CKh
+
+def main():
+ # generate batch
+
+ batch_idxs = np.random.permutation(len(d))
+ batch_idxs = np.array_split(batch_idxs, len(d))
+
+ # model definition
+ pose_estimator = PoseEstimator((input_height, input_width, 3), args.checkpoint_path, args.prob_model_path)
+
+ # model initialisation
+ pose_estimator.initialise()
+
+ # validation
+ CKh_num = 0
+ for i, idxs in enumerate(tqdm(batch_idxs)):
+ # generate batch
+ images, labels, heatmap_gts, headSizes = get_batch(idxs)
+ pose_2d, heatmap_pred = pose_estimator.estimate(images[0])
+
+ if len(pose_2d) < 1:
+ continue
+ pred = [pose_2d[0, 8, 1] * 0.25 + pose_2d[0, 11, 1] * 0.25 + pose_2d[0, 1, 1] * 0.5,
+ pose_2d[0, 8, 0] * 0.25 + pose_2d[0, 11, 0] * 0.25 + pose_2d[0, 1, 0] * 0.5]
+
+ CKh = calCKh(pred, labels[0], headSizes[0])
+ CKh_num += CKh
+ PCKh = CKh_num / len(batch_idxs)
+ print('PCKh@0.5: ', PCKh)
+
+ # close model
+ pose_estimator.close()
+
+
+d = save_joints()
+if __name__ == '__main__':
+ import sys
+ sys.exit(main())
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b1413df692d6f83802d9f2518710d9c2988ec73c
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt
@@ -0,0 +1,5 @@
+FuncStatus:OK
+PrecisionStatus:OK
+PerfStatus:OK
+GPUStatus:OK
+NPUMigrationStatus:OK
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..13c9bc86ab3f71ccd4a7c4ba086869619166d3f8
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py
@@ -0,0 +1,77 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+
+from packages.lifting import PoseEstimator
+from packages.lifting.utils import plot_pose, draw_limbs
+
+import cv2
+import matplotlib.pyplot as plt
+import argparse
+import os
+
+
+# set up the argparse
+parser = argparse.ArgumentParser()
+
+parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/model.ckpt') # checkpoint path
+parser.add_argument('--prob_model_path', type=str,
+ default='./data/prob_model/prob_model_params.mat') # 3d model path
+parser.add_argument('--test_image', type=str,
+ default='./dataset/MPII/images/099363014.jpg')
+parser.add_argument('--result_path', type=str,
+ default='./result')
+
+args = parser.parse_args()
+
+
+def main():
+ # read image
+ image = cv2.imread(args.test_image)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb
+ image_size = image.shape
+
+ # initialize model
+ pose_estimator = PoseEstimator(image_size, args.checkpoint_path, args.prob_model_path)
+
+ # load model
+ pose_estimator.initialise()
+
+ # estimation
+ pose_2d, visibility, pose_3d = pose_estimator.estimate(image, lifting=True)
+
+ # Show 2D and 3D poses
+ display_results(image, pose_2d, visibility, pose_3d)
+ # close model
+ pose_estimator.close()
+
+
+def display_results(in_image, data_2d, joint_visibility, data_3d):  # 2D/3D result visualization
+ """Plot 2D and 3D poses for each of the people in the image."""
+ plt.figure()
+ draw_limbs(in_image, data_2d, joint_visibility)
+ plt.imshow(in_image)
+
+ plt.axis('off')
+ # save 2d image
+ plt.savefig(os.path.join(args.result_path,'result2d.jpg'))
+
+ # Show 3D poses
+ for i, single_3D in enumerate(data_3d):
+ plot_pose(single_3D)
+ plt.savefig(os.path.join(args.result_path, 'result3d_{}.jpg'.format(i))) # save images
+
+if __name__ == '__main__':
+ import sys
+ sys.exit(main())
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bb056e45137099bf4f3633ed5f1101581609d8b
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from ._pose_estimator import *
+from . import utils
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py
new file mode 100644
index 0000000000000000000000000000000000000000..986ea7bbf7e3ef86438072ec0965e564c5d85898
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py
@@ -0,0 +1,187 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from . import utils
+import cv2
+import numpy as np
+import tensorflow as tf
+import abc
+
+ABC = abc.ABCMeta('ABC', (object,), {})
+
+__all__ = [
+ 'PoseEstimatorInterface',
+ 'PoseEstimator'
+]
+
+
+class PoseEstimatorInterface(ABC):
+
+ @abc.abstractmethod
+ def initialise(self, args):
+ pass
+
+ @abc.abstractmethod
+ def estimate(self, image):
+ return
+
+ @abc.abstractmethod
+ def train(self, image, labels):
+ return
+
+ @abc.abstractmethod
+ def close(self):
+ pass
+
+
+class PoseEstimator(PoseEstimatorInterface):
+
+ def __init__(self, image_size, session_path, prob_model_path):
+        """Initialise the estimator.
+        INPUT:
+        image_size: size of the input image in the format (w x h x 3)"""
+
+ self.session = None
+ self.poseLifting = utils.Prob3dPose(prob_model_path)
+ self.sess = -1
+ self.orig_img_size = np.array(image_size)
+ self.scale = utils.config.INPUT_SIZE / (self.orig_img_size[0] * 1.0)
+ self.img_size = np.round(
+ self.orig_img_size * self.scale).astype(np.int32)
+ self.image_in = None
+ self.heatmap_person_large = None
+ self.pose_image_in = None
+ self.pose_centermap_in = None
+ self.pred_2d_pose = None
+ self.likelihoods = None
+ self.session_path = session_path
+
+    def initialise(self):
+        """Build the graph and restore the saved model.
+        The checkpoint is read from the session_path passed to the
+        constructor, and the resulting tensorflow session is stored in
+        self.session."""
+        # initialize graph structure
+ tf.reset_default_graph()
+
+ with tf.variable_scope('CPM'):
+ # placeholders for person network
+ self.image_in = tf.placeholder(
+ tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 3])
+ self.label_in = tf.placeholder(
+ tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 1])
+
+ heatmap_person = utils.inference_person(self.image_in)
+
+ self.heatmap_person_large = tf.image.resize_images(
+ heatmap_person, [utils.config.INPUT_SIZE, self.img_size[1]])
+
+ # placeholders for pose network
+ self.pose_image_in = tf.placeholder(
+ tf.float32,
+ [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 3])
+
+ self.pose_centermap_in = tf.placeholder(
+ tf.float32,
+ [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 1])
+
+ self.pred_2d_pose, self.likelihoods = utils.inference_pose(
+ self.pose_image_in, self.pose_centermap_in,
+ utils.config.INPUT_SIZE)
+
+ # set up loss and optimizer
+ self.loss = tf.reduce_mean(tf.abs(self.heatmap_person_large - self.label_in))
+ self.optimizer = npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate=0.0000001)).minimize(self.loss)
+
+ # load pretraining model
+ sess = tf.Session(config=npu_config_proto())
+ sess.run(tf.global_variables_initializer())
+ variables = tf.contrib.framework.get_variables_to_restore()
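+        # exclude the Adam slot variables and the beta power accumulators created
+        # by the optimizer, so that only the pretrained network weights are restored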
+        variables_to_restore = [v for v in variables if v.name.split('/')[-1][:4] != 'Adam' and v.name[:4] != 'beta']
+        self.saver = tf.train.Saver(variables_to_restore)
+ self.saver.restore(sess, self.session_path)
+ self.session = sess
+
+ def train(self, image, labels):
+        # run a training step: forward the batch, back-propagate and return the loss
+ b_image = np.array(image / 255.0 - 0.5, dtype=np.float32)
+ labels = labels[:, :, :, np.newaxis]
+
+ # self.session.run(self.optimizer, {self.image_in: b_image, self.label_in: labels})
+ _, loss, heatmap_pred = self.session.run([self.optimizer, self.loss, self.heatmap_person_large],
+ feed_dict={self.image_in: b_image, self.label_in: labels})
+ return loss, heatmap_pred
+
+ def estimate(self, image, lifting=False):
+ """
+ Estimate 2d and 3d poses on the image.
+ INPUT:
+ image: RGB image in the format (w x h x 3)
+        lifting: when True, also estimate and return the 3D poses
+ OUTPUT:
+ pose_2d: 2D pose for each of the people in the image in the format
+ (num_ppl x num_joints x 2)
+ visibility: vector containing a bool
+ value for each joint representing the visibility of the joint in
+ the image (could be due to occlusions or the joint is not in the
+ image)
+ pose_3d: 3D pose for each of the people in the image in the
+ format (num_ppl x 3 x num_joints)
+ hmap_person: heatmap
+ """
+ # test model
+ sess = self.session
+
+ image = cv2.resize(image, (0, 0), fx=self.scale,
+ fy=self.scale, interpolation=cv2.INTER_CUBIC)
+ b_image = np.array(image[np.newaxis] / 255.0 - 0.5, dtype=np.float32)
+
+ hmap_person_viz = sess.run(self.heatmap_person_large, {
+ self.image_in: b_image})
+ hmap_person = np.squeeze(hmap_person_viz)
+
+ centers = utils.detect_objects_heatmap(hmap_person)
+ b_pose_image, b_pose_cmap = utils.prepare_input_posenet(
+ b_image[0], centers,
+ [utils.config.INPUT_SIZE, image.shape[1]],
+ [utils.config.INPUT_SIZE, utils.config.INPUT_SIZE],
+ batch_size=utils.config.BATCH_SIZE)
+
+ feed_dict = {
+ self.pose_image_in: b_pose_image,
+ self.pose_centermap_in: b_pose_cmap
+ }
+
+ # Estimate 2D poses
+ pred_2d_pose, pred_likelihood = sess.run([self.pred_2d_pose,
+ self.likelihoods],
+ feed_dict)
+
+ estimated_2d_pose, visibility = utils.detect_parts_from_likelihoods(pred_2d_pose,
+ centers,
+ pred_likelihood)
+
+ pose_2d = np.round(estimated_2d_pose / self.scale).astype(np.int32)
+
+ # Estimate 3D poses
+ if lifting:
+ transformed_pose2d, weights = self.poseLifting.transform_joints(
+ estimated_2d_pose.copy(), visibility)
+ pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights)
+ return pose_2d, visibility, pose_3d
+
+        return pose_2d, hmap_person
+
+    def close(self):
+ self.session.close()
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbbdebfd80dd9f8a5855d7b20e8ae3e0d4c3b61a
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from .prob_model import *
+from .draw import *
+from .cpm import *
+from .process import *
+from . import config
+from . import upright_fast
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9b54b98460e853dd5cc244dd522bdf91cc9a351
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py
@@ -0,0 +1,51 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+__all__ = [
+ 'VISIBLE_PART',
+ 'MIN_NUM_JOINTS',
+ 'CENTER_TR',
+ 'SIGMA',
+ 'STRIDE',
+ 'SIGMA_CENTER',
+ 'INPUT_SIZE',
+ 'OUTPUT_SIZE',
+ 'NUM_JOINTS',
+ 'NUM_OUTPUT',
+ 'H36M_NUM_JOINTS',
+ 'JOINT_DRAW_SIZE',
+ 'LIMB_DRAW_SIZE'
+]
+
+# threshold
+VISIBLE_PART = 1e-3
+MIN_NUM_JOINTS = 5
+CENTER_TR = 0.4
+
+# net attributes
+SIGMA = 7
+STRIDE = 8
+SIGMA_CENTER = 21
+INPUT_SIZE = 368
+OUTPUT_SIZE = 46
+NUM_JOINTS = 14
+NUM_OUTPUT = NUM_JOINTS + 1
+H36M_NUM_JOINTS = 17
+
+# draw options
+JOINT_DRAW_SIZE = 3
+LIMB_DRAW_SIZE = 1
+NORMALISATION_COEFFICIENT = 1280*720  # reference image area used to scale drawing sizes
+
+# test options
+BATCH_SIZE = 4
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecbb95beb766f3515d6c9e06e4f5a81f5aaae578
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py
@@ -0,0 +1,406 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+
+import tensorflow as tf
+import tensorflow.contrib.layers as layers
+
+__all__ = [
+ 'inference_person',
+ 'inference_pose'
+]
+
+
+def inference_person(image):
+ with tf.variable_scope('PersonNet'):
+ conv1_1 = layers.conv2d(
+ image, 64, 3, 1, activation_fn=None, scope='conv1_1')
+ conv1_1 = tf.nn.relu(conv1_1)
+ conv1_2 = layers.conv2d(
+ conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2')
+ conv1_2 = tf.nn.relu(conv1_2)
+ pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2)
+ conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1,
+ activation_fn=None, scope='conv2_1')
+ conv2_1 = tf.nn.relu(conv2_1)
+ conv2_2 = layers.conv2d(
+ conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2')
+ conv2_2 = tf.nn.relu(conv2_2)
+ pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2)
+ conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1,
+ activation_fn=None, scope='conv3_1')
+ conv3_1 = tf.nn.relu(conv3_1)
+ conv3_2 = layers.conv2d(
+ conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2')
+ conv3_2 = tf.nn.relu(conv3_2)
+ conv3_3 = layers.conv2d(
+ conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3')
+ conv3_3 = tf.nn.relu(conv3_3)
+ conv3_4 = layers.conv2d(
+ conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4')
+ conv3_4 = tf.nn.relu(conv3_4)
+ pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2)
+ conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1,
+ activation_fn=None, scope='conv4_1')
+ conv4_1 = tf.nn.relu(conv4_1)
+ conv4_2 = layers.conv2d(
+ conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2')
+ conv4_2 = tf.nn.relu(conv4_2)
+ conv4_3 = layers.conv2d(
+ conv4_2, 512, 3, 1, activation_fn=None, scope='conv4_3')
+ conv4_3 = tf.nn.relu(conv4_3)
+ conv4_4 = layers.conv2d(
+ conv4_3, 512, 3, 1, activation_fn=None, scope='conv4_4')
+ conv4_4 = tf.nn.relu(conv4_4)
+ conv5_1 = layers.conv2d(
+ conv4_4, 512, 3, 1, activation_fn=None, scope='conv5_1')
+ conv5_1 = tf.nn.relu(conv5_1)
+ conv5_2_CPM = layers.conv2d(
+ conv5_1, 128, 3, 1, activation_fn=None, scope='conv5_2_CPM')
+ conv5_2_CPM = tf.nn.relu(conv5_2_CPM)
+ conv6_1_CPM = layers.conv2d(
+ conv5_2_CPM, 512, 1, 1, activation_fn=None, scope='conv6_1_CPM')
+ conv6_1_CPM = tf.nn.relu(conv6_1_CPM)
+ conv6_2_CPM = layers.conv2d(
+ conv6_1_CPM, 1, 1, 1, activation_fn=None, scope='conv6_2_CPM')
+ concat_stage2 = tf.concat([conv6_2_CPM, conv5_2_CPM], 3)
+ Mconv1_stage2 = layers.conv2d(
+ concat_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage2')
+ Mconv1_stage2 = tf.nn.relu(Mconv1_stage2)
+ Mconv2_stage2 = layers.conv2d(
+ Mconv1_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage2')
+ Mconv2_stage2 = tf.nn.relu(Mconv2_stage2)
+ Mconv3_stage2 = layers.conv2d(
+ Mconv2_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage2')
+ Mconv3_stage2 = tf.nn.relu(Mconv3_stage2)
+ Mconv4_stage2 = layers.conv2d(
+ Mconv3_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage2')
+ Mconv4_stage2 = tf.nn.relu(Mconv4_stage2)
+ Mconv5_stage2 = layers.conv2d(
+ Mconv4_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage2')
+ Mconv5_stage2 = tf.nn.relu(Mconv5_stage2)
+ Mconv6_stage2 = layers.conv2d(
+ Mconv5_stage2, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage2')
+ Mconv6_stage2 = tf.nn.relu(Mconv6_stage2)
+ Mconv7_stage2 = layers.conv2d(
+ Mconv6_stage2, 1, 1, 1, activation_fn=None, scope='Mconv7_stage2')
+ concat_stage3 = tf.concat([Mconv7_stage2, conv5_2_CPM], 3)
+ Mconv1_stage3 = layers.conv2d(
+ concat_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage3')
+ Mconv1_stage3 = tf.nn.relu(Mconv1_stage3)
+ Mconv2_stage3 = layers.conv2d(
+ Mconv1_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage3')
+ Mconv2_stage3 = tf.nn.relu(Mconv2_stage3)
+ Mconv3_stage3 = layers.conv2d(
+ Mconv2_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage3')
+ Mconv3_stage3 = tf.nn.relu(Mconv3_stage3)
+ Mconv4_stage3 = layers.conv2d(
+ Mconv3_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage3')
+ Mconv4_stage3 = tf.nn.relu(Mconv4_stage3)
+ Mconv5_stage3 = layers.conv2d(
+ Mconv4_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage3')
+ Mconv5_stage3 = tf.nn.relu(Mconv5_stage3)
+ Mconv6_stage3 = layers.conv2d(
+ Mconv5_stage3, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage3')
+ Mconv6_stage3 = tf.nn.relu(Mconv6_stage3)
+ Mconv7_stage3 = layers.conv2d(
+ Mconv6_stage3, 1, 1, 1, activation_fn=None,
+ scope='Mconv7_stage3')
+ concat_stage4 = tf.concat([Mconv7_stage3, conv5_2_CPM], 3)
+ Mconv1_stage4 = layers.conv2d(
+ concat_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage4')
+ Mconv1_stage4 = tf.nn.relu(Mconv1_stage4)
+ Mconv2_stage4 = layers.conv2d(
+ Mconv1_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage4')
+ Mconv2_stage4 = tf.nn.relu(Mconv2_stage4)
+ Mconv3_stage4 = layers.conv2d(
+ Mconv2_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage4')
+ Mconv3_stage4 = tf.nn.relu(Mconv3_stage4)
+ Mconv4_stage4 = layers.conv2d(
+ Mconv3_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage4')
+ Mconv4_stage4 = tf.nn.relu(Mconv4_stage4)
+ Mconv5_stage4 = layers.conv2d(
+ Mconv4_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage4')
+ Mconv5_stage4 = tf.nn.relu(Mconv5_stage4)
+ Mconv6_stage4 = layers.conv2d(
+ Mconv5_stage4, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage4')
+ Mconv6_stage4 = tf.nn.relu(Mconv6_stage4)
+ Mconv7_stage4 = layers.conv2d(
+ Mconv6_stage4, 1, 1, 1, activation_fn=None, scope='Mconv7_stage4')
+ return Mconv7_stage4
+
+
+def _argmax_2d(tensor):
+ """
+    Compute argmax over the 2nd and 3rd dimensions of the tensor.
+ e.g. given an input tensor of size N x K x K x C, then it computes the (x,y) coordinates for
+ each of the N images and C channels, corresponding to the max for that image and channel.
+ :param tensor: image of size N x K x K x C
+ :return: argmax in the format N x 2 x C (where C corresponds to NUM_JOINTS)
+ """
+ # get size
+ shape = tensor.get_shape().as_list()[1]
+ n_channels = tf.shape(tensor)[-1]
+
+ # process each channel
+ linearised_channel = tf.reshape(tensor, [-1, shape * shape, n_channels])
+ best_channel = tf.argmax(linearised_channel, axis=1)
+
+ idx_y = tf.expand_dims(tf.floordiv(best_channel, shape), axis=1)
+ idx_x = tf.expand_dims(tf.mod(best_channel, shape), axis=1)
+ argmax_channels = tf.concat([idx_x, idx_y], axis=1, name='output')
+ return argmax_channels
+
+
+def _process_stage(heat_maps, hm_size):
+ """
+ For each heat-map identify joint position and likelihood
+ :param heat_maps: input heat-maps
+ :param hm_size: size in which to return the coordinates
+ :return: 2d joints (BATCH_SIZE x 14 x 2)
+ likelihood for each joint (BATCH_SIZE x 14)
+ """
+ rescaled = tf.image.resize_images(heat_maps[:, :, :, :-1], [hm_size, hm_size])
+ uncertainty = tf.reduce_max(tf.reduce_mean(rescaled, axis=1), axis=1, name='prob')
+ return _argmax_2d(rescaled), uncertainty
+
+
+def inference_pose(image, center_map, hm_size, stage=6):
+ with tf.variable_scope('PoseNet'):
+ pool_center_lower = layers.avg_pool2d(center_map, 9, 8, padding='SAME')
+ conv1_1 = layers.conv2d(
+ image, 64, 3, 1, activation_fn=None, scope='conv1_1')
+ conv1_1 = tf.nn.relu(conv1_1)
+ conv1_2 = layers.conv2d(
+ conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2')
+ conv1_2 = tf.nn.relu(conv1_2)
+ pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2)
+ conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1,
+ activation_fn=None, scope='conv2_1')
+ conv2_1 = tf.nn.relu(conv2_1)
+ conv2_2 = layers.conv2d(
+ conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2')
+ conv2_2 = tf.nn.relu(conv2_2)
+ pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2)
+ conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1,
+ activation_fn=None, scope='conv3_1')
+ conv3_1 = tf.nn.relu(conv3_1)
+ conv3_2 = layers.conv2d(
+ conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2')
+ conv3_2 = tf.nn.relu(conv3_2)
+ conv3_3 = layers.conv2d(
+ conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3')
+ conv3_3 = tf.nn.relu(conv3_3)
+ conv3_4 = layers.conv2d(
+ conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4')
+ conv3_4 = tf.nn.relu(conv3_4)
+ pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2)
+ conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1,
+ activation_fn=None, scope='conv4_1')
+ conv4_1 = tf.nn.relu(conv4_1)
+ conv4_2 = layers.conv2d(
+ conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2')
+ conv4_2 = tf.nn.relu(conv4_2)
+ conv4_3_CPM = layers.conv2d(
+ conv4_2, 256, 3, 1, activation_fn=None, scope='conv4_3_CPM')
+ conv4_3_CPM = tf.nn.relu(conv4_3_CPM)
+ conv4_4_CPM = layers.conv2d(
+ conv4_3_CPM, 256, 3, 1, activation_fn=None, scope='conv4_4_CPM')
+ conv4_4_CPM = tf.nn.relu(conv4_4_CPM)
+ conv4_5_CPM = layers.conv2d(
+ conv4_4_CPM, 256, 3, 1, activation_fn=None, scope='conv4_5_CPM')
+ conv4_5_CPM = tf.nn.relu(conv4_5_CPM)
+ conv4_6_CPM = layers.conv2d(
+ conv4_5_CPM, 256, 3, 1, activation_fn=None, scope='conv4_6_CPM')
+ conv4_6_CPM = tf.nn.relu(conv4_6_CPM)
+ conv4_7_CPM = layers.conv2d(
+ conv4_6_CPM, 128, 3, 1, activation_fn=None, scope='conv4_7_CPM')
+ conv4_7_CPM = tf.nn.relu(conv4_7_CPM)
+ conv5_1_CPM = layers.conv2d(
+ conv4_7_CPM, 512, 1, 1, activation_fn=None, scope='conv5_1_CPM')
+ conv5_1_CPM = tf.nn.relu(conv5_1_CPM)
+ conv5_2_CPM = layers.conv2d(
+ conv5_1_CPM, 15, 1, 1, activation_fn=None, scope='conv5_2_CPM')
+ concat_stage2 = tf.concat(
+ [conv5_2_CPM, conv4_7_CPM, pool_center_lower], 3)
+ Mconv1_stage2 = layers.conv2d(
+ concat_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage2')
+ Mconv1_stage2 = tf.nn.relu(Mconv1_stage2)
+ Mconv2_stage2 = layers.conv2d(
+ Mconv1_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage2')
+ Mconv2_stage2 = tf.nn.relu(Mconv2_stage2)
+ Mconv3_stage2 = layers.conv2d(
+ Mconv2_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage2')
+ Mconv3_stage2 = tf.nn.relu(Mconv3_stage2)
+ Mconv4_stage2 = layers.conv2d(
+ Mconv3_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage2')
+ Mconv4_stage2 = tf.nn.relu(Mconv4_stage2)
+ Mconv5_stage2 = layers.conv2d(
+ Mconv4_stage2, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage2')
+ Mconv5_stage2 = tf.nn.relu(Mconv5_stage2)
+ Mconv6_stage2 = layers.conv2d(
+ Mconv5_stage2, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage2')
+ Mconv6_stage2 = tf.nn.relu(Mconv6_stage2)
+ Mconv7_stage2 = layers.conv2d(
+ Mconv6_stage2, 15, 1, 1, activation_fn=None, scope='Mconv7_stage2')
+ if stage == 2:
+ return _process_stage(Mconv7_stage2, hm_size)
+
+ concat_stage3 = tf.concat(
+ [Mconv7_stage2, conv4_7_CPM, pool_center_lower], 3)
+ Mconv1_stage3 = layers.conv2d(
+ concat_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage3')
+ Mconv1_stage3 = tf.nn.relu(Mconv1_stage3)
+ Mconv2_stage3 = layers.conv2d(
+ Mconv1_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage3')
+ Mconv2_stage3 = tf.nn.relu(Mconv2_stage3)
+ Mconv3_stage3 = layers.conv2d(
+ Mconv2_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage3')
+ Mconv3_stage3 = tf.nn.relu(Mconv3_stage3)
+ Mconv4_stage3 = layers.conv2d(
+ Mconv3_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage3')
+ Mconv4_stage3 = tf.nn.relu(Mconv4_stage3)
+ Mconv5_stage3 = layers.conv2d(
+ Mconv4_stage3, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage3')
+ Mconv5_stage3 = tf.nn.relu(Mconv5_stage3)
+ Mconv6_stage3 = layers.conv2d(
+ Mconv5_stage3, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage3')
+ Mconv6_stage3 = tf.nn.relu(Mconv6_stage3)
+ Mconv7_stage3 = layers.conv2d(
+ Mconv6_stage3, 15, 1, 1, activation_fn=None, scope='Mconv7_stage3')
+ if stage == 3:
+ return _process_stage(Mconv7_stage3, hm_size)
+
+ concat_stage4 = tf.concat(
+ [Mconv7_stage3, conv4_7_CPM, pool_center_lower], 3)
+ Mconv1_stage4 = layers.conv2d(
+ concat_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage4')
+ Mconv1_stage4 = tf.nn.relu(Mconv1_stage4)
+ Mconv2_stage4 = layers.conv2d(
+ Mconv1_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage4')
+ Mconv2_stage4 = tf.nn.relu(Mconv2_stage4)
+ Mconv3_stage4 = layers.conv2d(
+ Mconv2_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage4')
+ Mconv3_stage4 = tf.nn.relu(Mconv3_stage4)
+ Mconv4_stage4 = layers.conv2d(
+ Mconv3_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage4')
+ Mconv4_stage4 = tf.nn.relu(Mconv4_stage4)
+ Mconv5_stage4 = layers.conv2d(
+ Mconv4_stage4, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage4')
+ Mconv5_stage4 = tf.nn.relu(Mconv5_stage4)
+ Mconv6_stage4 = layers.conv2d(
+ Mconv5_stage4, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage4')
+ Mconv6_stage4 = tf.nn.relu(Mconv6_stage4)
+ Mconv7_stage4 = layers.conv2d(
+ Mconv6_stage4, 15, 1, 1, activation_fn=None, scope='Mconv7_stage4')
+ if stage == 4:
+ return _process_stage(Mconv7_stage4, hm_size)
+
+ concat_stage5 = tf.concat(
+ [Mconv7_stage4, conv4_7_CPM, pool_center_lower], 3)
+ Mconv1_stage5 = layers.conv2d(
+ concat_stage5, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage5')
+ Mconv1_stage5 = tf.nn.relu(Mconv1_stage5)
+ Mconv2_stage5 = layers.conv2d(
+ Mconv1_stage5, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage5')
+ Mconv2_stage5 = tf.nn.relu(Mconv2_stage5)
+ Mconv3_stage5 = layers.conv2d(
+ Mconv2_stage5, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage5')
+ Mconv3_stage5 = tf.nn.relu(Mconv3_stage5)
+ Mconv4_stage5 = layers.conv2d(
+ Mconv3_stage5, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage5')
+ Mconv4_stage5 = tf.nn.relu(Mconv4_stage5)
+ Mconv5_stage5 = layers.conv2d(
+ Mconv4_stage5, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage5')
+ Mconv5_stage5 = tf.nn.relu(Mconv5_stage5)
+ Mconv6_stage5 = layers.conv2d(
+ Mconv5_stage5, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage5')
+ Mconv6_stage5 = tf.nn.relu(Mconv6_stage5)
+ Mconv7_stage5 = layers.conv2d(
+ Mconv6_stage5, 15, 1, 1, activation_fn=None, scope='Mconv7_stage5')
+ if stage == 5:
+ return _process_stage(Mconv7_stage5, hm_size)
+
+ concat_stage6 = tf.concat(
+ [Mconv7_stage5, conv4_7_CPM, pool_center_lower], 3)
+ Mconv1_stage6 = layers.conv2d(
+ concat_stage6, 128, 7, 1, activation_fn=None,
+ scope='Mconv1_stage6')
+ Mconv1_stage6 = tf.nn.relu(Mconv1_stage6)
+ Mconv2_stage6 = layers.conv2d(
+ Mconv1_stage6, 128, 7, 1, activation_fn=None,
+ scope='Mconv2_stage6')
+ Mconv2_stage6 = tf.nn.relu(Mconv2_stage6)
+ Mconv3_stage6 = layers.conv2d(
+ Mconv2_stage6, 128, 7, 1, activation_fn=None,
+ scope='Mconv3_stage6')
+ Mconv3_stage6 = tf.nn.relu(Mconv3_stage6)
+ Mconv4_stage6 = layers.conv2d(
+ Mconv3_stage6, 128, 7, 1, activation_fn=None,
+ scope='Mconv4_stage6')
+ Mconv4_stage6 = tf.nn.relu(Mconv4_stage6)
+ Mconv5_stage6 = layers.conv2d(
+ Mconv4_stage6, 128, 7, 1, activation_fn=None,
+ scope='Mconv5_stage6')
+ Mconv5_stage6 = tf.nn.relu(Mconv5_stage6)
+ Mconv6_stage6 = layers.conv2d(
+ Mconv5_stage6, 128, 1, 1, activation_fn=None,
+ scope='Mconv6_stage6')
+ Mconv6_stage6 = tf.nn.relu(Mconv6_stage6)
+ Mconv7_stage6 = layers.conv2d(
+ Mconv6_stage6, 15, 1, 1, activation_fn=None,
+ scope='Mconv7_stage6')
+ return _process_stage(Mconv7_stage6, hm_size)
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py
new file mode 100644
index 0000000000000000000000000000000000000000..d95affa0d4e05c3113c58df8e5909d18ad97af86
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py
@@ -0,0 +1,112 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+import cv2
+import numpy as np
+from .config import JOINT_DRAW_SIZE
+from .config import NORMALISATION_COEFFICIENT
+import matplotlib.pyplot as plt
+import math
+
+__all__ = [
+ 'draw_limbs',
+ 'plot_pose'
+]
+
+
+def draw_limbs(image, pose_2d, visible):
+ """Draw the 2D pose without the occluded/not visible joints."""
+
+ _COLORS = [
+ [0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0],
+ [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170],
+ [170, 0, 255]
+ ]
+ # _COLORS = [
+ # [0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0],
+ # [128, 0, 0], [0, 128, 0], [0, 0, 128], [255, 255, 255],
+ # [128, 128, 128]
+ # ]
+ _LIMBS = np.array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9,
+ 9, 10, 11, 12, 12, 13]).reshape((-1, 2))
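+ # each row of _LIMBS is a (joint, joint) index pair defining one limb to draw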
+
+ _NORMALISATION_FACTOR = int(math.floor(math.sqrt(image.shape[0] * image.shape[1] / NORMALISATION_COEFFICIENT)))
+
+ for oid in range(pose_2d.shape[0]):
+ # for i in range(14):
+ # cv2.putText(image, str(i), (pose_2d[oid][i][1], pose_2d[oid][i][0]), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255))
+ for lid, (p0, p1) in enumerate(_LIMBS):
+
+ if not (visible[oid][p0] and visible[oid][p1]):
+ continue
+ y0, x0 = pose_2d[oid][p0]
+ y1, x1 = pose_2d[oid][p1]
+ cv2.circle(image, (x0, y0), JOINT_DRAW_SIZE * _NORMALISATION_FACTOR, _COLORS[lid], -1)
+ cv2.circle(image, (x1, y1), JOINT_DRAW_SIZE * _NORMALISATION_FACTOR, _COLORS[lid], -1)
+ cv2.line(image, (x0, y0), (x1, y1),
+ _COLORS[lid], 10, 16)  # LIMB_DRAW_SIZE * _NORMALISATION_FACTOR
+
+
+def plot_pose(pose):
+ """Plot the 3D pose showing the joint connections."""
+ import mpl_toolkits.mplot3d.axes3d as p3
+
+ _CONNECTION = [
+ [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
+ [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],
+ [15, 16]]
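+ # pairs of joint indices in the 17-joint skeleton produced by the 3D lifting step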
+
+ def joint_color(j):
+ """
+ TODO: 'j' shadows name 'j' from outer scope
+ """
+
+ colors = [(0, 0, 0), (255, 0, 255), (0, 0, 255),
+ (0, 255, 255), (255, 0, 0), (0, 255, 0)]
+ _c = 0
+ if j in range(1, 4):
+ _c = 1
+ if j in range(4, 7):
+ _c = 2
+ if j in range(9, 11):
+ _c = 3
+ if j in range(11, 14):
+ _c = 4
+ if j in range(14, 17):
+ _c = 5
+ return colors[_c]
+
+ assert (pose.ndim == 2)
+ assert (pose.shape[0] == 3)
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ for c in _CONNECTION:
+ col = '#%02x%02x%02x' % joint_color(c[0])
+ ax.plot([pose[0, c[0]], pose[0, c[1]]],
+ [pose[1, c[0]], pose[1, c[1]]],
+ [pose[2, c[0]], pose[2, c[1]]], c=col)
+ for j in range(pose.shape[1]):
+ col = '#%02x%02x%02x' % joint_color(j)
+ ax.scatter(pose[0, j], pose[1, j], pose[2, j],
+ c=col, marker='o', edgecolor=col)
+ smallest = pose.min()
+ largest = pose.max()
+ ax.set_xlim3d(smallest, largest)
+ ax.set_ylim3d(smallest, largest)
+ ax.set_zlim3d(smallest, largest)
+
+ return fig
+
+
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8ffe330bc032f13b678c4c635ec043e0fc5a98c
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py
@@ -0,0 +1,270 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+import os
+import scipy.io as sio
+import numpy as np
+from .upright_fast import pick_e
+from . import config
+
+__all__ = ['Prob3dPose']
+
+
+class Prob3dPose:
+
+ def __init__(self, prob_model_path):
+ model_param = sio.loadmat(prob_model_path)
+ self.mu = np.reshape(
+ model_param['mu'], (model_param['mu'].shape[0], 3, -1))
+ self.e = np.reshape(model_param['e'], (model_param['e'].shape[
+ 0], model_param['e'].shape[1], 3, -1))
+ self.sigma = model_param['sigma']
+ self.cam = np.array(
+ [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]])
+
+ @staticmethod
+ def cost3d(model, gt):
+ """3d error in mm"""
+ out = np.sqrt(((gt - model) ** 2).sum(1)).mean(-1)
+ return out
+
+ @staticmethod
+ def renorm_gt(gt):
+ """Compel gt data to have mean joint length of one"""
+ _POSE_TREE = np.asarray([
+ [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
+ [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],
+ [15, 16]]).T
+ scale = np.sqrt(((gt[:, :, _POSE_TREE[0]] -
+ gt[:, :, _POSE_TREE[1]]) ** 2).sum(2).sum(1))
+ return gt / scale[:, np.newaxis, np.newaxis]
+
+ @staticmethod
+ def build_model(a, e, s0):
+ """Build 3D model"""
+ assert (s0.shape[1] == 3)
+ assert (e.shape[2] == 3)
+ assert (a.shape[1] == e.shape[1])
+ out = np.einsum('...i,...ijk', a, e)
+ out += s0
+ return out
+
+ @staticmethod
+ def build_and_rot_model(a, e, s0, r):
+ """
+ Build model and rotate according to the identified rotation matrix
+ """
+ from numpy.core.umath_tests import matrix_multiply
+
+ r2 = Prob3dPose.upgrade_r(r.T).transpose((0, 2, 1))
+ mod = Prob3dPose.build_model(a, e, s0)
+ mod = matrix_multiply(r2, mod)
+ return mod
+
+ @staticmethod
+ def upgrade_r(r):
+ """
+ Upgrades complex parameterisation of planar rotation to tensor
+ containing per frame 3x3 rotation matrices
+ """
+ assert (r.ndim == 2)
+ # Technically optional assert, but if this fails data is probably
+ # transposed
+ assert (r.shape[1] == 2)
+ assert (np.all(np.isfinite(r)))
+ norm = np.sqrt((r[:, :2] ** 2).sum(1))
+ assert (np.all(norm > 0))
+ r /= norm[:, np.newaxis]
+ assert (np.all(np.isfinite(r)))
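+ # the assignments below assemble, per frame, the rotation matrix
+ # [[r0, -r1, 0], [r1, r0, 0], [0, 0, 1]] about the vertical axis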
+ newr = np.zeros((r.shape[0], 3, 3))
+ newr[:, :2, 0] = r[:, :2]
+ newr[:, 2, 2] = 1
+ newr[:, 1::-1, 1] = r[:, :2]
+ newr[:, 0, 1] *= -1
+ return newr
+
+ @staticmethod
+ def centre(data_2d):
+ """center data according to each of the coordiante components"""
+ return (data_2d.T - data_2d.mean(1)).T
+
+ @staticmethod
+ def centre_all(data):
+ """center all data"""
+ if data.ndim == 2:
+ return Prob3dPose.centre(data)
+ return (data.transpose(2, 0, 1) - data.mean(2)).transpose(1, 2, 0)
+
+ @staticmethod
+ def normalise_data(d2, weights):
+ """Normalise data according to height"""
+
+ # the joints with weight set to 0 should not be considered in the
+ # normalisation process
+ d2 = d2.reshape(d2.shape[0], -1, 2).transpose(0, 2, 1)
+ idx_consider = weights[0, 0].astype(bool)
+ if np.sum(weights[:, 0].sum(1) >= config.MIN_NUM_JOINTS) == 0:
+ raise Exception(
+ 'Not enough 2D joints identified to generate 3D pose')
+ d2[:, :, idx_consider] = Prob3dPose.centre_all(d2[:, :, idx_consider])
+
+ # Height normalisation (2 meters)
+ m2 = d2[:, 1, idx_consider].min(1) / 2.0
+ m2 -= d2[:, 1, idx_consider].max(1) / 2.0
+ zero_height = m2 == 0
+ m2[zero_height] = 1.0
+ d2[:, :, idx_consider] /= m2[:, np.newaxis, np.newaxis]
+ return d2, m2
+
+ @staticmethod
+ def transform_joints(pose_2d, visible_joints):
+ """
+ Transform the set of joints according to what the probabilistic model
+ expects as input.
+
+ It returns the new set of joints of each of the people and the set of
+ weights for the joints.
+ """
+
+ _H36M_ORDER = [8, 9, 10, 11, 12, 13, 1, 0, 5, 6, 7, 2, 3, 4]
+ _W_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16]
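+ # _H36M_ORDER reorders the 14 detected joints into the H3.6M convention,
+ # _W_POS lists the H3.6M joint slots (out of config.H36M_NUM_JOINTS) they occupy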
+
+ def swap_xy(poses):
+ tmp = np.copy(poses[:, :, 0])
+ poses[:, :, 0] = poses[:, :, 1]
+ poses[:, :, 1] = tmp
+ return poses
+
+ assert (pose_2d.ndim == 3)
+ new_pose = pose_2d.copy()
+ new_pose = swap_xy(new_pose)
+ new_pose = new_pose[:, _H36M_ORDER]
+
+ # defining weights according to occlusions
+ weights = np.zeros((pose_2d.shape[0], 2, config.H36M_NUM_JOINTS))
+ ordered_visibility = np.repeat(
+ visible_joints[:, _H36M_ORDER, np.newaxis], 2, 2
+ ).transpose([0, 2, 1])
+ weights[:, :, _W_POS] = ordered_visibility
+ return new_pose, weights
+
+ def affine_estimate(self, w, depth_reg=0.085, weights=None, scale=10.0,
+ scale_mean=0.0016 * 1.8 * 1.2, scale_std=1.2 * 0,
+ cap_scale=-0.00129):
+ """
+ Quick switch to allow reconstruction at unknown scale returns a,r
+ and scale
+ """
+ weights = np.zeros((0, 0, 0)) if weights is None else weights
+
+ s = np.empty((self.sigma.shape[0], self.sigma.shape[1] + 4)) # e,y,x,z
+ s[:, :4] = 10 ** -5 # Tiny but makes stuff well-posed
+ s[:, 0] = scale_std
+ s[:, 4:] = self.sigma
+ s[:, 4:-1] *= scale
+
+ e2 = np.zeros((self.e.shape[0], self.e.shape[
+ 1] + 4, 3, self.e.shape[3]))
+ e2[:, 1, 0] = 1.0
+ e2[:, 2, 1] = 1.0
+ e2[:, 3, 0] = 1.0
+ # This makes the least-squares problem ill posed, as X and Z are
+ # interchangeable
+ # Hence regularisation above to speed convergence and stop blow-up
+ e2[:, 0] = self.mu
+ e2[:, 4:] = self.e
+ t_m = np.zeros_like(self.mu)
+
+ res, a, r = pick_e(w, e2, t_m, self.cam, s, weights=weights,
+ interval=0.01, depth_reg=depth_reg,
+ scale_prior=scale_mean)
+
+ scale = a[:, :, 0]
+ reestimate = scale > cap_scale
+ m = self.mu * cap_scale
+ for i in range(scale.shape[0]):
+ if reestimate[i].sum() > 0:
+ ehat = e2[i:i + 1, 1:]
+ mhat = m[i:i + 1]
+ shat = s[i:i + 1, 1:]
+ (res2, a2, r2) = pick_e(
+ w[reestimate[i]], ehat, mhat, self.cam, shat,
+ weights=weights[reestimate[i]],
+ interval=0.01, depth_reg=depth_reg,
+ scale_prior=scale_mean
+ )
+ res[i:i + 1, reestimate[i]] = res2
+ a[i:i + 1, reestimate[i], 1:] = a2
+ a[i:i + 1, reestimate[i], 0] = cap_scale
+ r[i:i + 1, :, reestimate[i]] = r2
+ scale = a[:, :, 0]
+ a = a[:, :, 1:] / a[:, :, 0][:, :, np.newaxis]
+ return res, e2[:, 1:], a, r, scale
+
+ def better_rec(self, w, model, s=1, weights=1, damp_z=1):
+ """Quick switch to allow reconstruction at unknown scale
+ returns a,r and scale"""
+ from numpy.core.umath_tests import matrix_multiply
+ proj = matrix_multiply(self.cam[np.newaxis], model)
+ proj[:, :2] = (proj[:, :2] * s + w * weights) / (s + weights)
+ proj[:, 2] *= damp_z
+ out = matrix_multiply(self.cam.T[np.newaxis], proj)
+ return out
+
+ def create_rec(self, w2, weights, res_weight=1):
+ """Reconstruct 3D pose given a 2D pose"""
+ _SIGMA_SCALING = 5.2
+
+ res, e, a, r, scale = self.affine_estimate(
+ w2, scale=_SIGMA_SCALING, weights=weights,
+ depth_reg=0, cap_scale=-0.001, scale_mean=-0.003
+ )
+
+ remaining_dims = 3 * w2.shape[2] - e.shape[1]
+ assert (remaining_dims >= 0)
+ llambda = -np.log(self.sigma)
+ lgdet = np.sum(llambda[:, :-1], 1) + llambda[:, -1] * remaining_dims
+ score = (res * res_weight + lgdet[:, np.newaxis] * (scale ** 2))
+ best = np.argmin(score, 0)
+ index = np.arange(best.shape[0])
+ a2 = a[best, index]
+ r2 = r[best, :, index].T
+ rec = Prob3dPose.build_and_rot_model(a2, e[best], self.mu[best], r2)
+ rec *= -np.abs(scale[best, index])[:, np.newaxis, np.newaxis]
+
+ rec = self.better_rec(w2, rec, 1, 1.55 * weights, 1) * -1
+ rec = Prob3dPose.renorm_gt(rec)
+ rec *= 0.97
+ return rec
+
+ def compute_3d(self, pose_2d, weights):
+ """Reconstruct 3D poses given 2D estimations"""
+
+ _J_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16]
+ _SCALE_3D = 1174.88312988
+
+ if pose_2d.shape[1] != config.H36M_NUM_JOINTS:
+ # need to call the linear regressor
+ reg_joints = np.zeros(
+ (pose_2d.shape[0], config.H36M_NUM_JOINTS, 2))
+ for oid, single_pose in enumerate(pose_2d):
+ reg_joints[oid, _J_POS] = single_pose
+
+ norm_pose, _ = Prob3dPose.normalise_data(reg_joints, weights)
+ else:
+ norm_pose, _ = Prob3dPose.normalise_data(pose_2d, weights)
+
+ pose_3d = self.create_rec(norm_pose, weights) * _SCALE_3D
+ return pose_3d
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab25ff010df367c7d6f64e73a566d133037dc342
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py
@@ -0,0 +1,310 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import division
+from npu_bridge.npu_init import *
+
+import skimage.io
+import skimage.transform
+import scipy.ndimage as ndimage
+import scipy.ndimage.filters as filters
+from scipy.stats import multivariate_normal
+
+import os
+import json
+import numpy as np
+from . import config
+import cv2
+from itertools import compress
+
+__all__ = [
+ 'detect_objects_heatmap',
+ 'gaussian_kernel',
+ 'gaussian_heatmap',
+ 'prepare_input_posenet',
+ 'detect_parts_heatmaps',
+ 'detect_parts_from_likelihoods',
+ 'import_json',
+ 'generate_labels',
+ 'generate_center_map',
+ 'rescale',
+ 'crop_image'
+]
+
+
+def detect_objects_heatmap(heatmap):
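+ """Find person centres as local maxima of the centre heat-map, keeping
+ only peaks whose value exceeds config.CENTER_TR; returns (y, x) positions."""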
+ data = 256 * heatmap
+ data_max = filters.maximum_filter(data, 3)
+ maxima = (data == data_max)
+ data_min = filters.minimum_filter(data, 3)
+ diff = ((data_max - data_min) > 0.3)
+ maxima[diff == 0] = 0
+
+ labeled, num_objects = ndimage.label(maxima)
+ slices = ndimage.find_objects(labeled)
+ objects = np.zeros((num_objects, 2), dtype=np.int32)
+ pidx = 0
+ for (dy, dx) in slices:
+ pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2]
+ if heatmap[pos[0], pos[1]] > config.CENTER_TR:
+ objects[pidx, :] = pos
+ pidx += 1
+ return objects[:pidx]
+
+
+def gaussian_kernel(h, w, sigma_h, sigma_w):
+ yx = np.mgrid[-h // 2:h // 2, -w // 2:w // 2] ** 2
+ return np.exp(-yx[0, :, :] / sigma_h ** 2 - yx[1, :, :] / sigma_w ** 2)
+
+
+def gaussian_heatmap(h, w, pos_x, pos_y, sigma_h=1, sigma_w=1, init=None):
+ """
+ Compute a heat-map of size (w x h) with a Gaussian distribution centred
+ at position (pos_x, pos_y) and a covariance matrix defined by the related
+ sigma values.
+ The resulting heat-map can be added to a given heat-map init.
+ """
+ init = init if init is not None else []
+
+ cov_matrix = np.eye(2) * ([sigma_h**2, sigma_w**2])
+
+ x, y = np.mgrid[0:h, 0:w]
+ pos = np.dstack((x, y))
+ rv = multivariate_normal([pos_x, pos_y], cov_matrix)
+
+ tmp = rv.pdf(pos)
+ hmap = np.multiply(
+ tmp, np.sqrt(np.power(2 * np.pi, 2) * np.linalg.det(cov_matrix))
+ )
+ idx = np.where(hmap.flatten() <= np.exp(-4.6052))
+ hmap.flatten()[idx] = 0
+
+ if np.size(init) == 0:
+ return hmap
+
+ assert (np.shape(init) == hmap.shape)
+ hmap += init
+ idx = np.where(hmap.flatten() > 1)
+ hmap.flatten()[idx] = 1
+ return hmap
+
+
+def prepare_input_posenet(image, objects, size_person, size,
+ batch_size, sigma=25, border=400):
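+ """Crop a fixed-size window around each detected person and attach a
+ Gaussian centre map; returns the image crops (3 channels) and the
+ centre maps (1 channel) as two separate arrays."""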
+ result = np.zeros((batch_size, size[0], size[1], 4))
+ padded_image = np.zeros(
+ (1, size_person[0] + border, size_person[1] + border, 4))
+ padded_image[0, border // 2:-border // 2,
+ border // 2:-border // 2, :3] = image
+ if objects.shape[0] > batch_size:
+ objects = objects[:batch_size]
+ for oid, (yc, xc) in enumerate(objects):
+ dh, dw = size[0] // 2, size[1] // 2
+ y0, x0, y1, x1 = np.array(
+ [yc - dh, xc - dw, yc + dh, xc + dw]) + border // 2
+ result[oid, :, :, :4] = padded_image[:, y0:y1, x0:x1, :]
+ result[oid, :, :, 3] = gaussian_kernel(size[0], size[1], sigma, sigma)
+ return np.split(result, [3], 3)
+
+
+def detect_parts_heatmaps(heatmaps, centers, size, num_parts=14):
+ """
+ Given the heat-maps, find the position of each joint by means of an
+ argmax function.
+ """
+ parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32)
+ visible = np.zeros((len(centers), num_parts), dtype=bool)
+ for oid, (yc, xc) in enumerate(centers):
+ part_hmap = skimage.transform.resize(
+ np.clip(heatmaps[oid], -1, 1), size)
+ for pid in range(num_parts):
+ y, x = np.unravel_index(np.argmax(part_hmap[:, :, pid]), size)
+ parts[oid, pid] = y + yc - size[0] // 2, x + xc - size[1] // 2
+ visible[oid, pid] = np.mean(
+ part_hmap[:, :, pid]) > config.VISIBLE_PART
+ return parts, visible
+
+
+def detect_parts_from_likelihoods(poses, centers, likelihoods, num_parts=14):
+ """
+ Given the regressed joint coordinates and their likelihoods, map each
+ joint back to image coordinates and flag it as visible when its
+ likelihood exceeds config.VISIBLE_PART.
+ """
+ if len(centers) > config.BATCH_SIZE:
+ centers = centers[:config.BATCH_SIZE]
+ parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32)
+ visible = np.zeros((len(centers), num_parts), dtype=bool)
+ for oid, (yc, xc) in enumerate(centers):
+ for pid in range(num_parts):
+ x, y = poses[oid, :, pid]
+ parts[oid, pid] = y + yc - config.INPUT_SIZE // 2, x + xc - config.INPUT_SIZE // 2
+ visible[oid, pid] = likelihoods[oid, pid] > config.VISIBLE_PART
+ return parts, visible
+
+
+def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'):
+ """Get the json file containing the dataset.
+ We want the data to be shuffled, however the training has to be repeatable.
+ This means that once shuffled the order has to be maintained."""
+ with open(path) as data_file:
+ data_this = json.load(data_file)
+ data_this = np.array(data_this['root'])
+ num_samples = len(data_this)
+
+ if os.path.exists(order):
+ idx = np.load(order)
+ else:
+ idx = np.random.permutation(num_samples).tolist()
+ np.save(order, idx)
+
+ is_not_validation = [not data_this[i]['isValidation']
+ for i in range(num_samples)]
+ keep_data_idx = list(compress(idx, is_not_validation))
+
+ data = data_this[keep_data_idx]
+ return data, len(keep_data_idx)
+
+
+def generate_labels(image_shape, joint_positions, num_other_people,
+ joints_other_people, offset):
+ """
+ Given as input a set of joint positions and the size of the input image
+ it generates
+ a set of heat-maps of the same size. It generates both heat-maps used as
+ labels for the first stage (label_1st_lower) and for all the other stages
+ (label_lower).
+ """
+ _FILTER_JOINTS = np.array([9, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5])
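+ # selects 14 of the 16 annotated joints (pelvis and thorax are dropped),
+ # assuming the standard MPII joint indexing, and reorders them for the network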
+
+ img_height, img_width, _ = image_shape
+ heat_maps_single_p = np.zeros(
+ (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE))
+ heat_maps_other_p = np.zeros(
+ (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE))
+
+ # generate first set of heat-maps
+ for i in range(config.NUM_JOINTS):
+ # the set of joints can be different from the one in the json file
+ curr_joint = joint_positions[_FILTER_JOINTS[i]]
+ skip = (curr_joint[0] < 0 or curr_joint[1] < 0 or
+ curr_joint[0] >= img_width or curr_joint[1] >= img_height)
+ if not skip:
+ heat_maps_single_p[i] = gaussian_heatmap(
+ config.INPUT_SIZE, config.INPUT_SIZE,
+ curr_joint[1] - offset[1], curr_joint[0] - offset[0],
+ config.SIGMA, config.SIGMA)
+
+ heat_maps_other_p[i] = gaussian_heatmap(
+ config.INPUT_SIZE, config.INPUT_SIZE,
+ curr_joint[1] - offset[1], curr_joint[0] - offset[0],
+ config.SIGMA, config.SIGMA)
+
+ heat_maps_single_p[-1] = np.maximum(
+ 1 - np.max(heat_maps_single_p[:-1], axis=0),
+ np.zeros((config.INPUT_SIZE, config.INPUT_SIZE)))
+ heat_maps_single_p = np.transpose(heat_maps_single_p, (1, 2, 0))
+
+ # generate second set of heat-maps for other people in the image
+ for p in range(int(num_other_people)):
+ for i in range(config.NUM_JOINTS):
+ # the set of joints can be different from the one in the json file
+ try:
+ if num_other_people == 1:
+ curr_joint = joints_other_people[_FILTER_JOINTS[i]]
+ else:
+ curr_joint = joints_other_people[p][_FILTER_JOINTS[i]]
+ skip = (
+ curr_joint[0] < 0 or curr_joint[1] < 0 or
+ curr_joint[0] >= img_width or curr_joint[1] >= img_height)
+ except IndexError:
+ skip = True
+ if not skip:
+ heat_maps_other_p[i] = gaussian_heatmap(
+ config.INPUT_SIZE, config.INPUT_SIZE,
+ curr_joint[1] - offset[1], curr_joint[0] - offset[0],
+ config.SIGMA, config.SIGMA, init=heat_maps_other_p[i])
+
+ heat_maps_other_p[-1] = np.maximum(
+ 1 - np.max(heat_maps_other_p[:-1], axis=0),
+ np.zeros((config.INPUT_SIZE, config.INPUT_SIZE)))
+
+ heat_maps_other_p = np.transpose(heat_maps_other_p, (1, 2, 0))
+
+ # rescaling heat-maps according to the desired output shape
+ labels_single = rescale(heat_maps_single_p, config.OUTPUT_SIZE)
+ labels_people = rescale(heat_maps_other_p, config.OUTPUT_SIZE)
+ return labels_people, labels_single
+
+
+def generate_center_map(center_pos, img_shape):
+ """
+ Given the position of the person and the size of the input image, it
+ generates a heat-map where a Gaussian distribution is fit at the
+ position of the person in the image.
+ """
+ # img_shape is a single side length, so the generated heat-map is square
+ img_height = img_shape
+ img_width = img_shape
+ center_map = gaussian_heatmap(
+ img_height, img_width, center_pos[1], center_pos[0],
+ config.SIGMA_CENTER, config.SIGMA_CENTER)
+ return center_map
+
+
+def rescale(data, new_size):
+ """Rescale data to a fixed dimension, regardless the number of channels.
+ Data has to be in the format (h,w,c)."""
+ if data.ndim > 2:
+ assert data.shape[2] < data.shape[0]
+ assert data.shape[2] < data.shape[1]
+ resized_data = cv2.resize(
+ data, (new_size, new_size), interpolation=cv2.INTER_CUBIC)
+ return resized_data
+
+
+def crop_image(image, obj_pose):
+ """
+ Crop the image so that the person is at the center and the final image
+ size matches the expected CNN input size.
+ Returns the cropped image and the offset that is used to update the joint
+ positions.
+ """
+ offset_left = int(obj_pose[0] - config.INPUT_SIZE // 2)
+ offset_up = int(obj_pose[1] - config.INPUT_SIZE // 2)
+ # just for checking that it's inside the image
+ offset_right = int(image.shape[1] - obj_pose[0] - config.INPUT_SIZE // 2)
+ offset_bottom = int(image.shape[0] - obj_pose[1] - config.INPUT_SIZE // 2)
+
+ pad_left, pad_right, pad_up, pad_bottom = 0, 0, 0, 0
+ if offset_left < 0:
+ pad_left = -offset_left
+ if offset_right < 0:
+ pad_right = -offset_right
+ if offset_up < 0:
+ pad_up = -offset_up
+ if offset_bottom < 0:
+ pad_bottom = -offset_bottom
+ padded_image = np.lib.pad(
+ image, ((pad_up, pad_bottom), (pad_left, pad_right), (0, 0)),
+ 'constant', constant_values=((0, 0), (0, 0), (0, 0)))
+
+ cropped_image = padded_image[
+ offset_up + pad_up: offset_up + pad_up + config.INPUT_SIZE,
+ offset_left + pad_left: offset_left + pad_left + config.INPUT_SIZE]
+
+ return cropped_image, np.array([offset_left, offset_up])
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbfd5ec221cff91411b65c9f566ecc4d69ad1397
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py
@@ -0,0 +1,302 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+import numpy as np
+import scipy
+
+__all__ = [
+ 'upgrade_r',
+ 'update_cam',
+ 'estimate_a_and_r_with_res',
+ 'estimate_a_and_r_with_res_weights',
+ 'pick_e'
+]
+
+
+def upgrade_r(r):
+ """Upgrades complex parameterisation of planar rotation to tensor containing
+ per frame 3x3 rotation matrices"""
+ newr = np.zeros((3, 3))
+ newr[:2, 0] = r
+ newr[2, 2] = 1
+ newr[1::-1, 1] = r
+ newr[0, 1] *= -1
+ return newr
+
+
+def update_cam(cam):
+ new_cam = cam[[0, 2, 1]].copy()
+ new_cam = new_cam[:, [0, 2, 1]]
+ return new_cam
+
+
+def estimate_a_and_r_with_res(
+ w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e,
+ residue, Ps, depth_reg, scale_prior):
+ """
+ TODO: Missing the following parameters in docstring:
+ - w, e, s0, camera_r, Lambda, check, a, res, proj_e, depth_reg,
+ scale_prior
+
+ TODO: The following parameters are not used:
+ - s0, weights
+
+ So local optima are a problem in general.
+ However:
+
+ 1. This problem is convex in a but not in r, and
+
+ 2. each frame can be solved independently.
+
+ So for each frame, we can do a grid search in r and take the globally
+ optimal solution.
+
+ In practice, we just brute force over 100 different estimates of r, and
+ take the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a
+ given r.
+
+ Arguments:
+
+ w is a 3d measurement matrix of form frames*2*points
+
+ e is a 3d set of basis vectors of form basis*3*points
+
+ s0 is the 3d rest shape of form 3*points
+
+ Lambda are the regularisor coefficients on the coefficients of the
+ weights typically generated using PPCA
+
+ interval is how far round the circle we should check for break points
+ we check every interval*2*pi radians
+
+ Returns:
+
+ a (basis coefficients) and r (representation of rotations as a complex
+ number)
+ """
+ frames = w.shape[0]
+ points = w.shape[2]
+ basis = e.shape[0]
+ r = np.empty(2)
+ Ps_reshape = Ps.reshape(2 * points)
+ w_reshape = w.reshape((frames, points * 2))
+
+ for i in range(check.size):
+ c = check[i]
+ # use the same (sin, cos) layout as the r returned at the end of this
+ # function, so the selected angle matches the reported rotation
+ r[0] = np.sin(c)
+ r[1] = np.cos(c)
+ grot = camera_r.dot(upgrade_r(r))
+ rot = grot[:2]
+ # project the rest shape into Ps before it is subtracted from the
+ # measurements below (Ps is allocated uninitialised in pick_e)
+ rot.dot(s0, Ps)
+ res[:, :points * 2] = w_reshape
+ res[:, :points * 2] -= Ps_reshape
+ proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape(
+ e.shape[0], 2 * points)
+
+ if Lambda.size != 0:
+ proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1])
+ res[:, 2 * points:].fill(0)
+ res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1]
+ proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1]
+ # depth regularizer not used
+ proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] *
+ depth_reg) * grot[2]).dot(e)
+ # we let the person change scale
+ res[:, 2 * points] = scale_prior
+
+ """
+ TODO: PLEASE REVIEW THE FOLLOWING CODE....
+ overwrite_a and overwrite_b ARE UNEXPECTED ARGUMENTS OF
+ scipy.linalg.lstsq
+ """
+ a[i], residue[i], _, _ = scipy.linalg.lstsq(
+ proj_e.T, res.T, overwrite_a=True, overwrite_b=True)
+
+ # find and return best corresponding solution
+ best = np.argmin(residue, 0)
+ assert (best.shape[0] == frames)
+ theta = check[best]
+ index = (best, np.arange(frames))
+ aa = a.transpose(0, 2, 1)[index]
+ retres = residue[index]
+ r = np.empty((2, frames))
+ r[0] = np.sin(theta)
+ r[1] = np.cos(theta)
+ return aa, r, retres
+
+
+def estimate_a_and_r_with_res_weights(
+ w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e,
+ residue, Ps, depth_reg, scale_prior):
+ """
+ TODO: Missing the following parameters in docstring:
+ - w, e, s0, camera_r, Lambda, check, a, res, proj_e, residue,
+ Ps, depth_reg, scale_prior
+
+ So local optima are a problem in general.
+ However:
+
+ 1. This problem is convex in a but not in r, and
+
+ 2. each frame can be solved independently.
+
+ So for each frame, we can do a grid search in r and take the globally
+ optimal solution.
+
+ In practice, we just brute force over 100 different estimates of r, and
+ take the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a
+ given r.
+
+ Arguments:
+
+ w is a 3d measurement matrix of form frames*2*points
+
+ e is a 3d set of basis vectors of form basis*3*points
+
+ s0 is the 3d rest shape of form 3*points
+
+ Lambda are the regularisor coefficients on the coefficients of the
+ weights, typically generated using PPCA
+
+ interval is how far round the circle we should check for break points
+ we check every interval*2*pi radians
+
+ Returns:
+
+ a (basis coefficients) and r (representation of rotations as a complex
+ number)
+ """
+ frames = w.shape[0]
+ points = w.shape[2]
+ basis = e.shape[0]
+ r = np.empty(2)
+ Ps_reshape = Ps.reshape(2 * points)
+ w_reshape = w.reshape((frames, points * 2))
+ p_copy = np.empty_like(proj_e)
+
+ for i in range(check.size):
+ c = check[i]
+ r[0] = np.sin(c)
+ r[1] = np.cos(c)
+ grot = camera_r.dot(upgrade_r(r).T)
+ rot = grot[:2]
+ rot.dot(s0, Ps) # TODO: remove?
+ res[:, :points * 2] = w_reshape
+ res[:, :points * 2] -= Ps_reshape
+ proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape(
+ e.shape[0], 2 * points)
+
+ if Lambda.size != 0:
+ proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1])
+ res[:, 2 * points:].fill(0)
+ res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1]
+ proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1]
+ proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] *
+ depth_reg) * grot[2]).dot(e)
+ res[:, 2 * points] = scale_prior
+ if weights.size != 0:
+ res[:, :points * 2] *= weights
+ for j in range(frames):
+ p_copy[:] = proj_e
+ p_copy[:, :points * 2] *= weights[j]
+ a[i, :, j], comp_residual, _, _ = np.linalg.lstsq(
+ p_copy.T, res[j].T)
+ if not comp_residual:
+ # equations are over-determined
+ residue[i, j] = 1e-5
+ else:
+ residue[i, j] = comp_residual
+ # find and return best corresponding solution
+ best = np.argmin(residue, 0)
+ index = (best, np.arange(frames))
+ theta = check[best]
+ aa = a.transpose(0, 2, 1)[index]
+ retres = residue[index]
+ r = np.empty((2, frames))
+ r[0] = np.sin(theta)
+ r[1] = np.cos(theta)
+ return aa, r, retres
+
+
+def pick_e(w, e, s0, camera_r=None, Lambda=None,
+ weights=None, scale_prior=-0.0014, interval=0.01, depth_reg=0.0325):
+ """Brute force over charts from the manifold to find the best one.
+ Returns the per-frame score for every chart, together with the
+ corresponding a and r coefficients."""
+
+ camera_r = np.asarray([[1, 0, 0], [0, 0, -1], [0, 1, 0]]
+ ) if camera_r is None else camera_r
+ Lambda = np.ones((0, 0)) if Lambda is None else Lambda
+ weights = np.ones((0, 0, 0)) if weights is None else weights
+
+ charts = e.shape[0]
+ frames = w.shape[0]
+ basis = e.shape[1]
+ points = e.shape[3]
+ assert (s0.shape[0] == charts)
+ r = np.empty((charts, 2, frames))
+ a = np.empty((charts, frames, e.shape[1]))
+ score = np.empty((charts, frames))
+ check = np.arange(0, 1, interval) * 2 * np.pi
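+ # candidate rotation angles: a uniform grid over [0, 2*pi) with step interval * 2*pi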
+ cache_a = np.empty((check.size, basis, frames))
+ residue = np.empty((check.size, frames))
+
+ if Lambda.size != 0:
+ res = np.zeros((frames, points * 2 + basis + points))
+ proj_e = np.zeros((basis, 2 * points + basis + points))
+ else:
+ res = np.empty((frames, points * 2))
+ proj_e = np.empty((basis, 2 * points))
+ Ps = np.empty((2, points))
+
+ if weights.size == 0:
+ for i in range(charts):
+ if Lambda.size != 0:
+ a[i], r[i], score[i] = estimate_a_and_r_with_res(
+ w, e[i], s0[i], camera_r,
+ Lambda[i], check, cache_a, weights,
+ res, proj_e, residue, Ps,
+ depth_reg, scale_prior)
+ else:
+ a[i], r[i], score[i] = estimate_a_and_r_with_res(
+ w, e[i], s0[i], camera_r, Lambda,
+ check, cache_a, weights,
+ res, proj_e, residue, Ps,
+ depth_reg, scale_prior)
+ else:
+ w2 = weights.reshape(weights.shape[0], -1)
+ for i in range(charts):
+ if Lambda.size != 0:
+ a[i], r[i], score[i] = estimate_a_and_r_with_res_weights(
+ w, e[i], s0[i], camera_r,
+ Lambda[i], check, cache_a, w2,
+ res, proj_e, residue, Ps,
+ depth_reg, scale_prior)
+ else:
+ a[i], r[i], score[i] = estimate_a_and_r_with_res_weights(
+ w, e[i], s0[i], camera_r, Lambda,
+ check, cache_a, w2,
+ res, proj_e, residue, Ps,
+ depth_reg, scale_prior)
+
+ remaining_dims = 3 * w.shape[2] - e.shape[1]
+ assert (np.all(score > 0))
+ assert (remaining_dims >= 0)
+ # Zero problems in log space due to un-regularised first co-efficient
+ l = Lambda.copy()
+ l[Lambda == 0] = 1
+ llambda = -np.log(l)
+ score /= 2
+ return score, a, r
+
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..959dee0a614abbe4d178bb806c3d69736426095e
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt
@@ -0,0 +1,8 @@
+matplotlib==3.4.0
+numpy==1.19.3
+opencv_python==4.5.1.48
+scikit_image==0.18.1
+scipy==1.2.1
+tensorflow==1.15.0
+tqdm==4.62.2
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ecebc1e1e82a673646474cde182db656db3ddc4f
Binary files /dev/null and b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg differ
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cf4bc4a4caf7df7fe7dc532731b06a16b938bff6
Binary files /dev/null and b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg differ
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c74dd2525c2d600059ae6aad7fb6e12fab37dc32
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+
+##########################################################
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+##########################################################
+# directory containing this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# check whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# name of the network being trained
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# initialise path parameters
+data_path=""
+output_path=""
+
+# help message, no need to modify
+if [[ $1 == --help || $1 == -h ]];then
+ echo "usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# parameter parsing, no need to modify
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# check that data_path was provided, no need to modify
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be config"
+ exit 1
+fi
+
+# check whether output_path was provided, no need to modify
+if [[ $output_path == "" ]];then
+ output_path="./test/output/${ASCEND_DEVICE_ID}"
+fi
+
+# set the console log file name, please keep this; the file name is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+ echo "running without etp..."
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+ print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
+
+# switch to the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# record training start time, no need to modify
+start_time=$(date +%s)
+##########################################################
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+##########################################################
+
+#=========================================================
+#=========================================================
+#======== training command, adapt it to your network ======
+#=========================================================
+#=========================================================
+# basic parameters, review and adjust them for your model
+# your training dataset is under ${data_path}, please use this variable directly
+# your training output directory is ${output_path}, please use this variable directly
+# other basic parameters may be added, but keep batch_size and set it to the correct value
+train_epochs=30
+batch_size=4
+
+if [ x"${modelarts_flag}" != x ];
+then
+ python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \
+ --epochs=${train_epochs} --batch_size=${batch_size}
+else
+ python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \
+ --epochs=${train_epochs} --batch_size=${batch_size} 1>${print_log} 2>&1
+fi
+
+# performance statistics
+
+# read the iterations/s value (ITPS), then compute StepTime = 1/ITPS; FPS = BATCH_SIZE * ITPS
+ITPS=`grep "100%" ${print_log} | awk '{print $NF}'| cut -d "i" -f 1 | awk '{sum+=$1} END {print sum/NR}'`
+StepTime=`awk 'BEGIN{printf "%.2f", '1'/'${ITPS}'}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*'${ITPS}'}'`
+
+
+# accuracy statistics
+train_accuracy=`grep "Validation PCKh@0.5:" ${print_log} | tail -n 1| awk '{print $4}' | cut -c 10- | awk '{sum+=$1} END {print sum/NR}'`
+
+# extract all printed loss values
+grep "loss=" ${print_log} | awk -F "=" '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+######### do NOT modify anything below this line ##########
+######### do NOT modify anything below this line ##########
+######### do NOT modify anything below this line ##########
+###########################################################
+
+# check whether this run actually used the Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+ echo "------------------ ERROR NOTICE START ------------------"
+ echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+ echo "------------------ ERROR NOTICE END------------------"
+else
+ echo "------------------ INFO NOTICE START------------------"
+ echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------"
+fi
+
+# get the final casename, please keep this; the case file name is ${CaseName}
+get_casename
+
+# rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# end-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# print performance: FPS / per-step time / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance sec/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+
+# print training accuracy
+echo "Final Train Accuracy : ${train_accuracy}"
+
+# loss value of the last iteration, no need to modify
+ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`)
+
+# key information is written to ${CaseName}.log, no need to modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..977218b85e294e659d9a9f8f384898f2045b28ef
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh
@@ -0,0 +1,191 @@
+#!/bin/bash
+
+##########################################################
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+##########################################################
+# directory containing this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# check whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# name of the network being trained
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# initialise path parameters
+data_path=""
+output_path=""
+
+# help message, no need to modify
+if [[ $1 == --help || $1 == -h ]];then
+ echo "usage:./train_performance_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# parameter parsing, no need to modify
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# check that data_path was provided, no need to modify
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be config"
+ exit 1
+fi
+
+# check whether output_path was provided, no need to modify
+if [[ $output_path == "" ]];then
+ output_path="./test/output/${ASCEND_DEVICE_ID}"
+fi
+
+# set the console log file name, please keep this; the file name is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+ echo "running with modelarts..."
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+ print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
+
+# switch to the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# record training start time, no need to modify
+start_time=$(date +%s)
+##########################################################
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+######### Lines 3 to 100: please do NOT modify ###########
+##########################################################
+
+#=========================================================
+#=========================================================
+#======== training command, adapt it to your network ======
+#=========================================================
+#=========================================================
+# basic parameters, review and adjust them for your model
+# your training dataset is under ${data_path}, please use this variable directly
+# your training output directory is ${output_path}, please use this variable directly
+# other basic parameters may be added, but keep batch_size and set it to the correct value
+train_epochs=1
+batch_size=4
+
+if [ x"${modelarts_flag}" != x ];
+then
+ python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \
+ --epochs=${train_epochs} --batch_size=${batch_size}
+else
+ python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \
+ --epochs=${train_epochs} --batch_size=${batch_size} 1>${print_log} 2>&1
+fi
+
+# performance statistics
+
+# read the iterations/s value (ITPS), then compute StepTime = 1/ITPS; FPS = BATCH_SIZE * ITPS
+ITPS=`grep "100%" ${print_log} | awk '{print $NF}'| cut -d "i" -f 1 | awk '{sum+=$1} END {print sum/NR}'`
+StepTime=`awk 'BEGIN{printf "%.2f", '1'/'${ITPS}'}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*'${ITPS}'}'`
+
+
+# accuracy statistics
+train_accuracy=`grep "Validation PCKh@0.5:" ${print_log} | tail -n 1| awk '{print $4}' | cut -c 10- | awk '{sum+=$1} END {print sum/NR}'`
+
+# extract all printed loss values
+grep "loss=" ${print_log} | awk -F "=" '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+######### do NOT modify anything below this line ##########
+######### do NOT modify anything below this line ##########
+######### do NOT modify anything below this line ##########
+###########################################################
+
+# check whether this run actually used the Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+ echo "------------------ ERROR NOTICE START ------------------"
+ echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+ echo "------------------ ERROR NOTICE END------------------"
+else
+ echo "------------------ INFO NOTICE START------------------"
+ echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------"
+fi
+
+# get the final casename, please keep this; the case file name is ${CaseName}
+get_casename
+
+# rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# end-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# print performance: FPS / per-step time / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance sec/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+
+# print training accuracy
+echo "Final Train Accuracy : ${train_accuracy}"
+
+# loss value of the last iteration, no need to modify
+ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`)
+
+# key information is written to ${CaseName}.log, no need to modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..1650840b596cbb761d45af70bd628c9f2f4bcdb4
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py
@@ -0,0 +1,255 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from scipy.io import loadmat
+from packages.lifting import PoseEstimator
+from packages.lifting.utils import gaussian_heatmap, config, plot_pose, draw_limbs
+import cv2
+import os
+import numpy as np
+from tqdm import tqdm
+import argparse
+
+# set up argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--data_path', type=str, default='./dataset/MPII')  # dataset path
+parser.add_argument('--batch_size', type=int, default=4)  # batch size
+parser.add_argument('--save_step', type=int, default=5)  # checkpoint saving interval, in epochs
+parser.add_argument('--epochs', type=int, default=10)  # number of training epochs
+parser.add_argument('--output_path', type=str,  # where to save the checkpoint
+ default='./checkpoint/model.ckpt')
+
+parser.add_argument('--label_path', type=str,
+ default='./dataset/MPII/mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat') #label path
+parser.add_argument('--prob_model_path', type=str,
+ default='./data/prob_model/prob_model_params.mat') # 3d model path
+parser.add_argument('--init_session_path', type=str,
+ default='./data/init_session/init')
+args = parser.parse_args()
+
+
+
+input_width = 654
+input_height = 368
+
+#if not os.path.exists(OUT_SESSION_PATH):
+# os.mkdir(OUT_SESSION_PATH)
+
+def save_joints():  # parse the MPII annotation .mat file into a per-image label dictionary
+ mat = loadmat(args.label_path)
+ d = {}
+ for i, (anno, train_flag) in enumerate(
+ zip(mat['RELEASE']['annolist'][0, 0][0],
+ mat['RELEASE']['img_train'][0, 0][0],
+ )):
+ img_fn = anno['image']['name'][0, 0][0]
+ train_flag = int(train_flag)
+
+ if 'annopoints' in str(anno['annorect'].dtype):
+ # only one person
+ annopoints = anno['annorect']['annopoints'][0]
+ head_x1s = anno['annorect']['x1'][0]
+ head_y1s = anno['annorect']['y1'][0]
+ head_x2s = anno['annorect']['x2'][0]
+ head_y2s = anno['annorect']['y2'][0]
+ datas = []
+ for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(
+ annopoints, head_x1s, head_y1s, head_x2s, head_y2s):
+ if annopoint != []:
+ head_rect = [float(head_x1[0, 0]),
+ float(head_y1[0, 0]),
+ float(head_x2[0, 0]),
+ float(head_y2[0, 0])]
+ # build feed_dict
+ feed_dict = {}
+ feed_dict['width'] = int(abs(float(head_x2[0, 0]) - float(head_x1[0, 0])))
+ feed_dict['height'] = int(abs(float(head_y2[0, 0]) - float(head_y1[0, 0])))
+
+ # joint coordinates
+ annopoint = annopoint['point'][0, 0]
+ j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
+ x = [x[0, 0] for x in annopoint['x'][0]]
+ y = [y[0, 0] for y in annopoint['y'][0]]
+ joint_pos = {}
+ for _j_id, (_x, _y) in zip(j_id, zip(x, y)):
+ joint_pos[str(_j_id)] = [float(_x), float(_y)]
+
+ # visibility list
+ if 'is_visible' in str(annopoint.dtype):
+ vis = [v[0] if v else [0] for v in annopoint['is_visible'][0]]
+ vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
+ else:
+ vis = None
+ feed_dict['x'] = x
+ feed_dict['y'] = y
+ feed_dict['vis'] = vis
+ feed_dict['filename'] = img_fn
+ if len(joint_pos) == 16:
+ data = {
+ 'filename': img_fn,
+ 'train': train_flag,
+ 'head_rect': head_rect,
+ 'is_visible': vis,
+ 'joint_pos': joint_pos
+ }
+ datas.append(data)
+
+ for data in datas:
+ if d.get(data['filename']):
+ d.get(data['filename']).append(data)
+ else:
+ d[data['filename']] = [data]
+ filt = []
+ for key, value in d.items():
+ if len(value) != 1:
+ filt.append(key)
+ for key in filt:
+ del d[key]
+ return d
+
+
+def generate_center_map(center_poses, img_shape):  # generate a centre heat-map from the labelled person position
+ """
+ Given the position of the person and the size of the input image, it
+ generates a heat-map where a Gaussian distribution is fit at the
+ position of the person in the image.
+ """
+ img_height = img_shape[1]
+ img_width = img_shape[0]
+ # generate a heat-map with a Gaussian kernel
+ center_map = [gaussian_heatmap(
+ img_height, img_width, center_poses[1], center_poses[0],
+ config.SIGMA_CENTER, config.SIGMA_CENTER)]
+
+ out = np.zeros_like(center_map[0])
+ # multiple map composition
+ for map in center_map:
+ out += map
+ out[out > 1] = 1
+ return out
+
+
+def preprocess(k, input_width=654, input_height=368):  # read and preprocess one image and its label
+ # read image
+ image = cv2.imread(os.path.join(args.data_path, 'images', k))
+ ratio = (input_width / image.shape[1], input_height / image.shape[0])
+ image = cv2.resize(image, (input_width, input_height))
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb
+ # obtain label
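+ # the label is the midpoint of MPII joints 6 and 7 (pelvis and thorax in the
+ # standard MPII ordering), i.e. an approximate torso centre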
+ labels = [d[k][0]['joint_pos']['7'][0] * 0.5 + d[k][0]['joint_pos']['6'][0] * 0.5, d[k][0]['joint_pos']['7'][1] * 0.5 + d[k][0]['joint_pos']['6'][1] * 0.5]
+ labels[0] *= ratio[0]
+ labels[1] *= ratio[1]
+ # obtain headsize
+ headsize = d[k][0]['head_rect']
+ headsize = (headsize[2] - headsize[0]) * 0.5 + (headsize[3] - headsize[1]) * 0.5
+ heatmap_gt = generate_center_map(labels, (input_width, input_height))# generate a heat-map
+ return image, labels, heatmap_gt, headsize
+
+
+def get_batch(idxs):
+ name_lst = np.array(list(d.keys()))[idxs]
+ images = []
+ labels = []
+ heatmap_gts = []
+ headsizes = []
+ for name in name_lst:
+ image, label, heatmap_gt, headsize = preprocess(name)
+ images.append(image)
+ labels.append(label)
+ heatmap_gts.append(heatmap_gt)
+ headsizes.append(headsize)
+ images = np.stack(images, 0)
+ labels = np.stack(labels, 0)
+ heatmap_gts = np.stack(heatmap_gts, 0)
+ headsizes = np.stack(headsizes, 0)
+ return images, labels, heatmap_gts, headsizes
+
+
+def calCKh(pred, label, headsize):
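+ # PCKh@0.5: a prediction counts as correct when its distance to the label is
+ # below half of the annotated head size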
+ dist = np.sqrt(np.sum((np.array(pred) - np.array(label)) ** 2)) / headsize
+ CKh = 1 if dist < 0.5 else 0
+ # print(Chk)
+ return CKh
+
+def shuffle_batch():
+ batch_size = args.batch_size
+ # generate batch
+
+ batch_idxs = np.random.permutation(len(d))
+ np.random.shuffle(batch_idxs)
+
+ # hold out roughly 10% of the data for validation (train split rounded to a multiple of batch_size)
+ num_train_idxs = (len(d) * 9 // (batch_size * 10)) * batch_size
+
+ train_batch_idxs = batch_idxs[:num_train_idxs]
+ train_batch_idxs = np.array_split(train_batch_idxs, len(train_batch_idxs) // batch_size)
+ test_batch_idxs = batch_idxs[num_train_idxs: ]
+ test_batch_idxs = np.array_split(test_batch_idxs, len(test_batch_idxs) // 1)
+
+ return train_batch_idxs, test_batch_idxs
+
+
+def main():
+ # define model
+ pose_estimator = PoseEstimator((input_height, input_width, 3), args.init_session_path, args.prob_model_path)
+ # initialization
+ pose_estimator.initialise()
+
+ train_batch_idxs, test_batch_idxs = shuffle_batch()
+
+ # start training
+ epochs = args.epochs
+ print('Start training!')
+ for epoch in range(epochs):
+ train_losses = 0
+ for i, idxs in enumerate(tqdm(train_batch_idxs)):
+ images, labels, heatmap_gts, headsizes = get_batch(idxs)
+ # input network training
+ train_loss, heatmap_pred = pose_estimator.train(images, heatmap_gts)
+ train_losses += train_loss
+ print('Epoch {}: loss={}'.format(epoch, train_losses))
+
+ if (epoch+1) % args.save_step == 0:
+ pose_estimator.saver.save(pose_estimator.session, args.output_path) # save checkpoint
+ print('Checkpoint saved successfully!')
+ print('Start validation!')
+ # validation
+ CKh_num = 0
+ for i, idxs in enumerate(test_batch_idxs):
+ # generate batch
+ images, labels, heatmap_gts, headsizes = get_batch(idxs)
+ # run inference on the validation image
+ pose_2d, heatmap_pred = pose_estimator.estimate(images[0])
+
+ if len(pose_2d) < 1:
+ continue
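+ # joints 8, 11 and 1 of the 14-joint prediction are (in the CPM joint order)
+ # the two hips and the neck, so this approximates the torso centre used as label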
+ pose_2d = [pose_2d[0, 8, 1] * 0.25 + pose_2d[0, 11, 1] * 0.25 + pose_2d[0, 1, 1] * 0.5,
+ pose_2d[0, 8, 0] * 0.25 + pose_2d[0, 11, 0] * 0.25 + pose_2d[0, 1, 0] * 0.5]
+ CKh = calCKh(pose_2d, labels[0], headsizes[0])
+ CKh_num += CKh
+ PCKh = CKh_num / len(test_batch_idxs)
+ print('Epoch {}: Validation PCKh@0.5:{} '.format(epoch, PCKh))
+
+ train_batch_idxs, test_batch_idxs = shuffle_batch()
+
+ # close model
+ pose_estimator.close()
+d = save_joints()
+
+if __name__ == '__main__':
+ import sys
+
+ sys.exit(main())
+