From 05dda5d7d82d1027228a62a185b9031dca965b18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:00:21 +0000 Subject: [PATCH 01/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep. --- .../dataset / MPII / images/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 906c54c442de2b4c8d5665b906f5ef3b6a1619bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:02:40 +0000 Subject: [PATCH 02/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py. --- .../train.py | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py new file mode 100644 index 000000000..1650840b5 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/train.py @@ -0,0 +1,255 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
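+#
+# train.py -- trains the 2D pose-estimation network of Lifting-from-the-Deep on MPII:
+# it parses the MPII annotation .mat file, keeps single-person images that have all
+# 16 joints annotated, fits the PoseEstimator to person-centre heat-maps built around
+# the midpoint of joints 6 and 7, and reports PCKh@0.5 on a held-out split after
+# every epoch.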
+from npu_bridge.npu_init import * +from scipy.io import loadmat +from packages.lifting import PoseEstimator +from packages.lifting.utils import gaussian_heatmap, config, plot_pose, draw_limbs +import cv2 +import os +import numpy as np +from tqdm import tqdm +import argparse + +# set up argparse +parser = argparse.ArgumentParser() +parser.add_argument('--data_path', type=str, default='./dataset/MPII') # dataset path +parser.add_argument('--batch_size', type=int, default=4) # batchsize +parser.add_argument('--save_step', type=int, default=5) # model saved interval +parser.add_argument('--epochs', type=int, default=10) # train epoch +parser.add_argument('--output_path',type=str, # where to save checkpoint + default='./checkpoint/model.ckpt') + +parser.add_argument('--label_path', type=str, + default='./dataset/MPII/mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat') #label path +parser.add_argument('--prob_model_path', type=str, + default='./data/prob_model/prob_model_params.mat') # 3d model path +parser.add_argument('--init_session_path',type=str, + default='./data/init_session/init') +args = parser.parse_args() + + + +input_width = 654 +input_height = 368 + +#if not os.path.exists(OUT_SESSION_PATH): +# os.mkdir(OUT_SESSION_PATH) + +def save_joints(): # read mpii dataset image and label + mat = loadmat(args.label_path) + d = {} + for i, (anno, train_flag) in enumerate( + zip(mat['RELEASE']['annolist'][0, 0][0], + mat['RELEASE']['img_train'][0, 0][0], + )): + img_fn = anno['image']['name'][0, 0][0] + train_flag = int(train_flag) + + if 'annopoints' in str(anno['annorect'].dtype): + # only one person + annopoints = anno['annorect']['annopoints'][0] + head_x1s = anno['annorect']['x1'][0] + head_y1s = anno['annorect']['y1'][0] + head_x2s = anno['annorect']['x2'][0] + head_y2s = anno['annorect']['y2'][0] + datas = [] + for annopoint, head_x1, head_y1, head_x2, head_y2 in zip( + annopoints, head_x1s, head_y1s, head_x2s, head_y2s): + if annopoint != []: + head_rect = [float(head_x1[0, 0]), + float(head_y1[0, 0]), + float(head_x2[0, 0]), + float(head_y2[0, 0])] + # build feed_dict + feed_dict = {} + feed_dict['width'] = int(abs(float(head_x2[0, 0]) - float(head_x1[0, 0]))) + feed_dict['height'] = int(abs(float(head_y2[0, 0]) - float(head_y1[0, 0]))) + + # joint coordinates + annopoint = annopoint['point'][0, 0] + j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]] + x = [x[0, 0] for x in annopoint['x'][0]] + y = [y[0, 0] for y in annopoint['y'][0]] + joint_pos = {} + for _j_id, (_x, _y) in zip(j_id, zip(x, y)): + joint_pos[str(_j_id)] = [float(_x), float(_y)] + + # visiblity list + if 'is_visible' in str(annopoint.dtype): + vis = [v[0] if v else [0] for v in annopoint['is_visible'][0]] + vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)]) + else: + vis = None + feed_dict['x'] = x + feed_dict['y'] = y + feed_dict['vis'] = vis + feed_dict['filename'] = img_fn + if len(joint_pos) == 16: + data = { + 'filename': img_fn, + 'train': train_flag, + 'head_rect': head_rect, + 'is_visible': vis, + 'joint_pos': joint_pos + } + datas.append(data) + + for data in datas: + if d.get(data['filename']): + d.get(data['filename']).append(data) + else: + d[data['filename']] = [data] + filt = [] + for key, value in d.items(): + if len(value) != 1: + filt.append(key) + for key in filt: + del d[key] + return d + + +def generate_center_map(center_poses, img_shape): # input label position and generate a heat-map + """ + Given the position of the person and the size of the input image it + 
generates + a heat-map where a gaissian distribution is fit in the position of the + person in the image. + """ + img_height = img_shape[1] + img_width = img_shape[0] + # Gaussian operator generate a heat-map + center_map = [gaussian_heatmap( + img_height, img_width, center_poses[1], center_poses[0], + config.SIGMA_CENTER, config.SIGMA_CENTER)] + + out = np.zeros_like(center_map[0]) + # multiple map composition + for map in center_map: + out += map + out[out > 1] = 1 + return out + + +def preprocess(k, input_width=654, input_height=368): # read image pretreatment + # read image + image = cv2.imread(os.path.join(args.data_path, 'images', k)) + ratio = (input_width / image.shape[1], input_height / image.shape[0]) + image = cv2.resize(image, (input_width, input_height)) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb + # obtain label + labels = [d[k][0]['joint_pos']['7'][0] * 0.5 + d[k][0]['joint_pos']['6'][0] * 0.5, d[k][0]['joint_pos']['7'][1] * 0.5 + d[k][0]['joint_pos']['6'][1] * 0.5] + labels[0] *= ratio[0] + labels[1] *= ratio[1] + # obtain headsize + headsize = d[k][0]['head_rect'] + headsize = (headsize[2] - headsize[0]) * 0.5 + (headsize[3] - headsize[1]) * 0.5 + heatmap_gt = generate_center_map(labels, (input_width, input_height))# generate a heat-map + return image, labels, heatmap_gt, headsize + + +def get_batch(idxs): + name_lst = np.array(list(d.keys()))[idxs] + images = [] + labels = [] + heatmap_gts = [] + headsizes = [] + for name in name_lst: + image, label, heatmap_gt, headsize = preprocess(name) + images.append(image) + labels.append(label) + heatmap_gts.append(heatmap_gt) + headsizes.append(headsize) + images = np.stack(images, 0) + labels = np.stack(labels, 0) + heatmap_gts = np.stack(heatmap_gts, 0) + headsizes = np.stack(headsizes, 0) + return images, labels, heatmap_gts, headsizes + + +def calCKh(pred, label, headsize): + dist = np.sqrt(np.sum((np.array(pred) - np.array(label)) ** 2)) / headsize + CKh = 1 if dist < 0.5 else 0 + # print(Chk) + return CKh + +def shuffle_batch(): + batch_size = args.batch_size + # generate batch + + batch_idxs = np.random.permutation(len(d)) + np.random.shuffle(batch_idxs) + + # 10-fold cross-validation + num_train_idxs = (len(d) * 9 // (batch_size * 10)) * batch_size + + train_batch_idxs = batch_idxs[:num_train_idxs] + train_batch_idxs = np.array_split(train_batch_idxs, len(train_batch_idxs) // batch_size) + test_batch_idxs = batch_idxs[num_train_idxs: ] + test_batch_idxs = np.array_split(test_batch_idxs, len(test_batch_idxs) // 1) + + return train_batch_idxs, test_batch_idxs + + +def main(): + # define model + pose_estimator = PoseEstimator((input_height, input_width, 3), args.init_session_path, args.prob_model_path) + # initialization + pose_estimator.initialise() + + train_batch_idxs, test_batch_idxs = shuffle_batch() + + # start training + epochs = args.epochs + print('Start training!') + for epoch in range(epochs): + train_losses = 0 + for i, idxs in enumerate(tqdm(train_batch_idxs)): + images, labels, heatmap_gts, headsizes = get_batch(idxs) + # input network training + train_loss, heatmap_pred = pose_estimator.train(images, heatmap_gts) + train_losses += train_loss + print('Epoch {}: loss={}'.format(epoch, train_losses)) + + if (epoch+1) % args.save_step == 0: + pose_estimator.saver.save(pose_estimator.session, args.output_path) # save checkpoint + print('Checkpoint saved successfully!') + print('Start validation!') + # validation + CKh_num = 0 + for i, idxs in enumerate(test_batch_idxs): + # generate batch + 
images, labels, heatmap_gts, headsizes = get_batch(idxs) + # run network inference + pose_2d, heatmap_pred = pose_estimator.estimate(images[0]) + + if len(pose_2d) < 1: + continue + pose_2d = [pose_2d[0, 8, 1] * 0.25 + pose_2d[0, 11, 1] * 0.25 + pose_2d[0, 1, 1] * 0.5, + pose_2d[0, 8, 0] * 0.25 + pose_2d[0, 11, 0] * 0.25 + pose_2d[0, 1, 0] * 0.5] + CKh = calCKh(pose_2d, labels[0], headsizes[0]) + CKh_num += CKh + PCKh = CKh_num / len(test_batch_idxs) + print('Epoch {}: Validation PCKh@0.5:{} '.format(epoch, PCKh)) + + train_batch_idxs, test_batch_idxs = shuffle_batch() + + # close model + pose_estimator.close() +d = save_joints() + +if __name__ == '__main__': + import sys + + sys.exit(main()) + 
-- 
Gitee

From 04174253ac253f7ff718536752cd5aab1de5a9de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?=
Date: Tue, 22 Mar 2022 10:03:39 +0000
Subject: [PATCH 03/43] add
 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md.

---
 .../README.md | 177 ++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md

diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md
new file mode 100644
index 000000000..734516dee
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/README.md
@@ -0,0 +1,177 @@
+## Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Computer Vision**
+
+**Framework: TensorFlow 1.15.0**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: training code, based on the TensorFlow framework, for detecting human poses in images**
+
+## Overview
+
+    LiftingFromTheDeep
+
+- Reference paper:
+
+    https://openaccess.thecvf.com/content_cvpr_2017/papers/Tome_Lifting_From_the_CVPR_2017_paper.pdf
+
+- Reference implementation:
+
+    https://github.com/DenisTome/Lifting-from-the-Deep-release
+
+
+## Default Configuration
+
+- Training data preprocessing:
+
+  - Input image size: 1080*720
+  - Input image format: jpg
+
+- Training hyperparameters
+
+  - Batch size: 4
+  - Train epoch: 30
+
+## Quick Start
+
+Dataset preparation
+Training uses the MPII dataset, which users need to obtain by themselves.
+OBS bucket address:
+>obs://cann-id0891/npu/
+
+## Model Training
+Single-card training
+
+1. Configure the training parameters
+2. Launch training
+```
+bash train_full_1p.sh \
+    --data_path="./dataset/MPII" \
+    --output_path="./checkpoint/model.ckpt"
+```
+
+
+## Training Results
+
+- Accuracy comparison
+
+|Accuracy metric|GPU measured|NPU measured|
+|---|---|---|
+|PCKh@0.5|0.819410|0.814496|
+
+- Performance comparison
+
+|Performance metric|GPU measured|NPU measured|
+|---|---|---|
+|FPS|10.04|10.56|
+
+
+## Directory Structure
+
+```
+├── README.md                 // project documentation
+├── train.py                  // network training
+├── online_inference.py       // online inference code for a single image
+├── evaluate.py               // measures the model's accuracy on the dataset
+├── requirements.txt          // dependency list
+├── LICENSE
+├── packages
+│   ├── lifting
+│   │   ├── __init__.py
+│   │   ├── _pose_estimator.py
+│   │   ├── utils
+│   │   │   ├── __init__.py
+│   │   │   ├── config.py
+│   │   │   ├── cpm.py
+│   │   │   ├── draw.py
+│   │   │   ├── prob_model.py
+│   │   │   ├── process.py
+│   │   │   ├── upright.py
+├── checkpoint                // where checkpoints are saved
+├── data
+│   ├── prob_model
+│   │   ├── prob_model_params.mat
+│   ├── init_session
+│   │   ├── checkpoint
+│   │   ├── init.data-00000-of-00001
+│   │   ├── init.index
+│   │   ├── init.meta
+├── dataset                   // dataset directory
+│   ├── MPII
+│   │   ├── images
+│   │   │   ├── 000001163.jpg
+│   │   │   ├── 000003072.jpg
+│   │   │   ├── ...
+│   │   ├── mpii_human_pose_v1_u12_2
+│   │   │   ├── bsd.txt
+│   │   │   ├── mpii_human_pose_v1_u12_1.mat
+│   │   │   ├── README.md
+├── result                    // single-image inference results
+│   ├── result2d.jpg
+│   ├── result3d_0.jpg
+├── test
+│   ├── train_performance_1p.sh   // launch script for the single-card performance check
+│   ├── train_full_1p.sh          // launch script for single-card full training
+
+```
+
+
+## Launch Scripts
+The test directory contains the train_performance_1p.sh and train_full_1p.sh scripts,
+which can be used to check training performance and training accuracy, respectively.
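+
+The `Final Train Accuracy` reported by `train_full_1p.sh` is the PCKh@0.5 score
+computed by `calCKh` in `train.py` and `evaluate.py`: a predicted person centre
+counts as correct when its distance to the labelled centre is less than half of
+the annotated head size. A minimal standalone sketch of that check (NumPy only;
+the function and variable names below are illustrative, not part of this repository):
+
+```python
+import numpy as np
+
+def pckh_at_05(preds, labels, head_sizes):
+    """Fraction of predictions that fall within 0.5 * head size of the ground truth."""
+    preds = np.asarray(preds, dtype=float)            # (N, 2) predicted points
+    labels = np.asarray(labels, dtype=float)          # (N, 2) ground-truth points
+    head_sizes = np.asarray(head_sizes, dtype=float)  # (N,) per-image head sizes
+    dists = np.linalg.norm(preds - labels, axis=1) / head_sizes
+    return float(np.mean(dists < 0.5))
+
+# toy example: the first prediction is inside the threshold, the second is not
+print(pckh_at_05([[10, 10], [50, 40]], [[12, 11], [80, 80]], [10.0, 10.0]))  # 0.5
+```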
+
+### Checking performance
+Command:
+```
+bash train_performance_1p.sh \
+    --data_path="./dataset/MPII" \
+    --output_path="./checkpoint/model.ckpt"
+```
+Console output:
+
+> awk: cmd. line:1: fatal: division by zero attempted
+>------------------ INFO NOTICE START------------------
+>INFO, your task have used Ascend NPU, please check your result.
+>------------------ INFO NOTICE END------------------
+>------------------ Final result ------------------
+>Final Performance images/sec : 10.56
+>Final Performance sec/step : 0.38
+>E2E Training Duration sec : 754
+>Final Train Accuracy :
+
+**Note**: Final Train Accuracy is empty here because the performance check does not run the validation step, so accuracy cannot be measured.
+The error message on the first line of the console log also stems from this.
+
+### Checking accuracy
+Command:
+```
+bash train_full_1p.sh \
+    --data_path="./dataset/MPII" \
+    --output_path="./checkpoint/model.ckpt"
+```
+
+Console output:
+>------------------ INFO NOTICE START------------------
+>INFO, your task have used Ascend NPU, please check your result.
+>------------------ INFO NOTICE END------------------
+>------------------ Final result ------------------
+>Final Performance images/sec : 10.19
+>Final Performance sec/step : 0.39
+>E2E Training Duration sec : 21154
+>Final Train Accuracy : 0.814496
+
+## Online Inference Results
+### Output images:
+![2d result](./result/result2d.jpg)
+![3d result](./result/result3d_0.jpg)
+
+
-- 
Gitee

From 9eee92f84329724fd28290b4acbd58a50838f69f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?=
Date: Tue, 22 Mar 2022 10:04:02 +0000
Subject: [PATCH 04/43] add
 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE.

---
 .../LICENSE | 251 ++++++++++++++++++
 1 file changed, 251 insertions(+)
 create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE

diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE
new file mode 100644
index 000000000..4246e35a2
--- /dev/null
+++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/LICENSE
@@ -0,0 +1,251 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +## Some of TensorFlow's code is derived from Caffe, which is subject to the following copyright notice: + +COPYRIGHT + +All contributions by the University of California: + +Copyright (c) 2014, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: + +Copyright (c) 2014, the respective contributors +All rights reserved. + +Caffe uses a shared copyright model: each contributor holds copyright over +their contributions to Caffe. The project versioning records all such +contribution and copyright details. If a contributor wants to further mark +their specific copyright on a particular contribution, they should indicate +their copyright solely in the commit message of the change when it is +committed. + +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +CONTRIBUTION AGREEMENT + +By contributing to the BVLC/caffe repository through pull-request, comment, +or otherwise, the contributor releases their content to the +license and copyright terms herein. \ No newline at end of file -- Gitee From cc93cdc053ab39180562f92e69cb1c7a7bb10ebd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:04:28 +0000 Subject: [PATCH 05/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py. --- .../evaluate.py | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py new file mode 100644 index 000000000..cff5a53ce --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/evaluate.py @@ -0,0 +1,215 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
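+#
+# evaluate.py -- measures PCKh@0.5 of a trained checkpoint on MPII: it applies
+# the same annotation filtering as train.py, runs PoseEstimator.estimate() on
+# one image at a time, and counts a prediction as correct when its distance to
+# the labelled person centre is below half of the annotated head size.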
+from npu_bridge.npu_init import * + +from packages.lifting import PoseEstimator +from packages.lifting.utils import gaussian_heatmap, config +import cv2 +import argparse +import os +import numpy as np +from scipy.io import loadmat +from tqdm import tqdm + + +# set up argparse +parser = argparse.ArgumentParser() +parser.add_argument('--data_path', type=str, default='./dataset/MPII') # MPII dataset root +parser.add_argument('--label_path', type=str, + default='./dataset/MPII/mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat') #label path +parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/model.ckpt') # checkpoint path +parser.add_argument('--prob_model_path', type=str, + default='./data/prob_model/prob_model_params.mat') # 3d model path + +args = parser.parse_args() +input_width = 654 +input_height = 368 + + +def save_joints(): # read mpii dataset and label + mat = loadmat(args.label_path) + d = {} + fd = [] + for i, (anno, train_flag) in enumerate( + zip(mat['RELEASE']['annolist'][0, 0][0], + mat['RELEASE']['img_train'][0, 0][0], + )): + img_fn = anno['image']['name'][0, 0][0] + train_flag = int(train_flag) + + if 'annopoints' in str(anno['annorect'].dtype): + # only one person + annopoints = anno['annorect']['annopoints'][0] + head_x1s = anno['annorect']['x1'][0] + head_y1s = anno['annorect']['y1'][0] + head_x2s = anno['annorect']['x2'][0] + head_y2s = anno['annorect']['y2'][0] + datas = [] + for annopoint, head_x1, head_y1, head_x2, head_y2 in zip( + annopoints, head_x1s, head_y1s, head_x2s, head_y2s): + if annopoint != []: + head_rect = [float(head_x1[0, 0]), + float(head_y1[0, 0]), + float(head_x2[0, 0]), + float(head_y2[0, 0])] + # build feed_dict + feed_dict = {} + feed_dict['width'] = int(abs(float(head_x2[0, 0]) - float(head_x1[0, 0]))) + feed_dict['height'] = int(abs(float(head_y2[0, 0]) - float(head_y1[0, 0]))) + + # joint coordinates + annopoint = annopoint['point'][0, 0] + j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]] + x = [x[0, 0] for x in annopoint['x'][0]] + y = [y[0, 0] for y in annopoint['y'][0]] + joint_pos = {} + for _j_id, (_x, _y) in zip(j_id, zip(x, y)): + joint_pos[str(_j_id)] = [float(_x), float(_y)] + + # visiblity list + if 'is_visible' in str(annopoint.dtype): + vis = [v[0] if v else [0] for v in annopoint['is_visible'][0]] + vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)]) + else: + vis = None + feed_dict['x'] = x + feed_dict['y'] = y + feed_dict['vis'] = vis + feed_dict['filename'] = img_fn + if len(joint_pos) == 16: + data = { + 'filename': img_fn, + 'train': train_flag, + 'head_rect': head_rect, + 'is_visible': vis, + 'joint_pos': joint_pos + } + datas.append(data) + + for data in datas: + head_center = [(data['head_rect'][0] + data['head_rect'][2]) / 2, (data['head_rect'][1] + data['head_rect'][3]) / 2] + if d.get(data['filename']): + d.get(data['filename']).append(data) + else: + d[data['filename']] = [data] + filt = [] + for key, value in d.items(): + if len(value) != 1: + filt.append(key) + for key in filt: + del d[key] + return d + + +def generate_center_map(center_poses, img_shape): # input label position and generate a heat-map + """ + Given the position of the person and the size of the input image it + generates + a heat-map where a gaissian distribution is fit in the position of the + person in the image. 
+ """ + img_height = img_shape[1] + img_width = img_shape[0] + # Gaussian operator generate a heat-map + center_map = [gaussian_heatmap( + img_height, img_width, center_poses[1], center_poses[0], + config.SIGMA_CENTER, config.SIGMA_CENTER)] + + out = np.zeros_like(center_map[0]) + # multiple map composition + for map in center_map: + out += map + out[out > 1] = 1 + return out + + +def preprocess(k, input_width=654, input_height=368): # read image pretreatment + # read image + image = cv2.imread(os.path.join(args.data_path, 'images', k)) + ratio = (input_width / image.shape[1], input_height / image.shape[0]) + image = cv2.resize(image, (input_width, input_height)) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb + # obtain label + labels = [d[k][0]['joint_pos']['7'][0] * 0.5 + d[k][0]['joint_pos']['6'][0] * 0.5, d[k][0]['joint_pos']['7'][1] * 0.5 + d[k][0]['joint_pos']['6'][1] * 0.5] + labels[0] *= ratio[0] + labels[1] *= ratio[1] + # obtain headSize + headSize = d[k][0]['head_rect'] + headSize = (headSize[2] - headSize[0]) * 0.5 + (headSize[3] - headSize[1]) * 0.5 + heatmap_gt = generate_center_map(labels, (input_width, input_height))# generate a heat-map + return image, labels, heatmap_gt, headSize + +def get_batch(idxs): # read batch data + name_lst = np.array(list(d.keys()))[idxs] + images = [] + labels = [] + heatmap_gts = [] + headSizes = [] + for name in name_lst: + image, label, heatmap_gt, headSize = preprocess(name) + images.append(image) + labels.append(label) + heatmap_gts.append(heatmap_gt) + headSizes.append(headSize) + images = np.stack(images, 0) + labels = np.stack(labels, 0) + heatmap_gts = np.stack(heatmap_gts, 0) + headSizes = np.stack(headSizes, 0) + return images, labels, heatmap_gts, headSizes + + +def calCKh(pred, label, headSize): + dist = np.sqrt(np.sum((np.array(pred) - np.array(label)) ** 2)) / headSize + CKh = 1 if dist < 0.5 else 0 + # print(Chk) + return CKh + +def main(): + # generate batch + + batch_idxs = np.random.permutation(len(d)) + batch_idxs = np.array_split(batch_idxs, len(d)) + + # model definition + pose_estimator = PoseEstimator((input_height, input_width, 3), args.checkpoint_path, args.prob_model_path) + + # model initialisation + pose_estimator.initialise() + + # validation + CKh_num = 0 + for i, idxs in enumerate(tqdm(batch_idxs)): + # generate batch + images, labels, heatmap_gts, headSizes = get_batch(idxs) + pose_2d, heatmap_pred = pose_estimator.estimate(images[0]) + + if len(pose_2d) < 1: + continue + pred = [pose_2d[0, 8, 1] * 0.25 + pose_2d[0, 11, 1] * 0.25 + pose_2d[0, 1, 1] * 0.5, + pose_2d[0, 8, 0] * 0.25 + pose_2d[0, 11, 0] * 0.25 + pose_2d[0, 1, 0] * 0.5] + + CKh = calCKh(pred, labels[0], headSizes[0]) + CKh_num += CKh + PCKh = CKh_num / len(batch_idxs) + print('PCKh@0.5: ', PCKh) + + # close model + pose_estimator.close() + + +d = save_joints() +if __name__ == '__main__': + import sys + sys.exit(main()) + -- Gitee From cfdf6bf656e975f09917ba23dbffc0767850b03f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:05:05 +0000 Subject: [PATCH 06/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt. 
--- .../modelzoo_level.txt | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt new file mode 100644 index 000000000..b1413df69 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/modelzoo_level.txt @@ -0,0 +1,5 @@ +FuncStatus:OK +PrecisionStatus:OK +PerfStatus:OK +GPUStatus:OK +NPUMigrationStatus:OK \ No newline at end of file -- Gitee From 1b3c6532ae9f2dcdbf178c3943116a821a9bcafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:05:39 +0000 Subject: [PATCH 07/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep. --- .../data/init_session/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/init_session/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 07e0a609426aa87c3da8b0447af9a96439b0c9f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:06:02 +0000 Subject: [PATCH 08/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20prob=5Fmodel?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data/prob_model/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/prob_model/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/prob_model/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/data/prob_model/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From e0732579a2deebcd5ceb689663c8eb6bbf691c64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:06:25 +0000 Subject: [PATCH 09/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20checkpoint?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/checkpoint/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 4ec65be1d85c14cdcf69a487b8bfc89678506975 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:06:54 +0000 Subject: [PATCH 10/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20result?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep new file mode 
100644 index 000000000..e69de29bb -- Gitee From 67b6044e66627d0a1d7ed1674e310db60ad32be1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:07:22 +0000 Subject: [PATCH 11/43] upload --- .../result/result2d.jpg | Bin 0 -> 38007 bytes .../result/result3d_0.jpg | Bin 0 -> 44394 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecebc1e1e82a673646474cde182db656db3ddc4f GIT binary patch
[binary JPEG data omitted: result2d.jpg (38007 bytes) and result3d_0.jpg (44394 bytes)]
z4p(GfQZ_w_ZJ1frwbe_h9GudWOTr3Q+1C^1j}i4cCSEENWaAbrb-i_r>WHPD?^_DZ zq4TUwdIRn|V3nzgQNbZZ%)PXl zgB6WIfBRHkE)U>-PMK)<`dGIv2dFSO(@x<)zf<@bOAZV7dIBL+pwsPK5rR$JkCK`` zuYXV`t$$FEBc5lcw0?c&d#Te~n61=5IeNH;-g-|sk%xsfu98p%7 zH`!cn-tI82U-><5{-Pn3%5Po~Tr5j-iEH(^HEEZmg3y7Qha}jMeYC!G!|^~?FCT*N zICXsD(i3d4+S<{jJjv#$n&GEu3K^6(@t20t-wIo|O+n_}9WEaYjVf#MHH-!)Bu~7r zo!)3@y)ZCW>YlKKj2a3--g6CRYtI#_JUjB@thM&sxPJ8vPrUEXr4povR&IQr^ug9l zb$DqnnYKw9|An`d>egrP{L$eLD-y$eSI@HZARXm0<>DHY_w$+FbbYsYRRaOUgL5&p zj}F#mk9|Kwc|SS#ZDZg@?H6iwRy?x{foz1|Vs*HsRUD@e>E)jU55>309k6~Lrj278 z1it-hAWb_sMH_^*>!Q!}C)h6hht1YGi`aZo0-w1` zPb;?MCOfcGxqj%|om=&#q{Pp>0!Za0?{PDlo7>XjtPu&hh_a%lMme5xd zbHKpB=gdzkQd!fPX2=0Lv2fnFrwu!1tZg$ND+pGLPpW!dP=+jI*=LtGvX7+!ZlaT9 z%qRS`va841A+h($BTTXLcIVgE zAOlr>USwABa$=I(fDrm!D(M2|bO?uRvO7|eGFRcxOeI8qtUfZ}H-vTBk2TMbNyAB1 zCv{NAV5s>c`r@`it+QVY*DL5|VUK~}^@vcp<@PDgC^NlE?)Im+aj4i?Cveyn+3MNyQ)K1Uk3c*+wF@5qPSn!mm#g;F;Zq8S@! z10#4#Y=8^)@dz#cA}1$7MjZwmstY{pUsGIJNjXK#*Q^IGbeQl1v-m955Y;U8dciuU zSuS@o2-lsz7Ko_oXpq9fg9-v1UI?@-m# zeDChguSq1cw)n{)2+n_FmipQv<1hX^(UG`}+@-8q3ja$hr zDv|%j;mQCIOkStpdK$#1ozjz5bZ+j$hf0i;Pn0dDz0z%bM|4SvgL;Tti+kTV&1TZH z%I3FT@3WWtI-+#t4_+mg#E*)%iAJkd7)`0WhYCnOruL1SnRBvq9SFP^$J{dX^#f!p z{5kq<3f;7EH2`T;f@I~hL;R0z;)jCoO>i?zU6XDQ;@-mUda+hEq$bO2z&ll{>VHsB zX5ePjB5*>5G?g0xZz|W2?Tgb)W5`Uyk$>jE-@wb3@}eC)8$T`1&bPz_@kdXIHj^xy z=Ato`eiF14da_#|QG=Y^5$=j`Uw_>f=xLFPcyrAcj}zUnYUGByAZqLTM#%O$GZ(>l zemW3PD(xlEQ{l1Ti*cXB{cEI(!nvpz=C$r0O0LqLq#EN+?u*uO)R{g;;;&DN7Vtz) z;{#Do|6#@3agVmnVit%Db{mGXC_Y_bLj4uDAA0%dT)WDFt&)bF0AM~rKVNHz?gKYS z8uY=U{H)MF8nFR))TN5ZXP~(Y@k>}p!1y7L0j?cPck2&oGkG7|X9y zsHEftvVY{Hm-N`saV7XXdWoWoyotc9!P3D{IGW&c9nG`^#wfv-tAl!gjjAH*8*L6l zg7DYb6X-9hY*0S%<^MeFPYi*i7W0chVmuv#LvSLJ@l3S;&|V{U%s;QA7&C9XISu3x z0)+U9uESKo%jEknnR=>8kX^8!HVYUAUF3hHQy=s~cyruF_4fCQEj+dU&`(@7(e)-CJ?%S@kZG>l5p@w7NF56PqfWAXz|Xa z-8)K&6m|ANSH>a0`4Z#XI!?o5`es7-kW4e!%a7atPkUDy)zr0x!J;x%@`9)c z8e7mP1feJ@vm=6<#$|yZgDGVQ$e@gBP_BZ2k7^1Pv-bJUR^Uie$p*0%KR}Kvej*k0;nNn8 z&CoJ_OpSh_Pe%&)8tZzZ6{uUD{>amq)+nd5oicG++C5*rWh--&tMN1Sl|{Icqla>6 z@Ur%eVN{4e0m9-fD!0L$n#EjfGIZ_P+AU^L|A+!ZeJlZDWOR*YwQi_Uxn{+Hw-lYb z^YKa;>mLmIh2d0N-3Q>fPh7w2L~9eIGzV00qfTy&FOSAN!6t#{6*5;I4}gFZk`ll`2=${*|PVJ#+Okf^nF}! 
zY<4mvV0RZD2z8o_U_r?Wd3JsQW!?l-k2QG#FKm`zA|Po}$8cG8Rxg_+4f`tQeu3Wc z7>C>nc3rQYz%A(LLZeQ_5poj9(K$)?qZpf^=Dr9ndP71;49={BW=T$;&z=lX0us>^ zX_@QW^IJu)#$ma}Ixvbqp5Xt1rb1I>^*h-=cBPxd<$sq^)*P7dn$Kv7*5;MWf*FET zMwawr^`HCz7Tc4-femup798i*<@^gdX~b@c z75DR$UumBpg>Tp@7q69P5YZZd(-vombAt@#FQxs>A|iD`hd-g6_$$ncce;Q}JdpBPL-&gTj()gdn9h}v4SNzjWh0e9ZMz$a zy>iwVwfxjU+iY|*DGHUx9xHw`7Y;}fj8n{WY7LH^1e5TybWMiAzRW9^6Q57g725Z> zZ(nmT*Btud8AG7%ruZJ}ckC48kBbDxlxt6b{pn)~3p)0woZ-NFyvB4+*+gfr#VW4d zooLl^Di#GnEy$)L}9Ff-?1}$NtEY&EmFNm;qwX zi?5rgA`;2&cXKxRg;_GM6B$40yXla6>sr+axVtkSHT<2`I=)f{0FL@KolB@2Eh-JC zt-519n2DU+Wi9UYNVN@++%5_H6C_m&rgb}ER_RG{qkDA^cUzq|v}q^bOEkG(p)F3* z^%?aUBxUW_Hg;CPs-O<^E?~Zuu}Fo-buqn z+$4#<_kNg-{~wF!MNk>-C=I*9EChZx$H#AGu_Klr$$Xm?$n1)%cBsx9P2QB~x&G7F z2b2hHnUlkIIc#B_|M+uvN|2ND{C}T55_pPe!n*JiXP7Eqocl-cTF9(>7#`iex~>e& z=G7g?jLYP{fN{2!9yPG**^&iryPanZJ!rA}#<+6L0Qqw70|LxNG1Bg<*EWuwSml+{ z3PQ<{{tT9hUSl2V@bNuz?Xyst7IhMo{3GRu_XCidwmGT?{eV%6QbE4I9WtD9YdVzj zI+xp(>z3_$%ckjGt~Yq6Zm-Xxk65O|z@E43kH4)x{ zlQhU6YB7VjXnXV6It$_A&I0rXt^U2-hdFp~f;atS6uRLeY*teB41SvxstRA(3dVt% z)bxG}CYtLgE{BG#-jN^?_V{z~IV$}<-4(l*TTleAnb`;^?~PN1(m$N40Ggi5oLSR0 z2YdJw7~&=YiX7S&MT84IFH6K^@!i>L?*|9s2RimSWw-jYZR<#{uxr+*5%9b@{wi%X)(6XQx0+jRQ*& z0sO7qOySC903BlwOCryi1|50p?J-$~I4t(9PSM7{oO;%Oc1H%EtC^GnL1&>*aol?O zw_4hPOd*V{+6u|KBL=`)w}m6RTVv}+peqlg_|Kdw0N#p1f)fYc$h`z>% literal 0 HcmV?d00001 diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/result3d_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf4bc4a4caf7df7fe7dc532731b06a16b938bff6 GIT binary patch literal 44394 zcmeFZXH*njv@Kd>BuYl2N|YdyGmU{PARr*IB}tMTg@z_0AhhI9P(X4^Z}dzfJgN2 z@bG~DJn--d2#JVEaKj+ST~L1;H+%vD+@OhYBgS1FggXxq(h$+!6Malfr)NcS--BK( zH1R7b*OTgQ2K{j)x45-u*exDXD4c|9r~G&C4$+EGjN3t*Nc6Z)j|4 zZt3am>mL~WKJ;T^a%y^J_V?U89I>{(vAMOqvx_=DIXyeSK>xY?YZo3s@E>O3`u{NO zf3k}PXBYn8IUxCK7aqPJt`X1>65SIerhTkOV&y@1Uo4cA{z>B3>h4=y;`&GiYtL~q zMs5i>59+UJ|62CHW?0z&E6e_eVgI*X^8f__9&YjoXaEp!jmeGW2mZVL_cQo!ANW7$ z1DQhAT~Qk5{OlU#3nQ9lIX2M`IIDbb05YK3qsmdLk@HPcE7|W~>N1$!pG-!xf*s3n zZjR!vS>ck9hsp$KP=z5)j?1=G<3-9UHU)1%&-_bAb=mx?O0hT|Ml62iI-u1JaaD&n z><3>LXLEkdp}Z7iSs3hKZkk$93A_OW4qlsN+Zi>&!t8kTV5Xbn@Y^=x(cPMwx*9v%Pt#`8_oy66U)A`0}bB*i4FvXxy^|v z`<{mAVU&d5CF=!tuG-&*^L&Ar3$5t3bD5P{wjg|%TKw3DJZP?B!`R^TZlW^tBj}KI z>kUx-X5|LZ=9QJZ)1Lv#`=kU(8$7AkYFl$XfR|Qdg#>R6>L1lxueXk4!9FBfQ_b4+evgX zjc4jfU0hto+Ggp#Gu#eHa!y%7WXFPcN0y`E16U>Nj;ZtobVaUGn|klnmoCw>QHEX9 z&Vj0Zx2OY0pFqY?;r4^|eeL!by06v?(3zHXYGHL*tES@0-kxCMNr=#ilbHDkwA3Q> z{J<0$E`i=|bAK;?KO}Ebmhb+c>O`A7%0_eiR9i{V>=opJ@3(GzB18Pmg&a=sOsfF! zwe$uU`~hvgcB(g-u0D_8+~?dfEC%jA66n2qM%;X@26qdoCr|4HQ>!_8FS$t!u}oM8 zY4sSw5Y9QdAqof)GIXsbxB~YeBH{SsF74l~FzH$DMrSz`f;-qVn zT(e;9;r@-rYVAP}1aAx1$MG-KxZNX&3z6+7B>6w6cON;Oay&}(Z<`!#ee&m;(q>$lUAQyG+xdgYeBWDWlwRW> zlyQ>!h6Sq0YPU#o;5KPDLnw+>e@cRrOc5IwC|tgI`Aag5cwRZM_#>=OlrtU(_`)#! 
zxN6U6k-VG#^pp7I2g3y@^Ebqa4%c-t81%ItaMgS1(x#DT{v{X5#R*Sw0aaLN(5fu4 zMseh9hA_tR=ivXJY>A)rwB!L2eJA;{CKo#IilM7gxs0DJtr@4-i-`U3%Q3>)%lzcL z$F}{C^%a(HfJjd$>6ze?4csr<$3kagtEFk&CBHe#XjtQ3jGw6DkgVjmu`p5>7I8@k zU7XYd9c6(E03XSo;4w>~2?-FvaDkR|X0e5~>$%bG6QS0JT-F{K0nuSLer}5OP*awo z_%rbBT{?do_-8$H^Cz3~*Kd4;9j%vU!xVPZFee5f!6zb56W3M<9WCk5;7Ty}q@|NI zjq1f^#Ts$eYI&@xIv|knJxAc29nNRUNl8H@jRiD&S%7~0OEb#iEvMo`D-3*P{+1Is~G3YLZ5z;4IE zH?4LuZeVWqJapVCz;J}+8!BD8?(A76o0bfE_{iW&|7RcVn^9h##Hkql z=(agP8gTFLgTfoY3ZpyzXF|c*zEoB2X~l2Ck2k=AgH$MbzyWU@xBg{f`-?kIb2WFI zf6)za+@0tU(DFjS@GZG+fF2MF=v@A~{>h(1zg41-33;sIgPMC1&=?NlS=Y|m(;I+x zfxPO^akCA@+8%wVdGJfTMiu%OB$6m+?6WaNpZG`6bqvGAtR%`ln^aZNXM`WR@njjd z&c+@O4C^&IYd`t*0O4`cFJYD1k%M&3hDIz?h5lUTh}wJh@$=!h2*;e)5lgmkCxhg~ zxH8+|IaA*O=NGPlnT&j!e7WT;+9#mBa#m%GjpQhrZb0eY_#sC5@+ib@of>h=G%%ih z=1^u)g*J8J_{S37cUXIXq8FI*5A-(ZqD{-|k`Nap_$IKtXlmmYmY~j<*HPQ2HSJ3- zpzEs?L2PRKY=P0VgbSB>y_D%ps>{1udkjW%XaAFLW0kpEkwgM-d&)>E8b#8r z#Ff1t0E*e)XyObQFSsRkZSz(Jqw;nPP__yaQ{0Y&P7( zOGfdh!u1_6H*bn+%+-3D?Z9I;Ymlz#&TG_rHcQ8SMN{5I5LdW4RpQl3T*iJh)?{1m ztqm5tT#dEHP1Vjpn@Zl|m$HE3m8J9zP=&dXvBmeU@Ud`nE4(OXe8s&{kh2@xUN%{$ z9d!)OA=)04>lQh2aG>8+?D|TlO}IGlhV?k=KVA^{P7PP)I8yQCtX;ZzF9`|>CYfq8 zD;W+fUG#tYJl&wJNj51ks{X?e7qsq|hSFxsCBUB>`@gYeawqmgX)qJ9Yvhm^$}j2N z<7Iu#en~gy)V^6HJYW;f|7D>S8fipE|C$WwMpD(6I)8Mkb!Lis=Td*Q;J=hYS(bc3ymMeoxBG~mX4;#zkTc>r6c_9nM$u)1i4Lu>lOWI8X6S@;godUGa~l+Pc}B8Vuy!_r`#9%eo1Nyt-&Gg04{M`b5Rg1c?GdX zVjB?$_E%a4%@dNm)^slIUn!j~G$@o!FXdj5lhzH8KX_@Z zr%dnTc2<1@v_K0COS6Pfvtg&~Zz!3}P)6DIenNpseAIup;--Rf$!wDa{S=_RR`2hF*H7&ez+)RnsnbvDVmR*W=VNjAef+M{=3 zdSy6%?5Qm%32m>S&hmCx7N3@j2DAJmo-W-!B~xRDyf+@`j}ZwcTh z_;egB?69(1CE?;JiV+{=`OpV#>#!ER0YZv#=pZujVdgQqvFmas6V|Tao9!IE*Rt45 zu$1atlp7*bg@-Qd4dy||BLqjN%^I4G>#O4xN*>$PJy##p3tLQ>o}Z<$S;bKe=*B_I z2TEMXvoA-feh!I>{VlbsDm7Au)u|xq7b*_Wp2lSDKW#i~LC((ymqHJ*v(%F=U7jR0 zHDe^YmbfQNq&L0r1oGPB0NIjJjBL zJK$XHN5@<#VO}FgTc>PVn)W4>r$XGSmQp}qEEG}3I0c?}} z@p16>k9`jaX?z3B2@Y5iq8m{lod%R`Fb7(#bZ3SsAU}hp1)Zg>T)K0RH%MQ_gRs!x z*6aG281rGg#x;KC>9{{1mP{K7GC(%=Rrsxmgk!+S%ok`i`0uu-YgLpOwMm!Uj@t2! 
z*mw7)xUMMsr=%YZojXT>d5t}|MOXzo=tF{TsrZBUSFYZn1sXyv1!2%R_=N2RJfiD7cfg-o-|h(6CDS`CT{Hq@QG71|?)!xR7)2mWhKO{ZS< z#Av{}w%heEfEtxU&g3^XPa!%?(?eC*EJBGcGC(PnRhc?D(#8v#Uh%Z_<=ZKZ5Oy_a z48JiLWy9-tK7Q3IDilz?>zt01KE$}J$;h)iEx%A|V8jt3H7{_13I~M`SbOEnTV^_o z`<%dfvOWzi4+NO-L8Hqn>vj1!<`EvSK z2`Gyy2qM5VdrI6W`{-cy$K)g^L5WKYI?(Kei%TydfW3Ut9rX);O(=k1!C$vqPA$SZ z&CDom^L!^9yz2++K~N!Y^6{LX|1f!KU>In+P+954`VDYrH+5=TiMXS6`aTNU zC-}YUOugpqfi=l~hHv1rq9Y|4vBsZ)E}X)q=&zia$jLrEXyB7p&Ob!k1iC6=GqY-S zjWvx4%=&YLde1@)nH;)s1cqs8Fh}wT^7=#b)rDzf6<@I;LWy(VM zk-hK8&j7aCbO-x6$r>vWq8EDj$fmJeK>pq`k}&pQMq{{hOr_lwSw+H%W8?8?(WYBA7bcJDI-@T_+> z`LxW`ZzNr8&y#{%tNuHzaF%Q4)Q3qiCRjqC5rjJ~jV zmgp5Q7cK;FC%H6Sawof-YJ850TyrDyhapasV3m)#pgjs)qOzjjIY;>%)p`lSKhs!D zLr}=I^MBCyu2eCuxR>ABjZ(WIi2dLceb&nV$<dyW$@ZtQ4*xM^!gb-K1UFw=?hO5H3pGL)Gi?(9cg2eRr zn)?kvhieS0mePEEJ?%VvO#Jc%JS~12MWq212HF<#UliX}wqL!3NFZ;3cy6T6{Bmca z`|IY;1fECzcV)PaT<@}lDIW|t=O#g8F9}_Qc5?UJRd3lF1>OluKr2~`VqOFZZjF$@ z8>5x(Ij4heV~mTPSa@N@yOX12@%K$^{GUgx9gwGd*z!#3C2&=BK12tSB+bSL%;gub zxNGoMp)}7;c^7J2gj=_R&DNM`{WDXIe;h9p73frowQUI`7UxcU)iBsh_Dp$cHL5R~ zQn^);^BMLv_<>kQcoD)LVJ+dTb733@ZZ8Xz+P5TQrT5t)DVW50u zqB4`mYdek8%3mFh^IHein;XW9I22FJu!SW?`>a;o-nmRDDR!U{Ucqn&1R zy#a1n3!s@--?Za5S4DD4M;_W?Z-DobZ*G7ek^O))_I4GMBS<APUosP+8@ zII2nlkBo34KSDIe(KBu2ZYBRRW#nxzut3m1mn2R*+c{LOoN-EICPuSnotbK%5(rI~ z%I0k^Nj?&~p~+cbgVItLE$I>Y^-UT!>3t!21Kp`1wJolsdlM~=IZ>OuVF-pqW~;=h z+iFdcTOHD9$JK3!89X%C5j-chv}f>ca)*}n&7ZfGW*}Z-PZF7-JM6az3C8&=ZLl<> z?=L}eoG|MtE_UIw14Wbfr?lN0iDv>+&>p)rVZIHNbZ=mx9*z@}&u`VO95~*hG`()#OG5QT&W<=!7r(y38n%&g-5n#5T!k+CR}C<+cQ)pJfOfqV zhL%X;5^Utj<`hIv`U0cHhn078?Yt89t^k3>)=PogwG^Z^APQV zg*DWPDEO)IXf{&+>-kZb5;<~cr#yHJOi^J=&_d1Pa00^0)4d>>ZgGvy-LA3F%G*9~ zh!661bu*n`vXQ1^Fcj+;lWsQDYg&gZ?+Xrhx`IME8Eyc_?71Q^njPtf=#LJU6uB+g z#x&nhl&>2|cnQsl_p?PUEG|5!Md0zYDj2BU&;O~H`mJ|nJBNA*L>oMK10eM>sbirW z3Kj@3DO$HL!z~lTby0)RY?lIfA0Q!!Mw|*`(yh= z52L!WD8~(Oo`L2=bz;dL2VJQ`?9t*#tpN`9$)isD3a)NTqHpMU_;5D0CA#cY-L#uz zOdez75Zl;7{LFco#P|0{3I!S}LQpM-9O`&1y?^aMc7rj7UbhQcmlcB~K!}94@gN7` z)bG8&m>wad!*R10w`6b9KEeL>Ueu1^q5iV)L*pdGT_j`Lqa-B_PFq|SPXAwQFJ>%X z=u3Lu4sW75QwNBFvia|u;%C@YwIWkMaFjY;7$b}l@0(Z2%2diL1+&Tuc@OKIXk5h? 
z%)y4)Y#fFg3GPxP;Z3)rvg+4Zo4q+B5AG!JiLUr3*si#_suA7eJYLp2IDT-|msmH6 z_JGGx--2kN9)5CG?C(q339#$$-&ilysASJL&}!wpkfZolrq7`irtV5_%*eNVh>i|q z{94lIXENVMJtuK=xn6VxvJr#~e$(s!t5C2tMkwjNUw#;OH94p8XMqTZ2c1hQ<$8@L zFd!6N|9n<0l(M2+&i9MN$>`meDt&uNx>t2e9X#l=;kb6DUTq9W!nh^l>eSgERz&*< zySj^~RI+9P?#qZ+W=7j4YDtJOpHX^xFF9K;O?$wUtggGRA;|vx%b6JwB(h|0h{xjaiy52r*lmXmDzv-0f?3ko@BjFrt@f%_|E+iC8xGqLcZ(Q=%%Gnu$>Zb@ zWs%*L0MJ0Gv8*3*V$0NfrsM8jqp@6m^YOr^Ajvu@^6T%uV#$XGNx!!U$OLv-MB>`+ zSxJ=@VRWC`In!x~p)=;V6C?<@Kjn$mgvc)TS8M8jxWBF^xT-+SH)t}|c67n%yGr(SW{g|!uC+W++ zr2xvG_h;&1lFE2ylt_9|7xz!bgdzJ4;P=SbCQGB6J4u6kwr8sB{hIT5ZzDyg;e)ag zYdIN%z=61F&B~hxWF4=_v}AH zT-g3R9PNPLB5RjGn&D_Jkel{KF=W?y3I2=uJK6vr`Oc8jUVCV$tYPs>>O!)cZPI>f z;weP18zb2@XZuZ7l1McV%Z$4mHsTfsLh9?H1snJ0Rx>-vv2&eB zO%orqB*PVjlI(SMn|?-{K`XLHQNY@DDrNDpJ$t*rA}(xu9${$XRvWwG;wrRoif|o@ zFG~1cLpnU6Xw|t)IN0Ppf$LC@8NugT(QGojEuNo>0+!rlUgs$xi zp#1^iga^6x5$J{+bbO46-~8w6tMlvC{1EkdTjl=Qjg@&mEal~OMdxa<5@{sD3i; z%dRAj6(=(`n-_d}?m1b4#Im7aQdg*SPflKd)xUrs*|vD6_$;} zaY^&`a9D5Wdk)!a9GZ5K?cjt2p#xEAx{z}_yF*vq z6>ioL_(V6y5x^z)A;~X0NP@?p1RzHkz8Oa6I$KE?9XJp`^ouZ{Dq8YYWvL^ye)Nuo&xA1Efj7t4H-55JyQ8K`_~?w#r_u|~ zr$n)IeE`;X`*rE1CEWuh%CGSGI;WkdGstT}yK~?fq^bHu8gonAE1UeZDuz*sGHOd1 zr4ql+d%Pc~X!*SiI{35szQg+g9bNt<|I-|5wcKVHS_h(4;DP9jW{J>LI{TG)e?{fb z!#P|+Vll3C-=C{b8e}zr(Rw>;vA+ow@HmS>uA%7h>dj~4ux{-B8j2!^*~fJ$(EVu~ z+(N)I5qy!!W6PZzq?}ihfOj2koj0DAi03n_my-LH?&e1xwC$Q>LXV{&Ehj+c^){$M zBkL)3pSlj`uhjH`4&n~uZUDB*tXSX#7Jc4H=l%OE<}_YcbJ_;y!eKkMae6Ta1&OI) zj3?F`;M6f@o_i>igA8|-WKfZT+Jaka6~?Rx_5pw z55k2Yk*Y-BS!mJR3EH@6ZVg5j76Lk~LlMLldr zm7P0-BeLs2AKI9Toj|O)->MrMvnaK+!_@28onJ7i{<@SkEC5&2Guz?Iax)V~y`UZw zz%8(5xfnI%QHb{v6JjL+_tMg#W0!d?v@9o1=?WkFFZ?d6sLq1&@6R*eVrC=?dGVww z9v2j!LGE0U>LPmv5nBff$XYE_LOT4@dOdP5x?WNCjBl!*btdM1ZS7=@{`t9mtVE=* zc*#;-?kAQIrHI^42|hFtO0Z`IA8HHzoq1l_&{Btl_PHfwT%iZMr2{bO-+u29XrqW5 zbNw|px0|IuDqB}KdJ7Fbs5$QTM`b-l=zMZPR=VLP?VDS#qUwnX`T^g87jm|zX2;Yr zPw0_Di)yodfrZmNPhD|$h0B{J@23=RzcLvl((}9X6PyLaqPNZ;qCHl@bD3B&MO5SY ztLrZqMYpdIiAJ>j>I!{Tgm-qb_L$`GaE0p(+$D#^u5n(tTxi383^0*id)Rj!|6eZm zVD6AXQN_Vs?(PNOowBKKcn{D2c*d7o%vMX+rI(}&7&Ba_Biv#^?I><(7EtuUq2s++ z3vg9M?}MLu*?-oHJg{Q2nul##d5&@^X?~YcnS>`M&n+iRtSQE-^~oP7QptQT=EoNl z6jYS@B@EK3OCY_c{IQpe=$T#@Kn0vvy7pc`S?XVaHX0-Msjp8w-rWHGvj*nhu2%+d zWO88#BPV(Tr&S=f2};VLjObYnWQu#Qg69(0qS^waRr}&(>CrBOS5LxWpFn~JLv|&4 z>qX}@4>G#9RlTa7KVe~EsPe!Fw!6Nj1@n$7jt7{$_#hv-GsWtt@bY<6- zZ-rmD(C>BrL7D-1)h4||PjwPjxEmRG4@U_3XxktR3(C_!ZqNHHC1ksCNwyc-wHYpD z@h1A(sw;Q<&XW+gOEo)#lh+JyfNKe4eGyjP%j;h=l!-h$soC#MV_tK+modfst`+S9 z=1d!Ox6HEcqj+QB+5+14;7+YT|L?4s+_fC|0tqHDtNet?T?|`Fw~%KSec25DrEC2! 
zmR@ZiRP>6YeP1&ivdx*fNE2!NaCzy*SExbV%A7+LizDh*XbW6GUD7a{JYqo~gmSp| zb*w4=qva@TDHxpM9jnwb!?=P7ob?O2a`mfwR|*BqLKE^277qt`7PGWyH7fjbV@cCU zxn4edQT+Z9Jyi%*PtqnxYF@pwEh0DtFB(~>D-HLH+8A9P6B@-TYpzs+SFfi;AuP!L zrJuuW9%$E}LtNkRltaZd6iFEL>f@n^Ojhu%rO`rkpiH*e!?x6;>Ai*JsHpS^|D@YGaP{$&_Jv<`Bh$TJZFM>(o&_KJ$m817ey_wM3F z%j;R|$wPceI@-P|c$?t5A{=$iIk%=Uc}C6D@8{)JtZ{$t)vNn;NrdF>2?+e+#kMWb zqb`ND9}vNk-5a3MH}7FtvAI@0M0!Fb?SaIzCj6sU&4$MP-A|97Nk5pUR!j)CtGd#{ zfYQ%@pyzP-;i$VMLQ2Y#uqI8>tdANmmE+tx!n8E%jsVyCR(KOfxKh!T3Zn(>~{ z5%b&ntXL(Mq8~5oY4SruQw+l>zNVvyBA|)rdm;v(*U@V;g*BFFsK{un%)cxw^BVBb zlqf8j`{AMgq4kUS^T|33IZ7_|>e!i?d7gx2oYu$fK#X6^29P+pmf)nFjgsn_x-#eQ ztVd6M@V^%!U#pnpc3OV0AuaAV5vDceZi6k~BQ*$%U`QJ0$<=U3(j4Nh)8KGlp*;Np zLgdc*YZ-Ko4Pv-9Rj?yXKOUTP4W{$^_uyD*dMd#jXJ{OYj3E0mtm<)sO>_Ky5!a;W zpZ9}U{IJg}$FGvH{Wt(N5m}WEbJhyp31J(LQ_f+k-}R%_gGzzFE}Or65YByAGTTSPqA5 zIt76Vc6rN*)y|reWao0v9QShBUW(NZS&xhMvZ>n`5b~jcWoV}vt;7pA8inTB9RaJs zRp)5&{!RbSYP@ouDLc!IX{cJqs+W_UW))^2neQ^JQE2i#W}!R+3O-2<&3r2kvnlQV z6Q{HI&-&V`a>eL{+p!KGo^ zdtozdRG(Zh|Cxph{Wy^jbK^gKA3k7JLr0AT@AdJs16b?%K0g~MW58WIt@vBC1+ z&^E>9yQMy)!WHX|ZyHxFf&b%61$GVB5_Cou-2d6$ZD z;|>eO0LbV87#>$p*65mAuePb;{mlpJN}9W--ougaILR!DUX6VuK2`<=n?a0`S~?8@ z9eKQo?t}O%rhl<+?x+96x=QjGYP9o}4uo??i2|cLIe(=*dyx82X+&!7c3FAiFzbf{ zZy#fWFtN5UF={1YYfDCFs)-W&;h^S<0!(E92BeUd6&RW2c1Me}^zqD#3D<0~Exn*c zHy4Bb$y%^Dv|AdwTG_&Mu!f>GLTezXBM#t-0mILOv~-Up-5MVIy-m6=mnJ%(uOfDD zfYho9)6{*L38w~8m;ZAE&^J>4SyXq1>(0@gze0Q9W~&otdE6j!0KMpxA>9uqZEx&7 zV&2_Q>91d>6-qaK8;L()s$AU^l=G$f?nXw(9vYO4T+y<@=(PrWidJz&3vL>^Q{9P3 zuDGQsR$n()Hb1l^UznQV{PUV_08$l(WV6tv>5;J07491=Fp6KyNS(?vNdNRhZ=;0d z))Rfth&Aq0k4~+Z^_`UHz`lkR8x&QvV-S^%_xH9M<1wKY?M{bU7>ZFr^z0bfO;r&!uW3r+^Ry?E6aQ)q7Q0#%*KQ?%e zw+e7IJ*I=BC^zND7#7CDEi879dQX`Wch_vkB1qOlWXIpT7BtoTy=ytq7`3mUnJHfk zL>^)0R*&|xsO^Yroi{HJ3Gz*jZGAPr(mP6sj$J!wsfwyF-Kt;L*wE?JFrWevD6>i! 
z;{;<;kr;_e$O9CoUX0pO!}PdYE@t6v=O0?w!D!69G$u-c$+2_*SFgE3_z`xwrZfh+ zMG45m`9@YprLh4{^C{UE)29d5&IMKSXg!pCcLp?U|E=VbVck%c#y?Oe<@4>0O>TzRpV{4?V3!jFKM@o_uMY-Z*b_cw(RcPHdcI>(iYP-bV{3=+$`7%!A z+7oHnNeT*WW;jY+&1NN8=v>Xc0cuqL1&-Nm&Ka&V+O&}@B6GC^!OXK56}WSdp`^SN3IwaK(0y6kt^xUd_6fre2VI3I4TG1MLFO8E8(Qj)o~wHh)X*V+mW{yR59J z0y|x%mBbB-pC2f%T7g5_rMK=c77xNjJcY791jea-uCB z`BHC)I}6~b-?%W1Dnfw}Be4#~#+RyzplH{SsZ@pLGgAG)pkZ%B77XpE^ zNfUpN8LECU;HYnV{BBHNni6ZQHu8y@z{xknJ2ms5enkSBmcm)JR8)AcXM*&0M!p&r zYto4Q;Xn}hWX#)*V#mXwh3X4KML8EuFYPMT|3FJ-xEiYs*bu51IteD{#Q3g5Q9geC z9kMmZsc$=sM*2>2?eobvEjDpeorLIj7S{)k%0;ShJ^+QikJ2fqggA#kLA&`l3h$c| zO}Ag4bqQ0Z@ihu5vuWL5rxv0K+iME7D2ixfDDYQ(kl&bOR<_qbUF_udmL;%=eXhET zJ;e&A8|RQIaUdnvTgrn)QD^%BQI#RrY5d(+4?j~IhUyv(5*z)=yiPoxgxons-v9%m zm%EIxKXZMcbJJbHbY#&mN~AyWGL8fTx5B}XF%rF(5H*AaT~C0)p4|0uH~#IMk6Q9s z3^b}*)e1yFQUF;2;)u<}-wHk{{C0gx(zz?R`>%N;3FUn#HXC&QTf9SeN?j^{j&p58 z1-rEcoHQIsvS0m-daiCmQJ+ZBjO~u@&y{^xT8J!X`10);1ux`6@5ow|`l*?P7-8)g z98xjU4`_Kp-`mkjupC8;{Y;!p)iKE8clUF-?v(ZI^Ul10)Zr}V}$Bn%t4!YCbWJyUCsa_Rh1zJ%j z1M_NOxyZw!fIaS+%dL6+5j#V_c;PLIIcxmfL)OojQYwu~f^+-vHZ|1NU^=+ouW#Yy z@k>QmsKi0^a9sn%)5T(|q+gz2-xFdM@xD=~AT&Xo)ue8<#BU1o_TOQ z*>Psi{>KCb0e`ZwEnffh_K(M_)&ib(7SYCrs`WLdAs1w^%SHHSyq7u<{%TpAW+Q_)rMOjR2H!ucEEF$7nk^LDOI54y6kM1$dI zA!og!)R}XWSorOF&m$%67C~7M>FSiyz1ccyoGKlRbLvgh zY##4-580M4_B>kDI?EH~hhNF}>t?*5Z$692`f477?LNBy5?3@7{Na%oS>WbuSuiaF zwkxbH5q|gJv8GoL7wa!Y-9eH0qW+f7!MR{k>|h3acASEHi=e4f%ZD2K=g8$ZM1?IM z-GBZwm?q}O#`f)aA#Dn}8oxpwd~bOHy|U&vP{r)LEY$XS(Z9$r3_+SV#Ut}0SK_&~ z3UEKfUoV6N2x#m+nj6-*&QMTuTcr;5H0gcQ!dCk|qd&*d_0`YN42`d(zt-2bUiabE zQ2-^c`dP*1n9je(#{8A*5rz>hqWU zuT<3qlw5f!MT*9g2DsFvr{=PgMAQG3Fpo*`wRNY~$h_tkrEht<>N^j96{w7GMqmtZ z*yRTJ3tn)XkAi}BF$R}xhZ$gklMy6gl}Cl0Ga!4B4=iQcuBK`tcgN3o@0Bm6Sy!tY zDfZR2eHIQV+IPX>;>_!ePRDG{?76`;;_wOxEqc~znDHwz8{rMIYD+Vi+!t?pbu|7> zlfEw@&o7OMxTA0kdlxOz=f`P10a1Hnq8+a&Nbr?! 
zlaOx!OrSi@Z_yYg*!d2wev8*%95FfwLUU&i=yIq)i@w)Fl+n5cPXno%tTNP1Hx(7$ z`fG@O{6W>z9c!go97a8znd$8Oon`)Ti6?%5L-w}Vw+oUAuNn*=(p$fNGm=Hi5f@Z70%nTt)f6ogly!jB zg4{UgK#1%EBINxbrfy5wu&sWMI~{Rgq*UHJ+i%PBPHFjVX1sB?PCg9yslh%=sg5~8 ze@ma&Pj<)#4|lMj4rarBUfISn)y%RC%pN>&gpxC4@g<18fl;0SLl^8iI?mg%xPowZtO4SjmL8I`r0No%npq3j~z_qmQF)xQ1X2N#EqJ$xIb$shbSIv;j zb+@{7n=>7pWc{Hw!h=mw*n`;;V#L&zo16783|B=WQ~r$W1_1F-X;_+G7v-S0x|jLA zsKOCi@}G>47V^6^vyW8WhtZ*?^6(?i78v z0m}8q*EaTA*Am>vBo|!vwz0(mBmJmzF=P3~?#dGxNlBa}u4g%N9w=)+x)_sr zd?$y>4^IDR3Bo@%gWyTzo8_N2C~yMje>+!B{uh81b#n~M%*KWoONV`W0N zg1EBB&>{>^^mnu|ytw-Nn9#}Dv4P~+7|CCZegpJXuzsN%l)6l@Dd=c@EuFZt*Cr4WTsg(nv!l7rFr(^gH)Qrljw zzIpYV)a3W#fQ>QUkDzKx1_z}jTy*h8e%iZEzmoA!MqU|peuaXnarLQZREec#&H>t$ z^>%DKlp8I&v=*jAgr${VL;Tkb{TE15Uf{^ z@xTpGm)=W_o1~t3(&I&f!~!J#1~~huo4BXhwZoe!DZWAkce5ci8siL1d+qHbxUsQ)B013J+r>w%(YyE zyndSuv%cpssCgzJeP{vuZG7&G>fB#I4)%eN4Q<*5yUWt33s}9|9silF$Fa{Q;hh~R zG%ij&*K3anX%@PwcsX?W0*Qp{fI!3l4SN>vSW0Aq!+VoEql8=zMQY0MEq>a3pzUI` z#^dJa`6esVHQ@XSSrvMB9H;%8F6Yb_Ho9lJO3IE=#?1bRlvCk=MtBBP+l=_Ogp6du z-1bZ)0Apc9RWiaYk_USX?*G$=QyXe0RB5@DUZUg2AR;f7Wlm$w^!pMw=YU2Sr<94&cUp{t|7~& z&A&^s_;k{PwX78_^$Ftvl|^G zn?XUC@1jByR~jrC*?DfdvysMs-*GEdk48(r^)+sbv|=P6*EWStw`fAQBHXAUp!%@g zV=XPBmd;O&W{!Qg!tVgGe%-7#%zU3yfsEN8H%oN>f&J>zj7=iCK`3 z{GCw@z}dmN3Zvd$rbBM4!(qcS$4pC|SdW9iKW62dFqj;ra^;h-?n8oV$gd+{Xw_|xENiGuc8 zT>ms{_cC!!!VO?LPqFzVc(my24)?xXK)KDPn^hVctXI*#pF;>4VM&kFQjE`MN_Q;w zkI5tCmln%@_?v?pLi34APaLD&)ra1|kvDwTAIt9|Rnnq;g32xC6&I!4Z)=G&;yZt` zXrw(X;@h;TLrt``ZOQ#;3KYqb7cDK(kFVle+E2n2BMxydV2tDX*i@3-D-9KfrK(sY ztek&X;C`Oz>Gmwir8Cpq{(wB+u|onKmxCP%2rl!0mO8b>w;#b2RNZ-^rZ)8FJo+B@ zR=*k~lS*RsV!5g>-k_Xx^!#DpV-E9Pp8AU#MOv>Z_t;gxhByO$d1^hN0x;zD!2lh=c z1R}+b!TTDQ^9eYXmCapg#yjk4AAN^?S&d~7XQ4xw8@4w{s_Gf*&cUab$TSM4|JWTk z)g1`b^RyJk&Np5*-2f}tsJ6Yetpge)PU%Bxp|=o4?~P^{ZP+6}2yR+9(fLM^hWut#L}Xjpez`jX(lX>63xU)+zT}f#PYjL*j*|FbC@di zkNl=VG|d$gI$9T12d%R|&_KjNSt07Fc=Zp9uH<`%rXJhStTNmHS^30#iD4$0XKln! 
zBfH`E{@K!Z&*KU3Ub8c5L>pk)<+t}V%zShtc}lM*h2WnV2?!__-WLdXb607mye&g0 ztY!rNia$+#SEgvlKb=;+UPm3;$pUrNcAZ729e7!J!Ep-gNyT8a65V5Yp++;4Yy0369(C z5a*{0u?@EVoUC&9rzgRwk8SK@Q4>u6fAw|xMWb6b*{Z^koNiP&4#|4iH4x6!g}ZNtFO2n>Q3Nm{si2%2pj@W z>Lt03Gl1?q^O=}^4}q(h<0Mc8ZMSnUjf0fmIIN}7caL|)qscv}-v-)KXCc0c^ou)- z=i~GxYPovOsr%3!u2#eFS9<<)a$bG?j}m+E*tsR+1jum(Cu_hj`mPkk`}7*pBrAyd zGI;G=zQ?$88&cgS?u)Ice@y6o#z}>qg&&1_%ZuQa+8$xP@VW!J8F?3wZnh?4gVDF# zNcBAb&`_F}0K+Q)=<2sX^252{Kle(92rgLc$eo3z{n}kVpRpA% zo)~iG}RsT+;t~p~u_te>h zcUVHjFK86;y(+77_5I2%?7e>hfhp!*+!K`KR~O?anWh^)p&dUXY8Ibw4>OoiM!8bWM%VTkmbM&`FgH{@G?Bq4uNw5Ae_;K8(e|EEO+|0FZvYXbh|)nJR8fi) z=`A1v0%9mqq*{<(q)DU&rAdu|fYN)1NbiJ>2#R#+BuH-wH9!*2_J8hs?z!jdd+rB@ zpB#f^FZNn%&flC*iEf-GY*3mAK0ckvR{vz*M2L~e{k&W}Xc4<2uR0dfGG69ilofwh zXJe+9)M;DyV}DouLdhJK-gnYJ(r-aI+`@qHDPCE~a<&CxgLCQLv88z?VPzI+5J4raTPSRpysr5(!$mjA zQoiq}E@MnEChXOa!07C>RE`cV!sHpXEnSg1Q0V~t(c;rK%hT%4IwYOJJ22;wzaZ|W zX267Y*+leS5ht<2I8RpVX@jnrTg%-#)cLY2C^S(gs0Z(?5eDNmNjw|#_y7#3E!L-D zddJj8<T5UrA9a1@DZ;zr`g;m{mV70`&BGqkJLGC zax4C#%m!(kz{wpyzT9(S9XflESHB(qtAlm(gi|mhiya6)7{GCx<5y=`&7FKr!hx8N6vb6!-|usQysclh1P{i}3u zzQfWU_lI|JhU^X&!(A>rPid60O_+>_JsiL(J=dNFg}fj@r;oB}V6EMYJk#QrSVygFwDz6= zNTTc{$ZJ?Pgqd82=c>~v3~r527v54;tW6wvK*v4ZQ?aD-Q-R@)_9ee-h)-~im!01x z+Qh3gR{nyhV2uNuc?-_>wvD?}8vYPd`sl;QFU3I@UGtLo1BcZ*4albp4aoTf9QotO z%VLE~Iw{7Q10YE&rojHHuPpZJFDQO~0gf}7n7)GXVA)ut!z1q&R_Oi(p|BH%mX=){ zZ{1(9i8sjq^4jt9;r_Es?nqHLajc@GHNH0K>#|WuxX0jPJ)>-NyrG`f0VaZ=UEh{EJZ*KjlMaBNZAKnd4?LGw%!x0GMHtk*BSLwgQPQ^)m!>>oK(>d?K#uoAHzYU|ZhBH5 zUAP2j8iS6_!16D=^wvH90|sw=7A@yrrsGnUF72+mmGR`cHYPvP(9~wyv}_^{6_y@m zi+iPDL$x&%tk)CFwtVNgkSAn$`n38oPi)Ohue?fx)%<=C-C29}azs-7{u~Hm6V3Mp`>*tl!8pM{U^Hd-))<{=U zN5EGm6Qi%!)4@HqIz>=desh8&Ycaa2BSiM(>OXdAP#}cT`@DDnLhg7HJDZW(FkKKH zsoVB7kv1wLgHoM`teJm}%5m%A%}E3U3~D^(D<9~#avE(paaOk=%eLfE^j@(PP?r)D zri}O4C=~w4>n^hX3e>KIg%jZAYO`J{3H9SXgZ-O{jOd|Z@9;Om5}|wX1&L~#93Fr) z`qZrr?td?P3~8vwvF&?BNzY9+Ued^S)3hM^j@HaeQ}8a`1bt!G6cudbLTq4F?Ix<% z7Ei=}JI0AZ&Tlzyx+LIO`bMG%4n;8Up6}w1W6H!9D}7L+Z+=H>n#cG^WiIdD43YZ% zZRE)`w_#teIJnscq6j*3A<_Ecz$6wA0%QJ)*@qOk7*bh?7~iDaIJd`JEv({omjRu2 z3Z6lroA3%3vIJGU7S90+&5G4ZE@1EMA9r(L7J#oL9~2YVoFCwdI}u^<+WOWjmYCFC zdNh>5%Chw5_DkiYZ@qBeN0RH~d>t#uUx&~W2<<@&mnVCMwZmFze*~xa{z;^O%8u*O zE0$A0j!H1IhKoOFgYV6LgX6}{RT+T$K$`}(r{{#5_gAHOo;BkH_1^B38pJD8jv{&n zr*{_2K9POVzyw4Tr(;P|OJIhbz6+{tIIF0`ozfY+F0DSsO8vx*G@30Ch2 zw-=Mo8u;3k?{fQ=LEhYH+(;hryY;>L%ZW$i(Cg@9m!HfSRJ+1$WMcc0UHV2K(1##L z`U#;m0BoSrFBK|zy+cmP%}DznccXcC4g*aVosn~XxGpOa%axq_#~IO ziH=-_d-I9#7{+f~S4b?jy2oYc?-EG*P@_`jGU#G8N`oh72rmY>K6vw1moPiV6 zW!J4qVW4+r_nBLLQqpG>n}4IJZ}N5WQ|Z)urEeBv*QokD7=CAFiH+BDQC>8Z+nZho zSnP2hU=eAeGokuZq&wRd@yT$?35uTc7X?!Xky^vk#qY0NZs{gt=jSr45xu*f;x@`6ke#aV^YuNOTx?{Hp%s8pfv@?v2db>#0@ycQ zkTarf{#TwK{0=f#$R{yD*c|~hH@bt%ni=fBuhC@rjnqo50|&*aajAHt*a{^GcW@%{ zsNZtWf*y%NDxrAN*f}X=G zK|ZE9YO$3NI5{ODToq`mL?(97lD@%f#rr+*cK}J)HmXAfFO;*moprq)E8TLrl_h&- z-__%i+^NyG)N7wUz71bKWP3C7!11PpfylN64)Xg`P~*uDVp+W_yab1ekly(VnugFi zO#{zxu#F^5whgG3S7R;dPar9{fa%}ZyrCcuq#gfa%A6J)>pX;eS?#p{N zBlsobdZ*!3+t$DUsy=e32aRA33?iZ+PLNPP2z}rkbWEkTS!x%Arap*o?^j#Lq`SGpJ)l+-@3d#D8h`bPp%wLd0c3d_+e7SfQ+0QVmgl^mgV@d0n$mn45ZB;hB zAvU#pQEq&{-h&qAZ_jYDYqm44U^$mQQ0;ULEIevT?I#_f<&3H=?#~WG)&0}DpF=}Y zwXL=I^5yoh(`**$+dgM}%~-BvROr5R)X{>jtbvb&F?Phu{+mNzfU`OJSel@7F9TrPa&QtC9#=}T8I` z`oet2)h$1Q-9W&g_niqd5d2yu+qB$~DhMzY1hEVSnx9F>-C7~N#pmm-6StcH;m5uz zPlrE3HsvXc;gb}5dC=8@w1bi84xvd^UYI$8hF?8LqkNMT+}c!-`bhja#m?uQV)}LN ztMj*A_dr}8AC%on7M*KgO6c7>v$O&jTcfw#>}JmCCq(-#1%T%DPs~qiMT0Lnk(ilm z>W3R=GtpE4(2KQi;?G$LQzzVRGTBi~^6MdSjRuMpYUgcm`deCRW>y#J2xO$Zcxx|| 
z*qaCyn4ATzcJLEgM#659)}sMLp1ZcimYCL(w%Q>?K%xgx&6sps0_-8`LgrTMii(-} zq&?QJCoI}!!_q##Y`C?uZpnV&e5QJg2ew>o7#m`R^N4@(>+XwR(k_=Lj$7-*oQm$_ zdA$*`rhhcAshA&cyG4 zfV(Sje=P%TIsPklM2;}vMYMN`7a9aw{A6Ju7oHdM2TRPbU{;JH?y!E;SDC2M6An{# zZhBx6dlPg;)H@;iJop+~c_hxeY4tN3`;UI3wmC!HJF*K&K}I@vi)UVgDb~4pam-$U z4F+-A9QxngN=mx^<%jnYb=)s*1u@~6P6qG}7czLSzo7eVhxPkx&Z;64m|w~)dBGP3 z6So4IIY4nKA#WNqG=pc64QD82DgTsY-@FE-QY(%b$P$n84k%C0&{381! z-6QB;B^jHPe98A2vk(NZf=BWE%h}Noib|BTZA=Gu+jJm&~9hc-ngZZ4V-M(n>|4!@xKTqZJAVk@8xWwdlHASC2 zeoDeo3Hy37=*ZT6y1hlrR2k1CR%!;ccm^UCe&QySA$>2Tl+yskIW$=qUtBO}v$Kgl z4(?ChXEP$rmbI_yG9(<+l$Eilf#^U=M$=+{L02HXZVZU)cuE=SNJ|uh4VC<{#Ed6+fj6?DlfT?>nxCNfooN-*qTn* zX-G% z7|>;t#wL3nk%Xbv##(DlnE~(iAd3D066iewZ(-{1Ft`Gx-J0bE`@5t=6_>Wx)vTEV znr#bxr4w~-O4ijhnHcEZ13+XWeUQm$J(0He4d8h8oKF@WQ58(3((;j<^}W*&A7l_< zpb-H{zF1!$1|&UguMLFOg+{!eQD+BwH^%!Y}|?MsF-qPrBFZS8JbK zF|=FidSg_U<10`NyvIL^`~gB~zR+E&3!A`8#Mn{V&0zL-!91{onRnfN_8t1MkCh^t z?p74nmL4I?rETfAvUn!)a09*a010+WUqoB#atsvs4$Rl{N8&3-6=)aI*Ou_N>T3wW z_%FDaljBMBVI|=$W;_oj{FIHy(ZXSmKM}GQWyRPDzo~#6yk~n7HQI$ddn+wq z+VPMubB+KIk#4OUQWzaa>?f-`kn<0H-Ufvpiav{c$W&3=FUN7rYh#$3bHli7H7^MN zBbNX?VnIi{x(oS?@W*1U?cQOaHcsPQFgn-}u!CpHbLE61T}Rs-i1k%|T{|`S{l3I< za>~^_JFuSl2&UWt-AU%zg3Bk{IK08UJFQ zp(|l~%-18lgKDxkV0DzWAJ=smXvR&bbQr0^M<*cyqt60JS46NvDM+gv`?Ce zZ6pzf_i1wBMZ_;8g+Oo%Za-S^moJoY9?uR7HNY++xLWl6O1s#?S0|%K_)s%$kJ8^* zDI`w}TonLa7)292h+o>gu}DBi0eExT;O|Q`FK}8h%k&N>%dJ>=jOe}(m#x+kI(?tr zIZ3^kHs)Jy!jw<+>`xzUsp2fGevOu0)%Z#F_%8hAkjL!yi32hD?m8CQ^O>N8E!)x; z?_qOuazo##C;G4JOwE4dB_9|b=Z!{(S(3|3#okNd0hfO=>x0)$ciD#9Rxc4)>{m|S z5L1=GE@*LbeD;Yy{5mYPoAaKOxkkB#%WDe@+YdS-HxIi!-y}Yiu)p9-6&*~wF#2!H zhBT`LO$--XcZ=P>Rv^LpI&FIXV}T4x0|k4bkbHrp5RsB9DkxU0mn7i9ik~yk*E;9O zzK~^(t?bD9!t>TqXE-cqli|k%eSS3PCqjL?L+D%MqHIkn^w&FjH}txE%6?Non~x2K zY~Xb8B5$N$ekBy0dHU&{>E%XW6LuO#*zcF)g}x?3fo^BO^N+ASTP@TRCmzVbbwOKB zHABD-F#fRQLt*Cc<$r#3Syel7W08HVaO!yXzLJZ%mR*X*_J1|{JOtS{e>l?j#(ati zT2Z>=Lj*~$Q#8-&@3T5tQ~)e#SFQjS8-fnMlQ~43hpy| ztdJusuWRljne*?yUOzM|bhNmNKxYbbF{zKQ)YrfBWR|>2` zU?+ixEN%pi0;#fC(B)ir$ulz(AmG(!x_F%<3ux95GQqvZq34|kIGAeK<_a1n`xZq@zB^#hOd0OEMD`c-3=;W< zo3RB4+0{n}v0G%2Q+?_uW4Y&&7&ReN9X(o)I=Zu?ta-J06z@uY~v0M zab^Tr?SX)D+7Yg8Syin#^lq>m1RL)Zc}vMBMVWg26{OAMcJ`NdT~7Cos0gBNdfI1z zB_(DI1&U2}@-KW*(>^@ks9Lr1I#3eB`90!qU9cW|I{(i+4&}7mC^| z^?oc~FUnrDZ*G&bgWNnTvB&wvNiWRb8DH1gWhC1E1Y?RAC0 zxdfGPP&~bDnqwvAe@7yOa*$Dq0?-< zgzC#Fr!@P_yiM$;g+5S-HH8a%Pi-Ka7au}4_A%aA7cT->P*-Lukv&8|vtQA%2`>Jx zqP6dGm-D z=bMs?KD*MT_#^kB_g|3pM4b@nqALD7X~4KaZ$a@uWguP6MWMpEUa+6LXVPKdU!IYOg4%-YBJV7YUe%pHtn!C>J!W6<>Zue2bk)mwE_Xa&*x0~^k zw)n-_MXAG)e+>HsKvMt7ubCi;erxofteXmEza-OD>nQ#+Uv>?U3@-?RkpAoutNsR zT3-C_e=Q<~j|^|(94SiT`=yzGvjzv>`LU8cJMg4lryd#u=4>;;NVg9@U)6*0@+lt~o$Mwm z$a8Fn>{}2S`fL|Gr-Z1+={e=$&~o&Kk;|(5#978?uGiJ6ht|Yh)B!{qzT5$+^VnrW zdqf7)W0$%&2_|S;dgr9Y0nA)Ftg!xV5jihqTT=g>5t3*`oF{w>@JSqCT zx$(=JAC$f7qXW6dU0{p#b-d>Ct4|9+?OB@WI>~awjNTDC`H~k*x>R7c^65bLBZkA? zz13+TH7Re@w`103%3^P1)-(9_a<%H!(Ph}G&x#RCdwFm1{`awTGlX#9Np`K5cjAfD z?LKFp2{SQb3=VpqW0YY1s-joXayupKQ88(*lbqzq*@YA8hhLwu(nHG=nBKlB=h5&D zqJz(}rd(Hr2a>exghJ~%n3Ngs|C#;;{+KBg6KEa4a*mhlIvKUzHty#N%lYj3Liz3Z z%SP+c(~MX;khwV)^-dZ^_gndnr%c~MZ;{LHeFx(1Z~@3_B}4*5bD0@LrCW;qX$vY! 
z72v*#6YD5ePUP^tfvOixWf~4X9E#U?(*?Q>ROXPkID?9&*bEQpzda|;P#rV(2Yf(H z&q`3>lh#;*qd_^T*S@_@q&w%P_eL=Xy9qstydVjVLvV9VK#=WKRfDCa)4=|7(^LL5 zjC(%(!y;c}Ep|TK$L)sU)4k(cXDMrI{Jj$KXv=++n$~DiVh)!t^Hc;$yq)ozior5T zEB&OT$L*7}!{{_KnILSMz}Oxpt1F(hi4COQ*1f|GcA-G)=%ybQ;{8)VUZ^; z6TytL)-a(y#gqKXZGW!S-QgkG{Gbw@QY&jwByS3b>J0zmxEshs^dlRazCPPX+ppO> z`Pf!86e#I}`Jr#Jb#UW0Q%Ou80-VF5DJ#rBB&xn@!>Z2XDM6G3jL4 z{$xOv68sZf?Xa_Oach2?Gqv+Ch}?R{nUnD5*VcJ|J;qx5DXRJXynJJ$oPS*J;RilO zoz>2p+T16z`JN-C{}-Y_EOB0v-aJ66;4esVrXprErWvv*O~&F02P^C3Z7k8&w~H7w z-P$%D-CM4%3#T0t`D`h0d);~6TTYWgU!$8*o4dy7&$$ER~o6fY)Q4a(spC|`l54E4&9Uf-_bIg%xXvK$Aa>SUpufhcCGbJIn>@=_4nX9lQhj;2cR!qC#QJF#ZT7Cs)iHPP?0b*i z)gH&`JN=>(!=)l3PZ{AON*5fE{q%2^=Vm%8-Mz9%QGc32+#b-~gza08aNE~xOQmUb ze-zN`TEWiPe52X0Uwf*W>VvAFBlti9c8lJW_q6JWHb+2p?aKFrBQx(^U~WJyZh9Hm zsD20n>T4{7(I=CfBb1nap^G!zlclTa1229AG$E#M$1K2+qZ4`=I3F+KYaK3L5*$YD zYq6$LBJl1!Y|s`Foqa|-FQmc0%s#;L?0R=OBplvl0SW`q1A+OOR%<9TncFhHkwklQ}cQf5~1!&;`mj z*QE9a%Z=^iTRxeeO+pBpM_x!(aY3Jv4a!$ZO{1p~%1-XncQEO5`cyx3M5P%R`={M} z*aT#{=n~7gsiQJm$b0bG#`$b2vSn0n zZBQgbSz+sH>TFRYE@f=az$`(@>n<~cF;m#bAZ8VtUnBvbAH@tl)8hAm*Yf8Hn|e+Y z*t~#d<)HF=VHM1!XHlr`g5krFk{&)l+t0m*Y)_6NbYG3lvC5Lfk`7e&y;pu!?!4x&*x&G^br7W$NC(vh*Q~vbkZbJ_zXEpq$)%^)*I2b9e6+k6m?y8rs0KnJ|3U z--{CD%PowvPeRkn+itn>x7APs3iWv4v{Y6?KdN`&fI{FSnM`sH3~D;%Yu@IBqkQS| zJIYN?H}15n=)GjdQQ`}EsW*S_Ueh5^b-PXPgs}{lEU@S+pVf4XB$-=s zfK0ioDxQ8@4`KEfewdh(C`4BwRZ+|n4YB~ygC=pTSN$1v8Cw%+!1MWp=3A8dA5#Y)HY zUH-+7&8j8^b}J(ph?cAvV7s~c2lNadi9sNgdA+j06K#~2OUtY}_g|#3Y<`q5B-NNN zX!M3)A1?g_4ax)QQ6es;3j*BQUyVd(v-?8W%kfAZykCX$J=sEvS*y$MzdIRG^M1N) z-cv$To{^Mjo-thy)97mGV2uX)*A_2~+n)hJ$=6h0Ua#bIqObc4Uu6zRgbqNkuaN@~ zP@o|2=6@D=AKTH2spOmMwGppr9BY#-uvvVNDz_`RTVwT^-@BR6>X$I{>ABjU*y!^P z%0XvOHqO;%JD%0u^3fO_xxJQ;>;6J{?}6f}syQ929u`Bxp2tB+gfAPmSS>J+1}}pz zr`S>u04eeIJdAV6%&uQyf@^LqbN{011$ z$>5B^WPE=+!nAUDW%0OtDy#M4*jdkV+b8Yu`2x#*noRbd8a=GGv8LB1z>=NB^Wa{! zpGK|}9OJPMba^)6#GXUUr zwObQ1H}<1WYw!U*sfp@2UAzdB$sDiBH=XFp<>$ZNyw!ee@^;V$2_4_@NoV)@AvLBH z5CH3atK%Qh ze%x*7PNJ22LbS%Uw{|8-HDVOper{=o?Zg_Jy>nU~sFvDiHcQ{$vlYyn)+H9a;OFft zGk+j@CxP>AZIj8+wOoyRH>-zlS;KQgc_eOw*$1yMflC!@__xSva<8*(&fZc45@x zIE^yhVVj(DDr>b+iPu&9&2be4Iu#M9k_)@1ffb5AW-4$t<#c6Dv~aGLsZLl?L-9W( z|0cikTxOxv)Zm-?rPrDu5G6Z#O7v~=hVpzT&fRpU^F?5+hx?L0Z3*iGh(TxcIsiYL zns34(@0E4R93KSz7P}ib=TV#>;bTZzwypG)`H3&?PnB)?fJKDvte~b=VdFfH~?IhLcWx2He8jpJaCT&^l z(I1*(G?)qIJonW0df<}@!D`&ivX>DmFD5H1cP9RVhHf~(7aIK8=z#5rf<|%w3heH5 zkCvP6!HK?xX&Y#Zv#>klU?uPNO>Opd=c`oB0+|6ZE&)wIUhn@~t#;={mQ30emeB~4 z(HiRG%b8Zas4HzE9{g5#3%wuY_5!KD_8#4amPUR~$7$#vsETde>Cy|FPhA5_qq<~rKf~3!aWk!9)$RaOWYt8whh`@qqQ-im4UKsn@f;&(?$}-rF zt#N~nM2Tt6#HN&KXk3d@vb-PvUjJ37arj!uU07p(;-*;w{$-D;NQq0T;okd^V8%;7 zl{bF}ed{8aV{l0wNnRU zZKB9i!KRkxq+!&=T}z*7qnG=w!SA%l<3Z&0j^s+MN_rQ5bshEbjv3D}$rK_C_ZcoX z58(CCG1~X=k1-cL2T2(ljas*Bx1RClLZZz8YS(KK+V_LvvxhYDS|=m|&Dp;W-oFj| zC$|_F0-X$&I^(KP&JByKke+?L5{vytfCt2}#WI|Jf!6!V47n@{O%gQ9ek>zFkII#W z+$$?SF|cM)Uqmx7KGxRUXr^084&f*xsHyTa+0qBzZLgaUFN{?_k98NUAIG>SDi}w4 z6C^`ZwqCbQ4TKQ{i(p!PyA_K%>rF{NIy#!&#;tX=@9xf@fSQdzwle1)L$wLV@& z?orm*CfnMiDAF!s%k-z-ps<7#qmIxUZ57Er2u$Mjq5mVa+$SyrDRTm)bsPQl0YZ@X8%a)_T&N*`Qj zefE{gM``%6qUeV$dGd}9VeJ$3+Xq61G1dVc@%d!{FVRGwP2cxVwy!TDwTNa zr|4f0ef{Vxm8t!g7McrwvuPQRn_J#6?JhZ_n}Y{0!eaWx_H{0|-4P9O`{0C&Msk8- zzA-VNf5obK6SI0;=3Nt=DXP8yY^P5Dj=YXov962fv$XSyms6_S#}<&oBgZ!=q;r$dRA5DWraUX)gR5 zxUWHRg1@k%(dEyuHqkG{jevnzK+twJn-HP6z$@iWIYHayj=^&t#|xLOwOjM10Ts~! 
z2Jh=&!#%mY(i6;6+hGEv3SKpt-40@>NLKBya{>2)83=5csPMH$^pKM~+n$Z^TkqnF znj$VZh1JcD8#s^HJr8MyNZaeaw+RqxiA8H?|uGPHTV9@_?f$(mY5xThn!n}5AeJi^`d$pZ?CQ>nM^`CO?%j*d%A$_!OD z^ip%S@wDTAK@vcFunXei__i4&B!_it%RzB>%U2r~l91 zYL+i$vDVqAnoiRy<*-!s)LtgM|M=6obi?}h3U@2`n@!U$1@1Vg`Q3KC$*9oP2U2|s z*dp0E$7NOf_uP|;UmfM_*&hnHHXh`K@O6Bxo&W}^8P!B!I4*N@tX!>7=~|waU!sv# zk^d6NR<^~>9h@s&o;~kCYX3ve_9y3J8fe$_TJ-BfG5iG~YsUzhEprXLNL6@K|Qn#AAiMnLJ7Ez7Jm zTlXo08+zySetiL0Wl!F*vX-Ed=AL=qj_{pKwS0w3ug~lNW{EBwevHd(iEpjbp^VpF zcs%waKIYU+!TtN34J)hJ@tLz6o`lk1_R-;phA4~|F+#~{lD9-ZS?uwj0ZL{ zV=`a{l*X-6HxeTI93NY!7Gw@~-q?|$r8k-suN&HMfA;a!t@j2SJ-_4WLHe4PLZlvp zw)8z4IXg;m_c8h+-KjORj@O&@cHLR$1kqIVEY&;h8jn=Ey$^3+njMJpMX+rd<25?j z_ypb!`**Z|m9pT*-DhBgZco14nvplM3}nU!6U=I{c$z;RiwS(_ z{H`;obm|~8x^b?Wn&Si%CyK?snF2m9JD-&>;&7?`DNsm#!GMW)e=ir`G5$0R$jP|1 zm3nRng^-=~4lE+@PvtDM2Up;ioLkJn{(Fn!Kq-P>Pm1B*M*YO78nX|W3GdyMO(*}O4!;_yd%Z~khG`Y&k&os*$s8W(_v+?PnGdaR=$s~d$QNO^X@`7(Q^TXqr;*6(jZ+P6Dk~@|SeaI+8jhv1 zcaT&wq0<16or`f_xAE`PIDxDLh$R67Cc`WB8Hx3y&_k9{<3#IGgzNvj*a132{kdYat5Jeb#ko(IGIH5F~sci-C&DI(r4L_XL#K=|5tby6&k;6>R&2$tBcHVz<* z2_|OC(7z$AFLT$u#OV*&e6cv}Ijw4#4KEPyt$pi!5hES`VmP)y;)N;(?}nqn*haPp zeP~kv+JNCi{D(<{-Z9ohbI9$r9=Sg!3+#f!`Vyt;(nJO7Tnq%nMT zjwcW@dOV$LQ|n|*O!0zj$M)tzP9}mxQYL^gS>Wr()P#GoNb$%zv+qkb&SEEisAqlp z6;clNQq+E?gBRr!UqKt<$o*hpo1vTyHZOJbhJfUjf5BZ7eoS6$IaZralE*}o0t8Y7-D!DMmHDbz-Jw8(?oIWBau2R1G8RxLZM&2*1PcIM*l|0e z|2M$}LzIQalkPOu9oPrV>=%w#qFWp*Jqnsy7HfLHeqFdLVBIU_48}fYm~DX5ID@M- zi(@;$$epuNf<64Eeud2B22NTrFbi-U!lKT_umpJi=6(oUa`lJ;{pE}?% zGqhYBA4ukeMV!P>uT2mvD_t7p%8__$Us@3tE_+%L$I8V;yp^W=KGTp)bA(>%yr7Ap zt-F(=Nkn*|rDK{&Q}oo&1)hcv+>Fu&WvCF40cA9xv(o(E_&E?qEPeIcNxjgAMSyP- zEb;fA%SBi60BsIS51O-ergv7jSh^^HMD!WHHd;o5dc_ z;idI$?o^3p++=PF14}J*Ef+*rFc@0*=a>9;VwKwRFZ^SA@=MFcY3+4Mv|=Ju-Kuy1 z_1p49i&%905E#}`yeu1D<}#Jfsw!F+wIkDdSMnlGcQmgx6mhmLe8io6$PL7&%`#J$X(n8bhm65WKNVQSLiBoGD2!OG+lBPq<9iew>WB&LdKu7?0?J zj{Je8LQX+mL3YWD-kWoz2TFF|VHzE>;Vt|{Xu_vF-(HPEFe8^{nPLCEh@Zt&9rEg? 
zw~NKX5pY7;Y;h`-r-76Tg-{7((5#91SX*LV4f~J8y4ux^@_XLO&KZ_C6}VY3t#Kg2 zpx))0#Z2)9p@%PDnWJJ={58=gA!tlzF>ucE6M2n_w;%$N0*s&4bfU7lbJo%d|8Qc% z)u;z%#mgD};tMAiK)qs`6@K?F>e+lr6A5Tt@(lq=P%@L5X;1q=ugz5D=WLxcHte3- z*Javi;7j=`O)pnthX9mg#Tl6wqqp`K^f@A3#A4oI@nLjr&=vk!z^(6Az=^1>_jJfc zJ_+-w!_j|TLZ~bAK0L2c$hlgSE?TY51XYgy5dC!FC;7XHq~B&Do;bzrp4V-ELDvG= zN}LROn^O(y!LId&^Qj*^4EJy)6+hTas`0pci?<4CVJ~J~Z)M-?94cwaZdVNbjQNJ) ze^0a`JNQWJCU)9)Drqq&h72d`O)9-}x0>{wyVPTA!EQHvwUyS=(&dKZPDSMyaA!-< zok1dY5Zrq#2UDtMpRHd^_xuROO^cLF0 zR|kx30$d|fbAI3=4gOup>_3D%_aX>2ej27r_4Olz^Nl|6b!iYZs*RRAm*!wP3*1Vl zcI;^XyJw?ft>vviJo~Iu!IK9@Pd_h%6cb(I99r;Qcj8$S8LpH+|6^owoTw!3+jy*E zw|n5#$&MpD#IE30LQ^w?1jUP>zs~$;j)F z3>f z>A(mz9?_#pJ>^k%iggdi1n9UG1k^JTtG3v0k>$rFS59>Ns$pRl8j?A;3}n_ZI&aM6x%*Qa z$>h3EW1lQn6O9s$EL5Yh$*WO4{T2D&m$bxokJtMw2Ypm%wjL@gU5ZfAGY|lep45pG50t-#JiEL2dB8a+E-owGfi=5$7AX7C zV*Fp?1n5aUPFrVK56BUv9LS7>XC112iPv-V)1n?cmN;YzzU_pv$2aD9yOlcG@P@y?OF=PLKQvzSMDn6|R`Ck00m zif1$u)*k&~O1!TXeeiIQQz?Yu9_;RKfx{HcXS?d5syHFE>LhdXL<)zufkvl&-@9eQ$#FU4thFI^Aj#m_x*6qOmF za=E^x$PiwabZdXdjnhuhM$q>%X5)okX_EKugXr7!D7%pv0bt0l8K^LAtx` zA7J4$YaP25bg$FmNf_irD`WWb>5CF~qy+ccgPWp;eVhR&kvWbw9ODrQ=yt|&Xx&r0 zWH(p0l-oVuc7ft&H4X=RdlfxNp+raW1#bv@umo^?yn{`c(vcoF$OUU2Xf$1H*~S7VXmy} zCav*I0%-e9Rux;E9E+#{(iE518ZE(dKdV8#Cq!^;stO&dTrJE3}A>{YSNfRE8 zO9j&7b)%cN)-^)EluEKLn2dczfwg#pBM;*JD|8{@Y%-WJdS+W`PufWFK$JSr|u@2?Mt&_ zi^k5s6M5~1zvo{H33`szBhiRl+z1|pGf{Pmhc0m>nM|!Wwr1-vI+~rNq|M9LQ187O z_qfXwKlkL}=a5||fiCYvwYByd6cAwk2X)Z=1v$#YpCMrv%aIVLxdVdn3yIS+wufbxK>;r|~$NC`agf|c~$w*JQ4 zVqgIN3L16YS=OBNW7=Kxl0{KgfK2_7FUSp zqg#pc3q*!ipSGcSM#&NR;=6lpJY2civ^P2hw;TuZ0={=G4^z)ngJI1L$}oG$1i`r& zZQYAUF|qA6e5L9Ez3s7LPF)4#_meNkh^IzIscFKLX3mKWu=Bm=`4Y?;%B#7|iDpI# zg1v6m5b{iZX{lQ!z0E6@fw>yArG4g*N64%Qb)EX%|2bB8`ujpWN1NZDuI)(p zLfORCnHsEAnj50EJW`l&&^{sRcN>;D(mKmBEL*_7Ek#~IC&9N7dx`GJqomGzBE>GP zlb_>QaQWFM@F2bPd51NJ42a;o@>(b2_~uZP`M6(G)9hGY9YTozo&A&-wZyf4+0mY@ z0|}RuHcBJEzi2?wMtye&^Trz{Qiw4#4WI`_)*=_OIA^fE&@qO8=lDUQ_-LuWu|iSo zo#vpv1U~0nvt=n`mcTvz9EA2byrzAs%!fj(vz!LWc{SCw=YO78zoN!3DBEjwTuLSe z1k*#8tsXfIX|`)ygteu6U%OfO8gSfUK%CBUFhoY{CCmDzK-8|9DA`5&$DUQ`ed+z| z%J-Tpyjy6&51%2Ej{qXFT5b&8^Yrtoq%?4CuJ8zZ$lgzYuU>ZDd|uB z4J!#du-So^eJtwqf@z6DR#u2naAV`M{c$QlFY`1cULCu7`0LNe|~a9;vWI> z{!slwU^jjEUq*8-SEru-$7t@w++f{@52KX0_1m)z8Xr?|H$V(-Ql4;{pH&$3r0}jY zrp%RHO|RFL1fKavH^?Nf8O4i6ADh4tO_M*y+xF{XyP}MKo!F{&?Q>fBU%^(jk3b>6 z!?uS@8t(=5`lVSOHb|cp5ha)~_cM1>Xx(Mry((Keq*N-yt|cMn$!u%)z#MH`fT+W7XZMT&i?^mE#DXC zJD%KfCERf6A)$9d2inBZ#Tz$59n<;Ey)I)Nn3qnFm4~a1OTU+2I$JvZONzhdOZ@&b zsEWJCQC=_wdQ~zICVywULV#Ha3bM)KhcP#^0x&VzFe$I=ujc)A&7S^Od9Bc$b@AFA zc`(?gZ+3e#(s_@Gp5&NXx>)k!bB-s~Gmv%Bw(Q{$)lIIlUPj)ZT5{M$y04fno4I5v zIZlV|{Ftjdy2YoUi|GCvHcaI25Y5L2Gm{tNISzO%lajEeTbBL4ua$7TN80rwga7oF zO2Rw$LQJB+`>nzzbG1@${lhM1-(JjUpQGTuY%-Iy-DHMe12CfyG{7PW4ra5kTq<-}Hg_op-(=6}l zyAYj4VxNZASb9yth1?_ktIj2&T{4DdC6O})`M!f1#@z4rhg3!f+-&Ag+h_xP}4 zvaZ!Egx**F=RW@2%fI&$dN5C6UIKzO&B)g-vzc_RBzDr|+2Y^Jg5&u^9gET@uB6Ep zbO--7IhO``9-6Hmw%rA0&yYLO;Dkqd~ z%XU*Li#Qd%sLrCW%b4heK&CT0v;lJ8j8}Nnm95Q6N@a8#+4cK-K3q{yH&S!&SIVTx z#>c<%@;~%y6?(zG_t04(T`(?BTi~9(DU&2RZN+rCeIS?$ua;C{)X+s1r5$z zN^Yekvr!-B*hBA3s%-8CXcX6{pRiY>p-`$=txBktzx_k-Io>79>F&Y7sOQ%nIp2*94p61(cj%s%Of{>nd1Vh< zKu!XHP=T+(z05?m!=ovC@m5$eRKzgoxbNI*m=UL;`?XU3tF4(|F!mms#b1)k9%PT5 zEoT__r{tjaS$si#gO@KaG{YB4m~<3#riiuqdq>Mb7CJsqR1%_h1lw=-3G{PYqgwBG ze(qTtxqE+<+5`Q!ISJCYg%#JQ4BBnB;#HUji&)2|6zeiUw63Gc#49|@I9%4&GwZtV z$F81B;x;e|Q-w>HALC)XTBH}5hHZBuKR`%8bX&uj_^VRqU?aAE^)+v_I1+S`Yw}DV3LH^N$CB^4G1McP@p-J(c)l_5s z_jj0dW(5Xp3EFs})2N2gP0U0>bcjR@ewEAqoy^NGqhvA{!|M0PBngIpf)_reh*WB04;Jff%``l{{cLT 
z;zJCZkY+u5ArzPk#hXQaTZEqwKdJl*w`Y$SEA5K;$*qm=)dU!S4lv-a?E|QR;?<%F zpt4c?(Gs)rNm)g{Vt6B_K1E|a-dH`?r=sfG@dLF3n*0%s4>5KV4y8E`wNI|1OszMz zfIvN>KRt{B)Q-RfX_UBk&W>2j3V!2yU&{md8<`yFsaEz6fOFFO+-jBOzQZpAI|d`X z09-(Dxyu2A#(R*JM9kFUu)op(KM%gyyn+c^BTZYO8(22y# z+BY^d>oz(wlJEt&)mUrVRYkmK7)LrC&OV;SnLS{?GImAmL)4bd+x9n^gAIT}#)BT= zEH9pN9T`Isxqw_lNDVE2j&C}VymbE&%DcWc*+jf|X~aSarY10PSj6Tc2ya68Iomh? z(c2tT`T3E7k_=SI{PhU9S=(_ig!Yy#{VfLxBcw_?IPOi}-yw>0fFha69d?fI{C1uC z^~h(VKjq0vhpBbQXuWJ1%KW;3%mTmw1-J12{td%wNjv*c#PAjdREk8`eJ!?DDIUG)TG^kt(jG<1Vf_O^Jl9V>OQ1LeK9?H%?&>X;jI2h+{I1=TS?; zdy3b0+|xRhs`&S@fJnDa(9^#LN0Laq-c-?5%+&dS3;PalaNxL zEuCq|euhnGY;1%ogAXR>jlC0%x|-5e=qpxk-#@%ZSFCJK?1ZQiXYBR5?xsZW2hyUF zWUC{YNf}dSeXHR)6TDn><+!iK^V&$~oytPdx4>v-yy!n+o#G?^9oD(T5wNL2?}t^JM(%gY==2)b2$;fIxo2Vtt|Uje_)-wx_@AuECB070$3-v-hUAt zHvykUF~_5tH?NT_NDFh7R}6HmI_USIwH!p;brKW4Iq8|<7}Z*^Hc9k0fwEYtQ-S2gW)5byd=whc_?FD* zqLQmLI!M$KgeNue!{fSvf^))I)o)D+pSQ12n+5=B4>>YDWzz}H8~1`2yEvXcn?JbL z#Evp7sB)@Si`?y=N2LrdK0bJ2?dkM&Yr}^%vz-&-C(|AIc{!Wo)E`14tS&~z0&7x^ ze~{}uRM58?9}?sDF+CI2y}Ht^9+9M0vgrO0^Xy5exULuAZVvXiU1V0WCwLhM9pHD1 zZU?dyRNSZ)Q~vG5aYfbMrB!Q+=;4#>sE{A$&1E$;WZv;6G48f+g*TfX%;9AtZ_r23 z?1F}YRe*pYB@Wa#eW{w&+iPtzgf}YmsXcTR@f&vV*S`Le$%oEXz^^GlzDlEt(G4?V zxAD@y@d~*Tj8U6icykg6#9<;6ikncAXesK;{Q0?t{03{WqoYPjb>Y?_@lRCZ?@1>G z(PrOh7jO;`-V>A|ffDHF6uBcg9t#xGBniDn>~EMf^l->j{(NQ!);B#p$Uf*4rn|XG zXP=UBZ=;oN=_l(estfDjeH7*;Q+$aENb2V>k_@GK%{?FSs(>1p<14*;o_5c&++>NQ zH{N77Z~uNN&bu1pzV*xp{0EW`SYWmP)k_q?%ejN56*=|l&Qd&yu|&}(Jwk?NAf`nL zr^s=M@(JP=?QonA(vaBt?#RcPc?;MdlcDuR~z%1Q7!SZ9)&= z<%02e^g2?~_##urkx6Mwj%bl)wUF)?;hd>`+QeF4ir3Tk6Jrx5@s^>#?BmK#*S|P= z48?nf(B&TBuHq8=HGF~PZ zj=T77RMXOU-PIrW`)e{{tdkeFwH5Qu?w*{WGeXj{XpWsSEPr*=0ke81T!W;4Okr-X*X#r@=UzTnp z{@o(cx7xvMY4&vAl`CTtay<20IgT)}+d5D8eb1?hTVo$g;XXb8!P?zq(GKDk>G4M{ zo?~5(E)F#EDV=OcqT5zkrK(1u(&bLoDH-41Z)kw>u-_skat-ea!I|#tP-a5QCg=w< zgGxC1W#)6%3tZlraQ;Y=4oUNlrx0CE3NSL@+AhS_V~YsDDR@cfpjT_P6CO;sp=`)l!IVL(cIfZMm!gzw2tmB3EQqHQ@*QKF71(+Q8-5RF?kDvvKw zXwNh@7_7XNoOviMwIMsVMIRahV=)42SN^o?u~vF{?V`jKIgGS(rguq&<(0@F%J~N0 zqPqzt&}dF`wc@<2^VN)O?`m8&%5GQ3WsI7@>}XLVCI0dSjJ#QU3gjiDL!aNR^PiRI z>lx91DiQ0E>;3%vlE~s9ef@VkwR0?UfqYd>7D$v~LFDW{XV9lTA$-;^AI1$$%7nG( z;RxMZFt71y{n62S`LX6gT&p$5%$`s5CEr62waXJiwQvUf9X1@*AK9ukP%sPLX_(vw zwwiHDhCf3qzTrSu6#0e<|66_YrKok@566QShyRv`t-n4bekN^8>T%DjX`Uyx+#51r zMNf-T(V@tlYsm>n8Llh#ITUcJ%0NCnKj^Q1mq--K*2!vbT&{U&W8>k%BX7jEwdjKv z)01O_*B)rm!CzsWGem^tnW{84rpk?sJW;vXA;0%9|zY<6~4g%|7b` zq>-PK7VTtO-V`8Q`tXO{b~SuixZ~J8)SCQYBS(%&t4MRjqD;@eE4EAsvUopi{!Q{} z=4En`k3c$Pz&y>6S@=2WfRM$DKmjkHi`4YWZoQY0&mBve^Bd288?z9<>VBa;<@IJD z!R`puwKG+`%rV6UvuT6XB?$BgdRnNBB7%A6OGQ1H5~AL>cFx4q;!7mc?{&m=hT#ss z{hA%TD{Y1(PV_~_QP|I0B3+bg#vEmQfk0FKH0G%pnH zAylsOmTFSzCde@chSt(}K~~5cKp8}0;It0@L9nug%>0=!=KJ?rf);tznXW2gpYIo1 zW*ocU@~dRAclTk9rn6)nv89yMRi5M@9mOW_G-Q#Jd8Bgs8=IKSXevA&EwQ{BzbKD& zs9rSVDin3Um|eb^YPGF-V1G4MLF1k7dCsp#o6S_^O5ijko5f2bdUShHsl+|}gEj^8 zPDvSvSG%6Qfc)(SmiF~M_jL4MQtn&)j&(lcwW-u=`?eE;(D7f+{2gJS8p3$#d=)V1 z7v+j$JJ8iOp$K`}eCEzu_&^phu?gk*vL)j={gmvS;q~APhwA(NHD0tn7La-&aktpX zJnmTe)MuI~JD3+knao4(ZqelTxoMYw!HcqlCsiXf2r{@Nwto5t+w+<~oDCYxibJ(C zzeVyMzq=mM^g~;%^wOljoIf~@)fy~RG9}t3q*&4IqH&18f}|@3iaVp*HV&|x14~AW z)s4oSXmcrJHpbf}UL+sgmnePa$eW|z8z2JlfEpgC0T9DQ5{qHJ!eak*Qz# zvb#m0(BQ5J$y?SC^*3x2*Ou8a z6izq*samq631JN02|kUS+@@Nmeid?>KdM3eS@)0Xl_ug=!`-+BJ@p{yVEZSI9%1@}1E1V>2@5G+F&~*bfwKDJpbj^z#6rZN3%F7|B;uU#uTs`L3R;F{C z{Eb&ruMkR3XdA7VYNOd3n$LT$C+4?Xdq~r^jg)?q2wk@;42|$APBdckfbl$*B#Ad< zYAt~1Xqhv0Ebljo$EHf_OYk;*%YoaD6bByKX-LT0+nmT1_ldlP@ zXI*3)^2+&g+-p$S^ztyVgD;rG0cZABu*tqyBYGrtC0fOU_}XN_e=cYAzgIrM&c4la4{)s 
zUzSaOUR_0P{Vn6$=YwZV-GvX2v5uH+Qd0cE5he8gB!F(DqAP;pqYnH)M1<@H{g(XJ z#zi!nR{PeBjOvTYSFE&++pUW*X)d9TrYf6nxsl!Mz&E>(5#A#Ie9?OpGy)vj4m3n!evO+0|TXx@T{o-1hvm!NKDr(w6%ihwJLil9xqJ+msI!tM*nKSuSamUiB$edMcojTDC7OY%+ZN zmJkr#N9Kwx^S877K!~>vFNZJB|A^e#bjY8%;(B&!nYv*^Ez)(pHh<^ALa@kT$(8J0 zhjZo6l7cO7+9srkf>AWbhCg}_9!UWudsJZBrfZZaj&o-}9lFEW>(6*hkS?pKWInz( z_$ctzoL_B{q8b+p7HO-0=*ndKN3-Ppg4lO*xEA+^kG>>FCa7}v=Qec-=kf&P|ikx4=6?+U2dhy-*8dm(?l>1Mt eBP>5I#D{NUi~JkXn*{&=Z Date: Tue, 22 Mar 2022 10:07:28 +0000 Subject: [PATCH 12/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/result/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/result/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 168ba4f80055c8c120bbc984df079598057a2b06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:08:37 +0000 Subject: [PATCH 13/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh. --- .../test/train_full_1p.sh | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh new file mode 100644 index 000000000..c74dd2525 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_full_1p.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +########################################################## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +########################################################## +# shell脚本所在路径 +cur_path=`echo $(cd $(dirname $0);pwd)` + +# 判断当前shell是否是performance +perf_flag=`echo $0 | grep performance | wc -l` + +# 当前执行网络的名称 +Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'` + +export RANK_SIZE=1 +export RANK_ID=0 +export JOB_ID=10087 + +# 路径参数初始化 +data_path="" +output_path="" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1P.sh " + echo " " + echo "parameter explain: + --data_path # dataset of training + --output_path # output of training + --train_steps # max_step for training + --train_epochs # max_epoch for training + --batch_size # batch size + -h/--help show help message + " + exit 1 +fi + +# 参数校验,不需要修改 +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --output_path* ]];then + output_path=`echo ${para#*=}` + elif [[ $para == --train_steps* ]];then + train_steps=`echo ${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --batch_size* ]];then + batch_size=`echo ${para#*=}` + fi +done + +# 校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +# 校验是否传入output_path,不需要修改 +if [[ 
$output_path == "" ]];then + output_path="./test/output/${ASCEND_DEVICE_ID}" +fi + +# 设置打屏日志文件名,请保留,文件名为${print_log} +print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" +modelarts_flag=${MODELARTS_MODEL_PATH} +if [ x"${modelarts_flag}" != x ]; +then + echo "running without etp..." + print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank` + print_log="/home/ma-user/modelarts/log/${print_log_name}" +fi +echo "### get your log here : ${print_log}" + +CaseName="" +function get_casename() +{ + if [ x"${perf_flag}" = x1 ]; + then + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf' + else + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc' + fi +} + +# 跳转到code目录 +cd ${cur_path}/../ +rm -rf ./test/output/${ASCEND_DEVICE_ID} +mkdir -p ./test/output/${ASCEND_DEVICE_ID} + +# 训练开始时间记录,不需要修改 +start_time=$(date +%s) +########################################################## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +########################################################## + +#========================================================= +#========================================================= +#========训练执行命令,需要根据您的网络进行修改============== +#========================================================= +#========================================================= +# 基础参数,需要模型审视修改 +# 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 +# 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 +# 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 +train_epochs=30 +batch_size=4 + +if [ x"${modelarts_flag}" != x ]; +then + python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \ + --epochs=${train_epochs} --batch_size=${batch_size} +else + python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \ + --epochs=${train_epochs} --batch_size=${batch_size} 1>${print_log} 2>&1 +fi + +# 性能相关数据计算 + +#读取iteration/s数据(ITPS),再计算StepTime = 1/ITPS; FPS=BATCH_SIZE * ITPS +ITPS=`grep "100%" ${print_log} | awk '{print $NF}'| cut -d "i" -f 1 | awk '{sum+=$1} END {print sum/NR}'` +StepTime=`awk 'BEGIN{printf "%.2f", '1'/'${ITPS}'}'` +FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*'${ITPS}'}'` + + +# 精度相关数据计算 +train_accuracy=`grep "Validation PCKh@0.5:" ${print_log} | tail -n 1| awk '{print $4}' | cut -c 10- | awk '{sum+=$1} END {print sum/NR}'` + +# 提取所有loss打印信息 +grep "loss=" ${print_log} | awk -F "=" '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt + + +########################################################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +########################################################### + +# 判断本次执行是否正确使用Ascend NPU +use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l` +if [ x"${use_npu_flag}" == x0 ]; +then + echo "------------------ ERROR NOTICE START ------------------" + echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration." + echo "------------------ ERROR NOTICE END------------------" +else + echo "------------------ INFO NOTICE START------------------" + echo "INFO, your task have used Ascend NPU, please check your result." 
+ echo "------------------ INFO NOTICE END------------------" +fi + +# 获取最终的casename,请保留,case文件名为${CaseName} +get_casename + +# 重命名loss文件 +if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ]; +then + mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt +fi + +# 训练端到端耗时 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +echo "------------------ Final result ------------------" +# 输出性能FPS/单step耗时/端到端耗时 +echo "Final Performance images/sec : $FPS" +echo "Final Performance sec/step : $StepTime" +echo "E2E Training Duration sec : $e2e_time" + +# 输出训练精度 +echo "Final Train Accuracy : ${train_accuracy}" + +# 最后一个迭代loss值,不需要修改 +ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`) + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From 2a420031d05f1d6c46645e1af79f89a6bcccf76a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:09:25 +0000 Subject: [PATCH 14/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh. 
--- .../test/train_performance_1p.sh | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh new file mode 100644 index 000000000..977218b85 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/test/train_performance_1p.sh @@ -0,0 +1,191 @@ +#!/bin/bash + +########################################################## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +########################################################## +# shell脚本所在路径 +cur_path=`echo $(cd $(dirname $0);pwd)` + +# 判断当前shell是否是performance +perf_flag=`echo $0 | grep performance | wc -l` + +# 当前执行网络的名称 +Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'` + +export RANK_SIZE=1 +export RANK_ID=0 +export JOB_ID=10087 + +# 路径参数初始化 +data_path="" +output_path="" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1P.sh " + echo " " + echo "parameter explain: + --data_path # dataset of training + --output_path # output of training + --train_steps # max_step for training + --train_epochs # max_epoch for training + --batch_size # batch size + -h/--help show help message + " + exit 1 +fi + +# 参数校验,不需要修改 +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --output_path* ]];then + output_path=`echo ${para#*=}` + elif [[ $para == --train_steps* ]];then + train_steps=`echo ${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --batch_size* ]];then + batch_size=`echo ${para#*=}` + fi +done + +# 校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +# 校验是否传入output_path,不需要修改 +if [[ $output_path == "" ]];then + output_path="./test/output/${ASCEND_DEVICE_ID}" +fi + +# 设置打屏日志文件名,请保留,文件名为${print_log} +print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" +modelarts_flag=${MODELARTS_MODEL_PATH} +if [ x"${modelarts_flag}" != x ]; +then + echo "running with modelarts..." 
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank` + print_log="/home/ma-user/modelarts/log/${print_log_name}" +fi +echo "### get your log here : ${print_log}" + +CaseName="" +function get_casename() +{ + if [ x"${perf_flag}" = x1 ]; + then + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf' + else + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc' + fi +} + +# 跳转到code目录 +cd ${cur_path}/../ +rm -rf ./test/output/${ASCEND_DEVICE_ID} +mkdir -p ./test/output/${ASCEND_DEVICE_ID} + +# 训练开始时间记录,不需要修改 +start_time=$(date +%s) +########################################################## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +########################################################## + +#========================================================= +#========================================================= +#========训练执行命令,需要根据您的网络进行修改============== +#========================================================= +#========================================================= +# 基础参数,需要模型审视修改 +# 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 +# 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 +# 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 +train_epochs=1 +batch_size=4 + +if [ x"${modelarts_flag}" != x ]; +then + python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \ + --epochs=${train_epochs} --batch_size=${batch_size} +else + python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} \ + --epochs=${train_epochs} --batch_size=${batch_size} 1>${print_log} 2>&1 +fi + +# 性能相关数据计算 + +#读取iteration/s数据(ITPS),再计算StepTime = 1/ITPS; FPS=BATCH_SIZE * ITPS +ITPS=`grep "100%" ${print_log} | awk '{print $NF}'| cut -d "i" -f 1 | awk '{sum+=$1} END {print sum/NR}'` +StepTime=`awk 'BEGIN{printf "%.2f", '1'/'${ITPS}'}'` +FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*'${ITPS}'}'` + + +# 精度相关数据计算 +train_accuracy=`grep "Validation PCKh@0.5:" ${print_log} | tail -n 1| awk '{print $4}' | cut -c 10- | awk '{sum+=$1} END {print sum/NR}'` + +# 提取所有loss打印信息 +grep "loss=" ${print_log} | awk -F "=" '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt + + +########################################################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +########################################################### + +# 判断本次执行是否正确使用Ascend NPU +use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l` +if [ x"${use_npu_flag}" == x0 ]; +then + echo "------------------ ERROR NOTICE START ------------------" + echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration." + echo "------------------ ERROR NOTICE END------------------" +else + echo "------------------ INFO NOTICE START------------------" + echo "INFO, your task have used Ascend NPU, please check your result." 
+ echo "------------------ INFO NOTICE END------------------" +fi + +# 获取最终的casename,请保留,case文件名为${CaseName} +get_casename + +# 重命名loss文件 +if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ]; +then + mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt +fi + +# 训练端到端耗时 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +echo "------------------ Final result ------------------" +# 输出性能FPS/单step耗时/端到端耗时 +echo "Final Performance images/sec : $FPS" +echo "Final Performance sec/step : $StepTime" +echo "E2E Training Duration sec : $e2e_time" + +# 输出训练精度 +echo "Final Train Accuracy : ${train_accuracy}" + +# 最后一个迭代loss值,不需要修改 +ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`) + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From 71e7f1ee1b024e82a595788d5be81f333210e2ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:10:25 +0000 Subject: [PATCH 15/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt. --- .../requirements.txt | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt new file mode 100644 index 000000000..959dee0a6 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/requirements.txt @@ -0,0 +1,8 @@ +matplotlib==3.4.0 +numpy==1.19.3 +opencv_python==4.5.1.48 +scikit_image==0.18.1 +scipy==1.2.1 +skimage==0.0 +tensorflow==1.15.0 +tqdm==4.62.2 -- Gitee From ce699e71f175083f735b79a69c2fdfa3ddb1396f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:11:11 +0000 Subject: [PATCH 16/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py. --- .../online_inference.py | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py new file mode 100644 index 000000000..13c9bc86a --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/online_inference.py @@ -0,0 +1,77 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * + +from packages.lifting import PoseEstimator +from packages.lifting.utils import plot_pose, draw_limbs + +import cv2 +import matplotlib.pyplot as plt +import argparse +import os + + +# set up the argparse +parser = argparse.ArgumentParser() + +parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/model.ckpt') # checkpoint path +parser.add_argument('--prob_model_path', type=str, + default='./data/prob_model/prob_model_params.mat') # 3d model path +parser.add_argument('--test_image', type=str, + default='./dataset/MPII/images/099363014.jpg') +parser.add_argument('--result_path', type=str, + default='./result') + +args = parser.parse_args() + + +def main(): + # read image + image = cv2.imread(args.test_image) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # conversion to rgb + image_size = image.shape + + # initialize model + pose_estimator = PoseEstimator(image_size, args.checkpoint_path, args.prob_model_path) + + # load model + pose_estimator.initialise() + + # estimation + pose_2d, visibility, pose_3d = pose_estimator.estimate(image, lifting=True) + + # Show 2D and 3D poses + display_results(image, pose_2d, visibility, pose_3d) + # close model + pose_estimator.close() + + +def display_results(in_image, data_2d, joint_visibility, data_3d): # 2d3d resalt visualization + """Plot 2D and 3D poses for each of the people in the image.""" + plt.figure() + draw_limbs(in_image, data_2d, joint_visibility) + plt.imshow(in_image) + + plt.axis('off') + # save 2d image + plt.savefig(os.path.join(args.result_path,'result2d.jpg')) + + # Show 3D poses + for i, single_3D in enumerate(data_3d): + plot_pose(single_3D) + plt.savefig(os.path.join(args.result_path, 'result3d_{}.jpg'.format(i))) # save images + +if __name__ == '__main__': + import sys + sys.exit(main()) -- Gitee From 25b90df1f812f549d3a07c290ad5356b3107513d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:12:00 +0000 Subject: [PATCH 17/43] add TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py. --- .../packages / lifting/__init__.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py new file mode 100644 index 000000000..3bb056e45 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from ._pose_estimator import * +from . import utils + -- Gitee From 04c3e03bb50546e26252817cf04ce1b814b134c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:12:38 +0000 Subject: [PATCH 18/43] add --- .../packages / lifting/_pose_estimator.py | 187 ++++++++++++++++++ 1 file changed, 187 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py new file mode 100644 index 000000000..986ea7bbf --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py @@ -0,0 +1,187 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from . import utils +import cv2 +import numpy as np +import tensorflow as tf +import abc + +ABC = abc.ABCMeta('ABC', (object,), {}) + +__all__ = [ + 'PoseEstimatorInterface', + 'PoseEstimator' +] + + +class PoseEstimatorInterface(ABC): + + @abc.abstractmethod + def initialise(self, args): + pass + + @abc.abstractmethod + def estimate(self, image): + return + + @abc.abstractmethod + def train(self, image, labels): + return + + @abc.abstractmethod + def close(self): + pass + + +class PoseEstimator(PoseEstimatorInterface): + + def __init__(self, image_size, session_path, prob_model_path): + """Initialising the graph in tensorflow. 
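+        (Note: the constructor only stores the image size, the derived scale and the
+        Prob3dPose helper; the TensorFlow graph and session are built later, in
+        initialise().)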
+ INPUT: + image_size: Size of the image in the format (w x h x 3)""" + + self.session = None + self.poseLifting = utils.Prob3dPose(prob_model_path) + self.sess = -1 + self.orig_img_size = np.array(image_size) + self.scale = utils.config.INPUT_SIZE / (self.orig_img_size[0] * 1.0) + self.img_size = np.round( + self.orig_img_size * self.scale).astype(np.int32) + self.image_in = None + self.heatmap_person_large = None + self.pose_image_in = None + self.pose_centermap_in = None + self.pred_2d_pose = None + self.likelihoods = None + self.session_path = session_path + + def initialise(self): + """Load saved model in the graph + INPUT: + sess_path: path to the dir containing the tensorflow saved session + OUTPUT: + sess: tensorflow session""" + # initialize graph structrue + tf.reset_default_graph() + + with tf.variable_scope('CPM'): + # placeholders for person network + self.image_in = tf.placeholder( + tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 3]) + self.label_in = tf.placeholder( + tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 1]) + + heatmap_person = utils.inference_person(self.image_in) + + self.heatmap_person_large = tf.image.resize_images( + heatmap_person, [utils.config.INPUT_SIZE, self.img_size[1]]) + + # placeholders for pose network + self.pose_image_in = tf.placeholder( + tf.float32, + [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 3]) + + self.pose_centermap_in = tf.placeholder( + tf.float32, + [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 1]) + + self.pred_2d_pose, self.likelihoods = utils.inference_pose( + self.pose_image_in, self.pose_centermap_in, + utils.config.INPUT_SIZE) + + # set up loss and optimizer + self.loss = tf.reduce_mean(tf.abs(self.heatmap_person_large - self.label_in)) + self.optimizer = npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate=0.0000001)).minimize(self.loss) + + # load pretraining model + sess = tf.Session(config=npu_config_proto()) + sess.run(tf.global_variables_initializer()) + variables = tf.contrib.framework.get_variables_to_restore() + variables_to_resotre = [v for v in variables if v.name.split('/')[-1][:4] != 'Adam' and v.name[:4] != 'beta'] + self.saver = tf.train.Saver(variables_to_resotre) + self.saver.restore(sess, self.session_path) + self.session = sess + + def train(self, image, labels): + # input model,back propagation and then output loss + b_image = np.array(image / 255.0 - 0.5, dtype=np.float32) + labels = labels[:, :, :, np.newaxis] + + # self.session.run(self.optimizer, {self.image_in: b_image, self.label_in: labels}) + _, loss, heatmap_pred = self.session.run([self.optimizer, self.loss, self.heatmap_person_large], + feed_dict={self.image_in: b_image, self.label_in: labels}) + return loss, heatmap_pred + + def estimate(self, image, lifting=False): + """ + Estimate 2d and 3d poses on the image. 
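+        (Shape example, using the joint counts from utils.config: for a single
+        detected person, pose_2d is (1, 14, 2) and pose_3d is (1, 3, 17).)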
+ INPUT: + image: RGB image in the format (w x h x 3) + sess: tensorflow session + OUTPUT: + pose_2d: 2D pose for each of the people in the image in the format + (num_ppl x num_joints x 2) + visibility: vector containing a bool + value for each joint representing the visibility of the joint in + the image (could be due to occlusions or the joint is not in the + image) + pose_3d: 3D pose for each of the people in the image in the + format (num_ppl x 3 x num_joints) + hmap_person: heatmap + """ + # test model + sess = self.session + + image = cv2.resize(image, (0, 0), fx=self.scale, + fy=self.scale, interpolation=cv2.INTER_CUBIC) + b_image = np.array(image[np.newaxis] / 255.0 - 0.5, dtype=np.float32) + + hmap_person_viz = sess.run(self.heatmap_person_large, { + self.image_in: b_image}) + hmap_person = np.squeeze(hmap_person_viz) + + centers = utils.detect_objects_heatmap(hmap_person) + b_pose_image, b_pose_cmap = utils.prepare_input_posenet( + b_image[0], centers, + [utils.config.INPUT_SIZE, image.shape[1]], + [utils.config.INPUT_SIZE, utils.config.INPUT_SIZE], + batch_size=utils.config.BATCH_SIZE) + + feed_dict = { + self.pose_image_in: b_pose_image, + self.pose_centermap_in: b_pose_cmap + } + + # Estimate 2D poses + pred_2d_pose, pred_likelihood = sess.run([self.pred_2d_pose, + self.likelihoods], + feed_dict) + + estimated_2d_pose, visibility = utils.detect_parts_from_likelihoods(pred_2d_pose, + centers, + pred_likelihood) + + pose_2d = np.round(estimated_2d_pose / self.scale).astype(np.int32) + + # Estimate 3D poses + if lifting: + transformed_pose2d, weights = self.poseLifting.transform_joints( + estimated_2d_pose.copy(), visibility) + pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights) + return pose_2d, visibility, pose_3d + + return pose_2d, hmap_person + def close(self): + self.session.close() -- Gitee From 7e6b6ed5551e61f95975ef999183376bfdc7d686 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:13:25 +0000 Subject: [PATCH 19/43] add --- .../packages / lifting/utils/__init__.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py new file mode 100644 index 000000000..cbbdebfd8 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from .prob_model import * +from .draw import * +from .cpm import * +from .process import * +from . import config +from . 
import upright_fast + -- Gitee From c88568199d3edb87bd2c2ba60cee47a87d050e69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:13:51 +0000 Subject: [PATCH 20/43] add --- .../packages / lifting/utils/config.py | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py new file mode 100644 index 000000000..f81a92fe3 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py @@ -0,0 +1,51 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +__all__ = [ + 'VISIBLE_PART', + 'MIN_NUM_JOINTS', + 'CENTER_TR', + 'SIGMA', + 'STRIDE', + 'SIGMA_CENTER', + 'INPUT_SIZE', + 'OUTPUT_SIZE', + 'NUM_JOINTS', + 'NUM_OUTPUT', + 'H36M_NUM_JOINTS', + 'JOINT_DRAW_SIZE', + 'LIMB_DRAW_SIZE' +] + +# threshold +VISIBLE_PART = 1e-3 +MIN_NUM_JOINTS = 5 +CENTER_TR = 0.4 + +# net attributes +SIGMA = 7 +STRIDE = 8 +SIGMA_CENTER = 21 +INPUT_SIZE = 368 +OUTPUT_SIZE = 46 +NUM_JOINTS = 14 +NUM_OUTPUT = NUM_JOINTS + 1 +H36M_NUM_JOINTS = 17 + +# draw options +JOINT_DRAW_SIZE = 3 +LIMB_DRAW_SIZE = 1 +NORMALISATION_COEFFICIENT = 1280*720 + +# test options +BATCH_SIZE = 4 -- Gitee From bb2aac7fb10463bed6e7d8ff29a354c21240696f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:14:15 +0000 Subject: [PATCH 21/43] add --- .../packages / lifting/utils/cpm.py | 408 ++++++++++++++++++ 1 file changed, 408 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py new file mode 100644 index 000000000..1f8a3a847 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py @@ -0,0 +1,408 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
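+# This module implements the two convolutional pose machine (CPM) style networks used
+# by PoseEstimator: inference_person() produces a single-channel person heat-map from
+# the full image, and inference_pose() refines 15-channel joint heat-maps over up to
+# six stages, returning 2D joint coordinates and per-joint likelihoods through
+# _process_stage().
+# A minimal usage sketch (values assumed, with inputs preprocessed as in
+# PoseEstimator.estimate and hm_size = config.INPUT_SIZE = 368):
+#   person_hmap = inference_person(image_tensor)                    # N x H/8 x W/8 x 1
+#   joints_2d, prob = inference_pose(crop_tensor, centre_map, 368)  # N x 2 x 14, N x 14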
+from npu_bridge.npu_init import * + +import tensorflow as tf +import tensorflow.contrib.layers as layers + +__all__ = [ + 'inference_person', + 'inference_pose' +] + + +def inference_person(image): + with tf.variable_scope('PersonNet'): + conv1_1 = layers.conv2d( + image, 64, 3, 1, activation_fn=None, scope='conv1_1') + conv1_1 = tf.nn.relu(conv1_1) + conv1_2 = layers.conv2d( + conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') + conv1_2 = tf.nn.relu(conv1_2) + pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) + conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, + activation_fn=None, scope='conv2_1') + conv2_1 = tf.nn.relu(conv2_1) + conv2_2 = layers.conv2d( + conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') + conv2_2 = tf.nn.relu(conv2_2) + pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) + conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, + activation_fn=None, scope='conv3_1') + conv3_1 = tf.nn.relu(conv3_1) + conv3_2 = layers.conv2d( + conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') + conv3_2 = tf.nn.relu(conv3_2) + conv3_3 = layers.conv2d( + conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') + conv3_3 = tf.nn.relu(conv3_3) + conv3_4 = layers.conv2d( + conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') + conv3_4 = tf.nn.relu(conv3_4) + pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) + conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, + activation_fn=None, scope='conv4_1') + conv4_1 = tf.nn.relu(conv4_1) + conv4_2 = layers.conv2d( + conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') + conv4_2 = tf.nn.relu(conv4_2) + conv4_3 = layers.conv2d( + conv4_2, 512, 3, 1, activation_fn=None, scope='conv4_3') + conv4_3 = tf.nn.relu(conv4_3) + conv4_4 = layers.conv2d( + conv4_3, 512, 3, 1, activation_fn=None, scope='conv4_4') + conv4_4 = tf.nn.relu(conv4_4) + conv5_1 = layers.conv2d( + conv4_4, 512, 3, 1, activation_fn=None, scope='conv5_1') + conv5_1 = tf.nn.relu(conv5_1) + conv5_2_CPM = layers.conv2d( + conv5_1, 128, 3, 1, activation_fn=None, scope='conv5_2_CPM') + conv5_2_CPM = tf.nn.relu(conv5_2_CPM) + conv6_1_CPM = layers.conv2d( + conv5_2_CPM, 512, 1, 1, activation_fn=None, scope='conv6_1_CPM') + conv6_1_CPM = tf.nn.relu(conv6_1_CPM) + conv6_2_CPM = layers.conv2d( + conv6_1_CPM, 1, 1, 1, activation_fn=None, scope='conv6_2_CPM') + concat_stage2 = tf.concat([conv6_2_CPM, conv5_2_CPM], 3) + Mconv1_stage2 = layers.conv2d( + concat_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage2') + Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) + Mconv2_stage2 = layers.conv2d( + Mconv1_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage2') + Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) + Mconv3_stage2 = layers.conv2d( + Mconv2_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage2') + Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) + Mconv4_stage2 = layers.conv2d( + Mconv3_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage2') + Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) + Mconv5_stage2 = layers.conv2d( + Mconv4_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage2') + Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) + Mconv6_stage2 = layers.conv2d( + Mconv5_stage2, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage2') + Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) + Mconv7_stage2 = layers.conv2d( + Mconv6_stage2, 1, 1, 1, activation_fn=None, scope='Mconv7_stage2') + concat_stage3 = tf.concat([Mconv7_stage2, conv5_2_CPM], 3) + Mconv1_stage3 = layers.conv2d( + concat_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage3') + Mconv1_stage3 = 
tf.nn.relu(Mconv1_stage3) + Mconv2_stage3 = layers.conv2d( + Mconv1_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage3') + Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) + Mconv3_stage3 = layers.conv2d( + Mconv2_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage3') + Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) + Mconv4_stage3 = layers.conv2d( + Mconv3_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage3') + Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) + Mconv5_stage3 = layers.conv2d( + Mconv4_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage3') + Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) + Mconv6_stage3 = layers.conv2d( + Mconv5_stage3, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage3') + Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) + Mconv7_stage3 = layers.conv2d( + Mconv6_stage3, 1, 1, 1, activation_fn=None, + scope='Mconv7_stage3') + concat_stage4 = tf.concat([Mconv7_stage3, conv5_2_CPM], 3) + Mconv1_stage4 = layers.conv2d( + concat_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage4') + Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) + Mconv2_stage4 = layers.conv2d( + Mconv1_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage4') + Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) + Mconv3_stage4 = layers.conv2d( + Mconv2_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage4') + Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) + Mconv4_stage4 = layers.conv2d( + Mconv3_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage4') + Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) + Mconv5_stage4 = layers.conv2d( + Mconv4_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage4') + Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) + Mconv6_stage4 = layers.conv2d( + Mconv5_stage4, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage4') + Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) + Mconv7_stage4 = layers.conv2d( + Mconv6_stage4, 1, 1, 1, activation_fn=None, scope='Mconv7_stage4') + return Mconv7_stage4 + + +def _argmax_2d(tensor): + """ + Compute argmax on the 2nd and 3d dimensions of the tensor. + e.g. given an input tensor of size N x K x K x C, then it computes the (x,y) coordinates for + each of the N images and C channels, corresponding to the max for that image and channel. 
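+    (Illustrative decode with a hypothetical K = 46 and flattened argmax 1234:
+    x = 1234 % 46 = 38 and y = 1234 // 46 = 26, matching idx_x / idx_y below.)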
+ :param tensor: image of size N x K x K x C + :return: argmax in the format N x 2 x C (where C corresponds to NUM_JOINTS) + """ + # get size + shape = tensor.get_shape().as_list()[1] + n_channels = tf.shape(tensor)[-1] + + # process each channel + linearised_channel = tf.reshape(tensor, [-1, shape * shape, n_channels]) + best_channel = tf.argmax(linearised_channel, axis=1) + + idx_y = tf.expand_dims(tf.floordiv(best_channel, shape), axis=1) + idx_x = tf.expand_dims(tf.mod(best_channel, shape), axis=1) + argmax_channels = tf.concat([idx_x, idx_y], axis=1, name='output') + return argmax_channels + + +def _process_stage(heat_maps, hm_size): + """ + For each heat-map identify joint position and likelihood + :param heat_maps: input heat-maps + :param hm_size: size in which to return the coordinates + :return: 2d joints (BATCH_SIZE x 14 x 2) + likelihood for each joint (BATCH_SIZE x 14) + """ + rescaled = tf.image.resize_images(heat_maps[:, :, :, :-1], [hm_size, hm_size]) + uncertainty = tf.reduce_max(tf.reduce_mean(rescaled, axis=1), axis=1, name='prob') + return _argmax_2d(rescaled), uncertainty + + +def inference_pose(image, center_map, hm_size, stage=6): + with tf.variable_scope('PoseNet'): + pool_center_lower = layers.avg_pool2d(center_map, 9, 8, padding='SAME') + conv1_1 = layers.conv2d( + image, 64, 3, 1, activation_fn=None, scope='conv1_1') + conv1_1 = tf.nn.relu(conv1_1) + conv1_2 = layers.conv2d( + conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') + conv1_2 = tf.nn.relu(conv1_2) + pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) + conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, + activation_fn=None, scope='conv2_1') + conv2_1 = tf.nn.relu(conv2_1) + conv2_2 = layers.conv2d( + conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') + conv2_2 = tf.nn.relu(conv2_2) + pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) + conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, + activation_fn=None, scope='conv3_1') + conv3_1 = tf.nn.relu(conv3_1) + conv3_2 = layers.conv2d( + conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') + conv3_2 = tf.nn.relu(conv3_2) + conv3_3 = layers.conv2d( + conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') + conv3_3 = tf.nn.relu(conv3_3) + conv3_4 = layers.conv2d( + conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') + conv3_4 = tf.nn.relu(conv3_4) + pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) + conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, + activation_fn=None, scope='conv4_1') + conv4_1 = tf.nn.relu(conv4_1) + conv4_2 = layers.conv2d( + conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') + conv4_2 = tf.nn.relu(conv4_2) + conv4_3_CPM = layers.conv2d( + conv4_2, 256, 3, 1, activation_fn=None, scope='conv4_3_CPM') + conv4_3_CPM = tf.nn.relu(conv4_3_CPM) + conv4_4_CPM = layers.conv2d( + conv4_3_CPM, 256, 3, 1, activation_fn=None, scope='conv4_4_CPM') + conv4_4_CPM = tf.nn.relu(conv4_4_CPM) + conv4_5_CPM = layers.conv2d( + conv4_4_CPM, 256, 3, 1, activation_fn=None, scope='conv4_5_CPM') + conv4_5_CPM = tf.nn.relu(conv4_5_CPM) + conv4_6_CPM = layers.conv2d( + conv4_5_CPM, 256, 3, 1, activation_fn=None, scope='conv4_6_CPM') + conv4_6_CPM = tf.nn.relu(conv4_6_CPM) + conv4_7_CPM = layers.conv2d( + conv4_6_CPM, 128, 3, 1, activation_fn=None, scope='conv4_7_CPM') + conv4_7_CPM = tf.nn.relu(conv4_7_CPM) + conv5_1_CPM = layers.conv2d( + conv4_7_CPM, 512, 1, 1, activation_fn=None, scope='conv5_1_CPM') + conv5_1_CPM = tf.nn.relu(conv5_1_CPM) + conv5_2_CPM = layers.conv2d( + conv5_1_CPM, 15, 1, 1, activation_fn=None, scope='conv5_2_CPM') + 
concat_stage2 = tf.concat( + [conv5_2_CPM, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage2 = layers.conv2d( + concat_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage2') + Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) + Mconv2_stage2 = layers.conv2d( + Mconv1_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage2') + Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) + Mconv3_stage2 = layers.conv2d( + Mconv2_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage2') + Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) + Mconv4_stage2 = layers.conv2d( + Mconv3_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage2') + Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) + Mconv5_stage2 = layers.conv2d( + Mconv4_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage2') + Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) + Mconv6_stage2 = layers.conv2d( + Mconv5_stage2, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage2') + Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) + Mconv7_stage2 = layers.conv2d( + Mconv6_stage2, 15, 1, 1, activation_fn=None, scope='Mconv7_stage2') + if stage == 2: + return _process_stage(Mconv7_stage2, hm_size) + + concat_stage3 = tf.concat( + [Mconv7_stage2, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage3 = layers.conv2d( + concat_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage3') + Mconv1_stage3 = tf.nn.relu(Mconv1_stage3) + Mconv2_stage3 = layers.conv2d( + Mconv1_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage3') + Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) + Mconv3_stage3 = layers.conv2d( + Mconv2_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage3') + Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) + Mconv4_stage3 = layers.conv2d( + Mconv3_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage3') + Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) + Mconv5_stage3 = layers.conv2d( + Mconv4_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage3') + Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) + Mconv6_stage3 = layers.conv2d( + Mconv5_stage3, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage3') + Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) + Mconv7_stage3 = layers.conv2d( + Mconv6_stage3, 15, 1, 1, activation_fn=None, scope='Mconv7_stage3') + if stage == 3: + return _process_stage(Mconv7_stage3, hm_size) + + concat_stage4 = tf.concat( + [Mconv7_stage3, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage4 = layers.conv2d( + concat_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage4') + Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) + Mconv2_stage4 = layers.conv2d( + Mconv1_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage4') + Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) + Mconv3_stage4 = layers.conv2d( + Mconv2_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage4') + Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) + Mconv4_stage4 = layers.conv2d( + Mconv3_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage4') + Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) + Mconv5_stage4 = layers.conv2d( + Mconv4_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage4') + Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) + Mconv6_stage4 = layers.conv2d( + Mconv5_stage4, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage4') + Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) + Mconv7_stage4 = layers.conv2d( + Mconv6_stage4, 15, 1, 1, activation_fn=None, scope='Mconv7_stage4') + if stage == 4: + return _process_stage(Mconv7_stage4, hm_size) + + concat_stage5 = tf.concat( + [Mconv7_stage4, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage5 
= layers.conv2d( + concat_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage5') + Mconv1_stage5 = tf.nn.relu(Mconv1_stage5) + Mconv2_stage5 = layers.conv2d( + Mconv1_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage5') + Mconv2_stage5 = tf.nn.relu(Mconv2_stage5) + Mconv3_stage5 = layers.conv2d( + Mconv2_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage5') + Mconv3_stage5 = tf.nn.relu(Mconv3_stage5) + Mconv4_stage5 = layers.conv2d( + Mconv3_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage5') + Mconv4_stage5 = tf.nn.relu(Mconv4_stage5) + Mconv5_stage5 = layers.conv2d( + Mconv4_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage5') + Mconv5_stage5 = tf.nn.relu(Mconv5_stage5) + Mconv6_stage5 = layers.conv2d( + Mconv5_stage5, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage5') + Mconv6_stage5 = tf.nn.relu(Mconv6_stage5) + Mconv7_stage5 = layers.conv2d( + Mconv6_stage5, 15, 1, 1, activation_fn=None, scope='Mconv7_stage5') + if stage == 5: + return _process_stage(Mconv7_stage5, hm_size) + + concat_stage6 = tf.concat( + [Mconv7_stage5, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage6 = layers.conv2d( + concat_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage6') + Mconv1_stage6 = tf.nn.relu(Mconv1_stage6) + Mconv2_stage6 = layers.conv2d( + Mconv1_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage6') + Mconv2_stage6 = tf.nn.relu(Mconv2_stage6) + Mconv3_stage6 = layers.conv2d( + Mconv2_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage6') + Mconv3_stage6 = tf.nn.relu(Mconv3_stage6) + Mconv4_stage6 = layers.conv2d( + Mconv3_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage6') + Mconv4_stage6 = tf.nn.relu(Mconv4_stage6) + Mconv5_stage6 = layers.conv2d( + Mconv4_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage6') + Mconv5_stage6 = tf.nn.relu(Mconv5_stage6) + Mconv6_stage6 = layers.conv2d( + Mconv5_stage6, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage6') + Mconv6_stage6 = tf.nn.relu(Mconv6_stage6) + Mconv7_stage6 = layers.conv2d( + Mconv6_stage6, 15, 1, 1, activation_fn=None, + scope='Mconv7_stage6') + return _process_stage(Mconv7_stage6, hm_size) + + -- Gitee From dcdabd0bde9ceaf9b6096836a7e154e808ab2ffb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:14:35 +0000 Subject: [PATCH 22/43] add --- .../packages / lifting/utils/draw.py | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py new file mode 100644 index 000000000..d95affa0d --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py @@ -0,0 +1,112 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +import cv2 +import numpy as np +from .config import JOINT_DRAW_SIZE +from .config import NORMALISATION_COEFFICIENT +import matplotlib.pyplot as plt +import math + +__all__ = [ + 'draw_limbs', + 'plot_pose' +] + + +def draw_limbs(image, pose_2d, visible): + """Draw the 2D pose without the occluded/not visible joints.""" + + _COLORS = [ + [0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0], + [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170], + [170, 0, 255] + ] + # _COLORS = [ + # [0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], + # [128, 0, 0], [0, 128, 0], [0, 0, 128], [255, 255, 255], + # [128, 128, 128] + # ] + _LIMBS = np.array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, + 9, 10, 11, 12, 12, 13]).reshape((-1, 2)) + + _NORMALISATION_FACTOR = int(math.floor(math.sqrt(image.shape[0] * image.shape[1] / NORMALISATION_COEFFICIENT))) + + for oid in range(pose_2d.shape[0]): + # for i in range(14): + # cv2.putText(image, str(i), (pose_2d[oid][i][1], pose_2d[oid][i][0]), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255)) + for lid, (p0, p1) in enumerate(_LIMBS): + + if not (visible[oid][p0] and visible[oid][p1]): + continue + y0, x0 = pose_2d[oid][p0] + y1, x1 = pose_2d[oid][p1] + cv2.circle(image, (x0, y0), JOINT_DRAW_SIZE *_NORMALISATION_FACTOR , _COLORS[lid], -1) + cv2.circle(image, (x1, y1), JOINT_DRAW_SIZE*_NORMALISATION_FACTOR , _COLORS[lid], -1) + cv2.line(image, (x0, y0), (x1, y1), + _COLORS[lid], 10 , 16) # LIMB_DRAW_SIZE*_NORMALISATION_FACTOR + + +def plot_pose(pose): + """Plot the 3D pose showing the joint connections.""" + import mpl_toolkits.mplot3d.axes3d as p3 + + _CONNECTION = [ + [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], + [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], + [15, 16]] + + def joint_color(j): + """ + TODO: 'j' shadows name 'j' from outer scope + """ + + colors = [(0, 0, 0), (255, 0, 255), (0, 0, 255), + (0, 255, 255), (255, 0, 0), (0, 255, 0)] + _c = 0 + if j in range(1, 4): + _c = 1 + if j in range(4, 7): + _c = 2 + if j in range(9, 11): + _c = 3 + if j in range(11, 14): + _c = 4 + if j in range(14, 17): + _c = 5 + return colors[_c] + + assert (pose.ndim == 2) + assert (pose.shape[0] == 3) + fig = plt.figure() + ax = fig.gca(projection='3d') + for c in _CONNECTION: + col = '#%02x%02x%02x' % joint_color(c[0]) + ax.plot([pose[0, c[0]], pose[0, c[1]]], + [pose[1, c[0]], pose[1, c[1]]], + [pose[2, c[0]], pose[2, c[1]]], c=col) + for j in range(pose.shape[1]): + col = '#%02x%02x%02x' % joint_color(j) + ax.scatter(pose[0, j], pose[1, j], pose[2, j], + c=col, marker='o', edgecolor=col) + smallest = pose.min() + largest = pose.max() + ax.set_xlim3d(smallest, largest) + ax.set_ylim3d(smallest, largest) + ax.set_zlim3d(smallest, largest) + + return fig + + + -- Gitee From 43b642ce3ab38ffd3b01c2144dbf1f2142c577dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:15:10 +0000 Subject: [PATCH 23/43] add --- .../packages / lifting/utils/prob_model.py | 270 ++++++++++++++++++ 1 file changed, 270 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py new file mode 
100644 index 000000000..b8ffe330b --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py @@ -0,0 +1,270 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +import os +import scipy.io as sio +import numpy as np +from .upright_fast import pick_e +from . import config + +__all__ = ['Prob3dPose'] + + +class Prob3dPose: + + def __init__(self, prob_model_path): + model_param = sio.loadmat(prob_model_path) + self.mu = np.reshape( + model_param['mu'], (model_param['mu'].shape[0], 3, -1)) + self.e = np.reshape(model_param['e'], (model_param['e'].shape[ + 0], model_param['e'].shape[1], 3, -1)) + self.sigma = model_param['sigma'] + self.cam = np.array( + [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]]) + + @staticmethod + def cost3d(model, gt): + """3d error in mm""" + out = np.sqrt(((gt - model) ** 2).sum(1)).mean(-1) + return out + + @staticmethod + def renorm_gt(gt): + """Compel gt data to have mean joint length of one""" + _POSE_TREE = np.asarray([ + [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], + [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], + [15, 16]]).T + scale = np.sqrt(((gt[:, :, _POSE_TREE[0]] - + gt[:, :, _POSE_TREE[1]]) ** 2).sum(2).sum(1)) + return gt / scale[:, np.newaxis, np.newaxis] + + @staticmethod + def build_model(a, e, s0): + """Build 3D model""" + assert (s0.shape[1] == 3) + assert (e.shape[2] == 3) + assert (a.shape[1] == e.shape[1]) + out = np.einsum('...i,...ijk', a, e) + out += s0 + return out + + @staticmethod + def build_and_rot_model(a, e, s0, r): + """ + Build model and rotate according to the identified rotation matrix + """ + from numpy.core.umath_tests import matrix_multiply + + r2 = Prob3dPose.upgrade_r(r.T).transpose((0, 2, 1)) + mod = Prob3dPose.build_model(a, e, s0) + mod = matrix_multiply(r2, mod) + return mod + + @staticmethod + def upgrade_r(r): + """ + Upgrades complex parameterisation of planar rotation to tensor + containing per frame 3x3 rotation matrices + """ + assert (r.ndim == 2) + # Technically optional assert, but if this fails data is probably + # transposed + assert (r.shape[1] == 2) + assert (np.all(np.isfinite(r))) + norm = np.sqrt((r[:, :2] ** 2).sum(1)) + assert (np.all(norm > 0)) + r /= norm[:, np.newaxis] + assert (np.all(np.isfinite(r))) + newr = np.zeros((r.shape[0], 3, 3)) + newr[:, :2, 0] = r[:, :2] + newr[:, 2, 2] = 1 + newr[:, 1::-1, 1] = r[:, :2] + newr[:, 0, 1] *= -1 + return newr + + @staticmethod + def centre(data_2d): + """center data according to each of the coordiante components""" + return (data_2d.T - data_2d.mean(1)).T + + @staticmethod + def centre_all(data): + """center all data""" + if data.ndim == 2: + return Prob3dPose.centre(data) + return (data.transpose(2, 0, 1) - data.mean(2)).transpose(1, 2, 0) + + @staticmethod + def normalise_data(d2, weights): + """Normalise data according to height""" + + # the joints with weight set to 0 should 
not be considered in the + # normalisation process + d2 = d2.reshape(d2.shape[0], -1, 2).transpose(0, 2, 1) + idx_consider = weights[0, 0].astype(np.bool) + if np.sum(weights[:, 0].sum(1) >= config.MIN_NUM_JOINTS) == 0: + raise Exception( + 'Not enough 2D joints identified to generate 3D pose') + d2[:, :, idx_consider] = Prob3dPose.centre_all(d2[:, :, idx_consider]) + + # Height normalisation (2 meters) + m2 = d2[:, 1, idx_consider].min(1) / 2.0 + m2 -= d2[:, 1, idx_consider].max(1) / 2.0 + crap = m2 == 0 + m2[crap] = 1.0 + d2[:, :, idx_consider] /= m2[:, np.newaxis, np.newaxis] + return d2, m2 + + @staticmethod + def transform_joints(pose_2d, visible_joints): + """ + Transform the set of joints according to what the probabilistic model + expects as input. + + It returns the new set of joints of each of the people and the set of + weights for the joints. + """ + + _H36M_ORDER = [8, 9, 10, 11, 12, 13, 1, 0, 5, 6, 7, 2, 3, 4] + _W_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] + + def swap_xy(poses): + tmp = np.copy(poses[:, :, 0]) + poses[:, :, 0] = poses[:, :, 1] + poses[:, :, 1] = tmp + return poses + + assert (pose_2d.ndim == 3) + new_pose = pose_2d.copy() + new_pose = swap_xy(new_pose) + new_pose = new_pose[:, _H36M_ORDER] + + # defining weights according to occlusions + weights = np.zeros((pose_2d.shape[0], 2, config.H36M_NUM_JOINTS)) + ordered_visibility = np.repeat( + visible_joints[:, _H36M_ORDER, np.newaxis], 2, 2 + ).transpose([0, 2, 1]) + weights[:, :, _W_POS] = ordered_visibility + return new_pose, weights + + def affine_estimate(self, w, depth_reg=0.085, weights=None, scale=10.0, + scale_mean=0.0016 * 1.8 * 1.2, scale_std=1.2 * 0, + cap_scale=-0.00129): + """ + Quick switch to allow reconstruction at unknown scale returns a,r + and scale + """ + weights = np.zeros((0, 0, 0)) if weights is None else weights + + s = np.empty((self.sigma.shape[0], self.sigma.shape[1] + 4)) # e,y,x,z + s[:, :4] = 10 ** -5 # Tiny but makes stuff well-posed + s[:, 0] = scale_std + s[:, 4:] = self.sigma + s[:, 4:-1] *= scale + + e2 = np.zeros((self.e.shape[0], self.e.shape[ + 1] + 4, 3, self.e.shape[3])) + e2[:, 1, 0] = 1.0 + e2[:, 2, 1] = 1.0 + e2[:, 3, 0] = 1.0 + # This makes the least_squares problem ill posed, as X,Z are + # interchangable + # Hence regularisation above to speed convergence and stop blow-up + e2[:, 0] = self.mu + e2[:, 4:] = self.e + t_m = np.zeros_like(self.mu) + + res, a, r = pick_e(w, e2, t_m, self.cam, s, weights=weights, + interval=0.01, depth_reg=depth_reg, + scale_prior=scale_mean) + + scale = a[:, :, 0] + reestimate = scale > cap_scale + m = self.mu * cap_scale + for i in range(scale.shape[0]): + if reestimate[i].sum() > 0: + ehat = e2[i:i + 1, 1:] + mhat = m[i:i + 1] + shat = s[i:i + 1, 1:] + (res2, a2, r2) = pick_e( + w[reestimate[i]], ehat, mhat, self.cam, shat, + weights=weights[reestimate[i]], + interval=0.01, depth_reg=depth_reg, + scale_prior=scale_mean + ) + res[i:i + 1, reestimate[i]] = res2 + a[i:i + 1, reestimate[i], 1:] = a2 + a[i:i + 1, reestimate[i], 0] = cap_scale + r[i:i + 1, :, reestimate[i]] = r2 + scale = a[:, :, 0] + a = a[:, :, 1:] / a[:, :, 0][:, :, np.newaxis] + return res, e2[:, 1:], a, r, scale + + def better_rec(self, w, model, s=1, weights=1, damp_z=1): + """Quick switch to allow reconstruction at unknown scale + returns a,r and scale""" + from numpy.core.umath_tests import matrix_multiply + proj = matrix_multiply(self.cam[np.newaxis], model) + proj[:, :2] = (proj[:, :2] * s + w * weights) / (s + weights) + proj[:, 2] *= damp_z + out = 
matrix_multiply(self.cam.T[np.newaxis], proj) + return out + + def create_rec(self, w2, weights, res_weight=1): + """Reconstruct 3D pose given a 2D pose""" + _SIGMA_SCALING = 5.2 + + res, e, a, r, scale = self.affine_estimate( + w2, scale=_SIGMA_SCALING, weights=weights, + depth_reg=0, cap_scale=-0.001, scale_mean=-0.003 + ) + + remaining_dims = 3 * w2.shape[2] - e.shape[1] + assert (remaining_dims >= 0) + llambda = -np.log(self.sigma) + lgdet = np.sum(llambda[:, :-1], 1) + llambda[:, -1] * remaining_dims + score = (res * res_weight + lgdet[:, np.newaxis] * (scale ** 2)) + best = np.argmin(score, 0) + index = np.arange(best.shape[0]) + a2 = a[best, index] + r2 = r[best, :, index].T + rec = Prob3dPose.build_and_rot_model(a2, e[best], self.mu[best], r2) + rec *= -np.abs(scale[best, index])[:, np.newaxis, np.newaxis] + + rec = self.better_rec(w2, rec, 1, 1.55 * weights, 1) * -1 + rec = Prob3dPose.renorm_gt(rec) + rec *= 0.97 + return rec + + def compute_3d(self, pose_2d, weights): + """Reconstruct 3D poses given 2D estimations""" + + _J_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] + _SCALE_3D = 1174.88312988 + + if pose_2d.shape[1] != config.H36M_NUM_JOINTS: + # need to call the linear regressor + reg_joints = np.zeros( + (pose_2d.shape[0], config.H36M_NUM_JOINTS, 2)) + for oid, singe_pose in enumerate(pose_2d): + reg_joints[oid, _J_POS] = singe_pose + + norm_pose, _ = Prob3dPose.normalise_data(reg_joints, weights) + else: + norm_pose, _ = Prob3dPose.normalise_data(pose_2d, weights) + + pose_3d = self.create_rec(norm_pose, weights) * _SCALE_3D + return pose_3d + -- Gitee From 6368d733b7c9b85ebf37f6483017a4e822e51377 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:15:43 +0000 Subject: [PATCH 24/43] add --- .../packages / lifting/utils/process.py | 309 ++++++++++++++++++ 1 file changed, 309 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py new file mode 100644 index 000000000..da0050947 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py @@ -0,0 +1,309 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import division +from npu_bridge.npu_init import * + +import skimage.io +import skimage.transform +import scipy.ndimage as ndimage +import scipy.ndimage.filters as filters +from scipy.stats import multivariate_normal + +import os +import json +import numpy as np +from . 
import config +import cv2 +from itertools import compress + +__all__ = [ + 'detect_objects_heatmap', + 'detect_objects_heatmap', + 'gaussian_kernel', + 'gaussian_heatmap', + 'prepare_input_posenet', + 'detect_parts_heatmaps', + 'detect_parts_from_likelihoods', + 'import_json', + 'generate_labels', + 'generate_center_map', + 'rescale', + 'crop_image' +] + + +def detect_objects_heatmap(heatmap): + data = 256 * heatmap + data_max = filters.maximum_filter(data, 3) + maxima = (data == data_max) + data_min = filters.minimum_filter(data, 3) + diff = ((data_max - data_min) > 0.3) + maxima[diff == 0] = 0 + + labeled, num_objects = ndimage.label(maxima) + slices = ndimage.find_objects(labeled) + objects = np.zeros((num_objects, 2), dtype=np.int32) + pidx = 0 + for (dy, dx) in slices: + pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2] + if heatmap[pos[0], pos[1]] > config.CENTER_TR: + objects[pidx, :] = pos + pidx += 1 + return objects[:pidx] + + +def gaussian_kernel(h, w, sigma_h, sigma_w): + yx = np.mgrid[-h // 2:h // 2, -w // 2:w // 2] ** 2 + return np.exp(-yx[0, :, :] / sigma_h ** 2 - yx[1, :, :] / sigma_w ** 2) + + +def gaussian_heatmap(h, w, pos_x, pos_y, sigma_h=1, sigma_w=1, init=None): + """ + Compute the heat-map of size (w x h) with a gaussian distribution fit in + position (pos_x, pos_y) and a convariance matix defined by the related + sigma values. + The resulting heat-map can be summed to a given heat-map init. + """ + init = init if init is not None else [] + + cov_matrix = np.eye(2) * ([sigma_h**2, sigma_w**2]) + + x, y = np.mgrid[0:h, 0:w] + pos = np.dstack((x, y)) + rv = multivariate_normal([pos_x, pos_y], cov_matrix) + + tmp = rv.pdf(pos) + hmap = np.multiply( + tmp, np.sqrt(np.power(2 * np.pi, 2) * np.linalg.det(cov_matrix)) + ) + idx = np.where(hmap.flatten() <= np.exp(-4.6052)) + hmap.flatten()[idx] = 0 + + if np.size(init) == 0: + return hmap + + assert (np.shape(init) == hmap.shape) + hmap += init + idx = np.where(hmap.flatten() > 1) + hmap.flatten()[idx] = 1 + return hmap + + +def prepare_input_posenet(image, objects, size_person, size, + batch_size, sigma=25, border=400): + result = np.zeros((batch_size, size[0], size[1], 4)) + padded_image = np.zeros( + (1, size_person[0] + border, size_person[1] + border, 4)) + padded_image[0, border // 2:-border // 2, + border // 2:-border // 2, :3] = image + if objects.shape[0] > batch_size: + objects = objects[:batch_size] + for oid, (yc, xc) in enumerate(objects): + dh, dw = size[0] // 2, size[1] // 2 + y0, x0, y1, x1 = np.array( + [yc - dh, xc - dw, yc + dh, xc + dw]) + border // 2 + result[oid, :, :, :4] = padded_image[:, y0:y1, x0:x1, :] + result[oid, :, :, 3] = gaussian_kernel(size[0], size[1], sigma, sigma) + return np.split(result, [3], 3) + + +def detect_parts_heatmaps(heatmaps, centers, size, num_parts=14): + """ + Given heat-maps find the position of each joint by means of n argmax + function + """ + parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32) + visible = np.zeros((len(centers), num_parts), dtype=bool) + for oid, (yc, xc) in enumerate(centers): + part_hmap = skimage.transform.resize( + np.clip(heatmaps[oid], -1, 1), size) + for pid in range(num_parts): + y, x = np.unravel_index(np.argmax(part_hmap[:, :, pid]), size) + parts[oid, pid] = y + yc - size[0] // 2, x + xc - size[1] // 2 + visible[oid, pid] = np.mean( + part_hmap[:, :, pid]) > config.VISIBLE_PART + return parts, visible + + +def detect_parts_from_likelihoods(poses, centers, likelihoods, num_parts=14): + """ + Given heat-maps find 
the position of each joint by means of n argmax + function + """ + if len(centers) > config.BATCH_SIZE: + centers = centers[:config.BATCH_SIZE] + parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32) + visible = np.zeros((len(centers), num_parts), dtype=bool) + for oid, (yc, xc) in enumerate(centers): + for pid in range(num_parts): + x, y = poses[oid, :, pid] + parts[oid, pid] = y + yc - config.INPUT_SIZE // 2, x + xc - config.INPUT_SIZE // 2 + visible[oid, pid] = likelihoods[oid, pid] > config.VISIBLE_PART + return parts, visible + + +def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'): + """Get the json file containing the dataset. + We want the data to be shuffled, however the training has to be repeatable. + This means that once shuffled the order has to me mantained.""" + with open(path) as data_file: + data_this = json.load(data_file) + data_this = np.array(data_this['root']) + num_samples = len(data_this) + + if os.path.exists(order): + idx = np.load(order) + else: + idx = np.random.permutation(num_samples).tolist() + np.save(order, idx) + + is_not_validation = [not data_this[i]['isValidation'] + for i in range(num_samples)] + keep_data_idx = list(compress(idx, is_not_validation)) + + data = data_this[keep_data_idx] + return data, len(keep_data_idx) + + +def generate_labels(image_shape, joint_positions, num_other_people, + joints_other_people, offset): + """ + Given as input a set of joint positions and the size of the input image + it generates + a set of heat-maps of the same size. It generates both heat-maps used as + labels for the first stage (label_1st_lower) and for all the other stages + (label_lower). + """ + _FILTER_JOINTS = np.array([9, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5]) + + img_height, img_width, _ = image_shape + heat_maps_single_p = np.zeros( + (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) + heat_maps_other_p = np.zeros( + (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) + + # generate first set of heat-maps + for i in range(config.NUM_JOINTS): + # the set of joints can be different fromt the one in the json file + curr_joint = joint_positions[_FILTER_JOINTS[i]] + skip = (curr_joint[0] < 0 or curr_joint[1] < 0 or + curr_joint[0] >= img_width or curr_joint[1] >= img_height) + if not skip: + heat_maps_single_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[ + 1] - offset[1], curr_joint[0] - offset[0], + config.SIGMA, config.SIGMA) + + heat_maps_other_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[ + 1] - offset[1], curr_joint[0] - offset[0], + config.SIGMA, config.SIGMA) + + heat_maps_single_p[-1] = np.maximum( + 1 - np.max(heat_maps_single_p[:-1], axis=0), + np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) + heat_maps_single_p = np.transpose(heat_maps_single_p, (1, 2, 0)) + + # generate second set of heat-maps for other people in the image + for p in range(int(num_other_people)): + for i in range(config.NUM_JOINTS): + # the set of joints can be different fromt the one in the json file + try: + if num_other_people == 1: + curr_joint = joints_other_people[_FILTER_JOINTS[i]] + else: + curr_joint = joints_other_people[p][_FILTER_JOINTS[i]] + skip = ( + curr_joint[0] < 0 or curr_joint[1] < 0 or + curr_joint[0] >= img_width or curr_joint[1] >= img_height) + except IndexError: + skip = True + if not skip: + heat_maps_other_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[1] - offset[1], curr_joint[0] - offset[0], + 
config.SIGMA, config.SIGMA, init=heat_maps_other_p[i]) + + heat_maps_other_p[-1] = np.maximum( + 1 - np.max(heat_maps_other_p[:-1], axis=0), + np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) + + heat_maps_other_p = np.transpose(heat_maps_other_p, (1, 2, 0)) + + # rescaling heat-maps accoring to the right shape + labels_single = rescale(heat_maps_single_p, config.OUTPUT_SIZE) + labels_people = rescale(heat_maps_other_p, config.OUTPUT_SIZE) + return labels_people, labels_single + + +def generate_center_map(center_pos, img_shape): + """ + Given the position of the person and the size of the input image it + generates + a heat-map where a gaissian distribution is fit in the position of the + person in the image. + """ + img_height = img_shape + img_width = img_shape + center_map = gaussian_heatmap( + img_height, img_width, center_pos[1], center_pos[0], + config.SIGMA_CENTER, config.SIGMA_CENTER) + return center_map + + +def rescale(data, new_size): + """Rescale data to a fixed dimension, regardless the number of channels. + Data has to be in the format (h,w,c).""" + if data.ndim > 2: + assert data.shape[2] < data.shape[0] + assert data.shape[2] < data.shape[1] + resized_data = cv2.resize( + data, (new_size, new_size), interpolation=cv2.INTER_CUBIC) + return resized_data + + +def crop_image(image, obj_pose): + """ + Crop the image in order to have the person at the center and the final + image size + is the same as the expected CNN input size. + Returns the cropped image and the offset that is used to update the joint + positions. + """ + offset_left = int(obj_pose[0] - config.INPUT_SIZE // 2) + offset_up = int(obj_pose[1] - config.INPUT_SIZE // 2) + # just for checking that it's inside the image + offset_right = int(image.shape[1] - obj_pose[0] - config.INPUT_SIZE // 2) + offset_bottom = int(image.shape[0] - obj_pose[1] - config.INPUT_SIZE // 2) + + pad_left, pad_right, pad_up, pad_bottom = 0, 0, 0, 0 + if offset_left < 0: + pad_left = -offset_left + if offset_right < 0: + pad_right = -offset_right + if offset_up < 0: + pad_up = -offset_up + if offset_bottom < 0: + pad_bottom = -offset_bottom + padded_image = np.lib.pad( + image, ((pad_up, pad_bottom), (pad_left, pad_right), (0, 0)), + 'constant', constant_values=((0, 0), (0, 0), (0, 0))) + + cropped_image = padded_image[ + offset_up + pad_up: offset_up + pad_up + config.INPUT_SIZE, + offset_left + pad_left: offset_left + pad_left + config.INPUT_SIZE] + + return cropped_image, np.array([offset_left, offset_up]) -- Gitee From b9073eac1e65caf88787f460e55eac8e8c722026 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:16:10 +0000 Subject: [PATCH 25/43] add --- .../packages / lifting/utils/upright_fast.py | 302 ++++++++++++++++++ 1 file changed, 302 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py new file mode 100644 index 000000000..cbfd5ec22 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py @@ -0,0 +1,302 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +import numpy as np +import scipy + +__all__ = [ + 'upgrade_r', + 'update_cam', + 'estimate_a_and_r_with_res', + 'estimate_a_and_r_with_res_weights', + 'pick_e' +] + + +def upgrade_r(r): + """Upgrades complex parameterisation of planar rotation to tensor containing + per frame 3x3 rotation matrices""" + newr = np.zeros((3, 3)) + newr[:2, 0] = r + newr[2, 2] = 1 + newr[1::-1, 1] = r + newr[0, 1] *= -1 + return newr + + +def update_cam(cam): + new_cam = cam[[0, 2, 1]].copy() + new_cam = new_cam[:, [0, 2, 1]] + return new_cam + + +def estimate_a_and_r_with_res( + w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, + residue, Ps, depth_reg, scale_prior): + """ + TODO: Missing the following parameters in docstring: + - w, e, s0, camera_r, Lambda, check, a, res, proj_e, depth_reg, + scale_prior + + TODO: The following parameters are not used: + - s0, weights + + So local optima are a problem in general. + However: + + 1. This problem is convex in a but not in r, and + + 2. each frame can be solved independently. + + So for each frame, we can do a grid search in r and take the globally + optimal solution. + + In practice, we just brute force over 100 different estimates of r, and + take the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a + given r. + + Arguments: + + w is a 3d measurement matrix of form frames*2*points + + e is a 3d set of basis vectors of from basis*3*points + + s0 is the 3d rest shape of form 3*points + + Lambda are the regularisor coefficients on the coefficients of the + weights typically generated using PPCA + + interval is how far round the circle we should check for break points + we check every interval*2*pi radians + + Returns: + + a (basis coefficients) and r (representation of rotations as a complex + number) + """ + frames = w.shape[0] + points = w.shape[2] + basis = e.shape[0] + r = np.empty(2) + Ps_reshape = Ps.reshape(2 * points) + w_reshape = w.reshape((frames, points * 2)) + + for i in range(check.size): + c = check[i] + r[0] = np.cos(c) + r[1] = np.sin(c) + grot = camera_r.dot(upgrade_r(r)) + rot = grot[:2] + res[:, :points * 2] = w_reshape + res[:, :points * 2] -= Ps_reshape + proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( + e.shape[0], 2 * points) + + if Lambda.size != 0: + proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) + res[:, 2 * points:].fill(0) + res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + # depth regularizer not used + proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * + depth_reg) * grot[2]).dot(e) + # we let the person change scale + res[:, 2 * points] = scale_prior + + """ + TODO: PLEASE REVIEW THE FOLLOWING CODE.... 
+ overwrite_a and overwrite_b ARE UNEXPECTED ARGUMENTS OF + scipy.linalg.lstsq + """ + a[i], residue[i], _, _ = scipy.linalg.lstsq( + proj_e.T, res.T, overwrite_a=True, overwrite_b=True) + + # find and return best coresponding solution + best = np.argmin(residue, 0) + assert (best.shape[0] == frames) + theta = check[best] + index = (best, np.arange(frames)) + aa = a.transpose(0, 2, 1)[index] + retres = residue[index] + r = np.empty((2, frames)) + r[0] = np.sin(theta) + r[1] = np.cos(theta) + return aa, r, retres + + +def estimate_a_and_r_with_res_weights( + w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, + residue, Ps, depth_reg, scale_prior): + """ + TODO: Missing the following parameters in docstring: + - w, e, s0, camera)r, Lambda, check, a, res, proj_e, residue, + Ps, depth_reg, scale_prior + + So local optima are a problem in general. + However: + + 1. This problem is convex in a but not in r, and + + 2. each frame can be solved independently. + + So for each frame, we can do a grid search in r and take the globally + optimal solution. + + In practice, we just brute force over 100 different estimates of r, and + take + the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a given r. + + Arguments: + + w is a 3d measurement matrix of form frames*2*points + + e is a 3d set of basis vectors of from basis*3*points + + s0 is the 3d rest shape of form 3*points + + Lambda are the regularisor coefficients on the coefficients of the + weights + typically generated using PPCA + + interval is how far round the circle we should check for break points + we check every interval*2*pi radians + + Returns: + + a (basis coefficients) and r (representation of rotations as a complex + number) + """ + frames = w.shape[0] + points = w.shape[2] + basis = e.shape[0] + r = np.empty(2) + Ps_reshape = Ps.reshape(2 * points) + w_reshape = w.reshape((frames, points * 2)) + p_copy = np.empty_like(proj_e) + + for i in range(check.size): + c = check[i] + r[0] = np.sin(c) + r[1] = np.cos(c) + grot = camera_r.dot(upgrade_r(r).T) + rot = grot[:2] + rot.dot(s0, Ps) # TODO: remove? + res[:, :points * 2] = w_reshape + res[:, :points * 2] -= Ps_reshape + proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( + e.shape[0], 2 * points) + + if Lambda.size != 0: + proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) + res[:, 2 * points:].fill(0) + res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * + depth_reg) * grot[2]).dot(e) + res[:, 2 * points] = scale_prior + if weights.size != 0: + res[:, :points * 2] *= weights + for j in range(frames): + p_copy[:] = proj_e + p_copy[:, :points * 2] *= weights[j] + a[i, :, j], comp_residual, _, _ = np.linalg.lstsq( + p_copy.T, res[j].T) + if not comp_residual: + # equations are over-determined + residue[i, j] = 1e-5 + else: + residue[i, j] = comp_residual + # find and return best coresponding solution + best = np.argmin(residue, 0) + index = (best, np.arange(frames)) + theta = check[best] + aa = a.transpose(0, 2, 1)[index] + retres = residue[index] + r = np.empty((2, frames)) + r[0] = np.sin(theta) + r[1] = np.cos(theta) + return aa, r, retres + + +def pick_e(w, e, s0, camera_r=None, Lambda=None, + weights=None, scale_prior=-0.0014, interval=0.01, depth_reg=0.0325): + """Brute force over charts from the manifold to find the best one. 
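# --- Illustrative sketch, not part of the original patch: the search scheme the
# estimate_a_and_r_* docstrings above describe, reduced to a synthetic toy.
# The problem is linear in the basis coefficients a but not in the rotation, so
# we brute-force a grid of rotation angles, solve a least-squares problem for a
# at each angle, and keep the lowest residual. All data below is made up, and
# the real routine additionally handles regularisers, weights and scale priors.
import numpy as np

rng = np.random.default_rng(0)
points, basis = 15, 3
E = rng.normal(size=(basis, 3, points))          # synthetic 3-D basis shapes
a_true = np.array([1.0, -0.5, 0.3])
theta_true = 1.2

def rot_z(t):
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

shape3d = np.tensordot(a_true, E, axes=1)        # 3 x points
w = (rot_z(theta_true) @ shape3d)[:2]            # observed 2-D projection

best = (np.inf, None, None)
for theta in np.arange(0.0, 2 * np.pi, 0.01):    # brute force over rotations
    proj_e = np.stack([(rot_z(theta) @ E[k])[:2].ravel() for k in range(basis)])
    a, _, _, _ = np.linalg.lstsq(proj_e.T, w.ravel(), rcond=None)
    residual = np.sum((proj_e.T @ a - w.ravel()) ** 2)
    if residual < best[0]:
        best = (residual, theta, a)

# the projection is unchanged under (theta + pi, -a), so compare modulo pi
theta_hat = best[1]
assert min(abs(theta_hat - theta_true), abs(theta_hat - theta_true - np.pi)) < 0.01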
+ Returns best chart index and its a and r coefficients + Returns assignment, and a and r coefficents""" + + camera_r = np.asarray([[1, 0, 0], [0, 0, -1], [0, 1, 0]] + ) if camera_r is None else camera_r + Lambda = np.ones((0, 0)) if Lambda is None else Lambda + weights = np.ones((0, 0, 0)) if weights is None else weights + + charts = e.shape[0] + frames = w.shape[0] + basis = e.shape[1] + points = e.shape[3] + assert (s0.shape[0] == charts) + r = np.empty((charts, 2, frames)) + a = np.empty((charts, frames, e.shape[1])) + score = np.empty((charts, frames)) + check = np.arange(0, 1, interval) * 2 * np.pi + cache_a = np.empty((check.size, basis, frames)) + residue = np.empty((check.size, frames)) + + if Lambda.size != 0: + res = np.zeros((frames, points * 2 + basis + points)) + proj_e = np.zeros((basis, 2 * points + basis + points)) + else: + res = np.empty((frames, points * 2)) + proj_e = np.empty((basis, 2 * points)) + Ps = np.empty((2, points)) + + if weights.size == 0: + for i in range(charts): + if Lambda.size != 0: + a[i], r[i], score[i] = estimate_a_and_r_with_res( + w, e[i], s0[i], camera_r, + Lambda[i], check, cache_a, weights, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + a[i], r[i], score[i] = estimate_a_and_r_with_res( + w, e[i], s0[i], camera_r, Lambda, + check, cache_a, weights, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + w2 = weights.reshape(weights.shape[0], -1) + for i in range(charts): + if Lambda.size != 0: + a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( + w, e[i], s0[i], camera_r, + Lambda[i], check, cache_a, w2, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( + w, e[i], s0[i], camera_r, Lambda, + check, cache_a, w2, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + + remaining_dims = 3 * w.shape[2] - e.shape[1] + assert (np.all(score > 0)) + assert (remaining_dims >= 0) + # Zero problems in log space due to un-regularised first co-efficient + l = Lambda.copy() + l[Lambda == 0] = 1 + llambda = -np.log(l) + score /= 2 + return score, a, r + -- Gitee From 500b8cfb9e5904ae09ccab826fc6696a31c6e5e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:18:46 +0000 Subject: [PATCH 26/43] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20TensorFlow/?= =?UTF-8?q?contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorflow/datas?= =?UTF-8?q?et=20/=20MPII=20/=20images=20=E4=B8=BA=20TensorFlow/contrib/cv/?= =?UTF-8?q?LiftingFromTheDeep=5FID0891=5Ffor=5FTensorflow/dataset=20/=20MP?= =?UTF-8?q?II=20/images?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dataset / MPII /{ images => images}/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /{ images => images}/.keep (100%) diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /images/.keep similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII / images/.keep rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /images/.keep -- Gitee From 51f2a0f2ce77baebe65134a1deb5f4109e68f58b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:19:20 +0000 Subject: [PATCH 27/43] 
=?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20TensorFlow/?= =?UTF-8?q?contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorflow/packa?= =?UTF-8?q?ges=20/=20lifting=20=E4=B8=BA=20TensorFlow/contrib/cv/LiftingFr?= =?UTF-8?q?omTheDeep=5FID0891=5Ffor=5FTensorflow/packages=20/lifting?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages /{ lifting => lifting}/__init__.py | 0 .../packages /{ lifting => lifting}/_pose_estimator.py | 0 .../packages /{ lifting => lifting}/utils/__init__.py | 0 .../packages /{ lifting => lifting}/utils/config.py | 0 .../packages /{ lifting => lifting}/utils/cpm.py | 0 .../packages /{ lifting => lifting}/utils/draw.py | 0 .../packages /{ lifting => lifting}/utils/prob_model.py | 0 .../packages /{ lifting => lifting}/utils/process.py | 0 .../packages /{ lifting => lifting}/utils/upright_fast.py | 0 9 files changed, 0 insertions(+), 0 deletions(-) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/__init__.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/_pose_estimator.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/__init__.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/config.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/cpm.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/draw.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/prob_model.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/process.py (100%) rename TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /{ lifting => lifting}/utils/upright_fast.py (100%) diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/__init__.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/_pose_estimator.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/__init__.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py 
b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/config.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/cpm.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/draw.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/prob_model.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/process.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py similarity index 100% rename from TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages / lifting/utils/upright_fast.py rename to TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py -- Gitee From bae4a6746ccdd865ee95dd843cb8b60703deb00b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:20:06 +0000 Subject: [PATCH 28/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/dataset=20/=20MPII=20/images?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dataset / MPII /images/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /images/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /images/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset / MPII /images/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 
0740388bf83d8e01582b53e9d3870756e9f4131a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:23:59 +0000 Subject: [PATCH 29/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20dataset?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 685f07a541f70d70591ed3b3defb982d728359f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:24:09 +0000 Subject: [PATCH 30/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20MPII?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 6fbdd45454798b2d2d50de4bae113a438db6498d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:24:18 +0000 Subject: [PATCH 31/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20images?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dataset/MPII/images/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/images/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/images/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/images/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 2d92b1ea30f62d8aeb07ad60640df6332f0d4502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:24:31 +0000 Subject: [PATCH 32/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/dataset/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From f13965c590c877e3fdea3a28eb8239a57f35f36c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:24:40 +0000 Subject: [PATCH 33/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/dataset/MPII/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- .../LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/dataset/MPII/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 32551246e9b98685bc5187432f3c3edd4011232e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:24:57 +0000 Subject: [PATCH 34/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/packages=20/lifting?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages /lifting/__init__.py | 17 - .../packages /lifting/_pose_estimator.py | 187 -------- .../packages /lifting/utils/__init__.py | 21 - .../packages /lifting/utils/config.py | 51 --- .../packages /lifting/utils/cpm.py | 408 ------------------ .../packages /lifting/utils/draw.py | 112 ----- .../packages /lifting/utils/prob_model.py | 270 ------------ .../packages /lifting/utils/process.py | 309 ------------- .../packages /lifting/utils/upright_fast.py | 302 ------------- 9 files changed, 1677 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py deleted file mode 100644 index 3bb056e45..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from npu_bridge.npu_init import * -from ._pose_estimator import * -from . import utils - diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py deleted file mode 100644 index 986ea7bbf..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/_pose_estimator.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from npu_bridge.npu_init import * -from . import utils -import cv2 -import numpy as np -import tensorflow as tf -import abc - -ABC = abc.ABCMeta('ABC', (object,), {}) - -__all__ = [ - 'PoseEstimatorInterface', - 'PoseEstimator' -] - - -class PoseEstimatorInterface(ABC): - - @abc.abstractmethod - def initialise(self, args): - pass - - @abc.abstractmethod - def estimate(self, image): - return - - @abc.abstractmethod - def train(self, image, labels): - return - - @abc.abstractmethod - def close(self): - pass - - -class PoseEstimator(PoseEstimatorInterface): - - def __init__(self, image_size, session_path, prob_model_path): - """Initialising the graph in tensorflow. - INPUT: - image_size: Size of the image in the format (w x h x 3)""" - - self.session = None - self.poseLifting = utils.Prob3dPose(prob_model_path) - self.sess = -1 - self.orig_img_size = np.array(image_size) - self.scale = utils.config.INPUT_SIZE / (self.orig_img_size[0] * 1.0) - self.img_size = np.round( - self.orig_img_size * self.scale).astype(np.int32) - self.image_in = None - self.heatmap_person_large = None - self.pose_image_in = None - self.pose_centermap_in = None - self.pred_2d_pose = None - self.likelihoods = None - self.session_path = session_path - - def initialise(self): - """Load saved model in the graph - INPUT: - sess_path: path to the dir containing the tensorflow saved session - OUTPUT: - sess: tensorflow session""" - # initialize graph structrue - tf.reset_default_graph() - - with tf.variable_scope('CPM'): - # placeholders for person network - self.image_in = tf.placeholder( - tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 3]) - self.label_in = tf.placeholder( - tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 1]) - - heatmap_person = utils.inference_person(self.image_in) - - self.heatmap_person_large = tf.image.resize_images( - heatmap_person, [utils.config.INPUT_SIZE, self.img_size[1]]) - - # placeholders for pose network - self.pose_image_in = tf.placeholder( - tf.float32, - [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 3]) - - self.pose_centermap_in = tf.placeholder( - tf.float32, - [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 1]) - - self.pred_2d_pose, self.likelihoods = utils.inference_pose( - self.pose_image_in, self.pose_centermap_in, - utils.config.INPUT_SIZE) - - # set up loss and optimizer - self.loss = 
tf.reduce_mean(tf.abs(self.heatmap_person_large - self.label_in)) - self.optimizer = npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate=0.0000001)).minimize(self.loss) - - # load pretraining model - sess = tf.Session(config=npu_config_proto()) - sess.run(tf.global_variables_initializer()) - variables = tf.contrib.framework.get_variables_to_restore() - variables_to_resotre = [v for v in variables if v.name.split('/')[-1][:4] != 'Adam' and v.name[:4] != 'beta'] - self.saver = tf.train.Saver(variables_to_resotre) - self.saver.restore(sess, self.session_path) - self.session = sess - - def train(self, image, labels): - # input model,back propagation and then output loss - b_image = np.array(image / 255.0 - 0.5, dtype=np.float32) - labels = labels[:, :, :, np.newaxis] - - # self.session.run(self.optimizer, {self.image_in: b_image, self.label_in: labels}) - _, loss, heatmap_pred = self.session.run([self.optimizer, self.loss, self.heatmap_person_large], - feed_dict={self.image_in: b_image, self.label_in: labels}) - return loss, heatmap_pred - - def estimate(self, image, lifting=False): - """ - Estimate 2d and 3d poses on the image. - INPUT: - image: RGB image in the format (w x h x 3) - sess: tensorflow session - OUTPUT: - pose_2d: 2D pose for each of the people in the image in the format - (num_ppl x num_joints x 2) - visibility: vector containing a bool - value for each joint representing the visibility of the joint in - the image (could be due to occlusions or the joint is not in the - image) - pose_3d: 3D pose for each of the people in the image in the - format (num_ppl x 3 x num_joints) - hmap_person: heatmap - """ - # test model - sess = self.session - - image = cv2.resize(image, (0, 0), fx=self.scale, - fy=self.scale, interpolation=cv2.INTER_CUBIC) - b_image = np.array(image[np.newaxis] / 255.0 - 0.5, dtype=np.float32) - - hmap_person_viz = sess.run(self.heatmap_person_large, { - self.image_in: b_image}) - hmap_person = np.squeeze(hmap_person_viz) - - centers = utils.detect_objects_heatmap(hmap_person) - b_pose_image, b_pose_cmap = utils.prepare_input_posenet( - b_image[0], centers, - [utils.config.INPUT_SIZE, image.shape[1]], - [utils.config.INPUT_SIZE, utils.config.INPUT_SIZE], - batch_size=utils.config.BATCH_SIZE) - - feed_dict = { - self.pose_image_in: b_pose_image, - self.pose_centermap_in: b_pose_cmap - } - - # Estimate 2D poses - pred_2d_pose, pred_likelihood = sess.run([self.pred_2d_pose, - self.likelihoods], - feed_dict) - - estimated_2d_pose, visibility = utils.detect_parts_from_likelihoods(pred_2d_pose, - centers, - pred_likelihood) - - pose_2d = np.round(estimated_2d_pose / self.scale).astype(np.int32) - - # Estimate 3D poses - if lifting: - transformed_pose2d, weights = self.poseLifting.transform_joints( - estimated_2d_pose.copy(), visibility) - pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights) - return pose_2d, visibility, pose_3d - - return pose_2d, hmap_person - def close(self): - self.session.close() diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py deleted file mode 100644 index cbbdebfd8..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from npu_bridge.npu_init import * -from .prob_model import * -from .draw import * -from .cpm import * -from .process import * -from . import config -from . import upright_fast - diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py deleted file mode 100644 index f81a92fe3..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/config.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -__all__ = [ - 'VISIBLE_PART', - 'MIN_NUM_JOINTS', - 'CENTER_TR', - 'SIGMA', - 'STRIDE', - 'SIGMA_CENTER', - 'INPUT_SIZE', - 'OUTPUT_SIZE', - 'NUM_JOINTS', - 'NUM_OUTPUT', - 'H36M_NUM_JOINTS', - 'JOINT_DRAW_SIZE', - 'LIMB_DRAW_SIZE' -] - -# threshold -VISIBLE_PART = 1e-3 -MIN_NUM_JOINTS = 5 -CENTER_TR = 0.4 - -# net attributes -SIGMA = 7 -STRIDE = 8 -SIGMA_CENTER = 21 -INPUT_SIZE = 368 -OUTPUT_SIZE = 46 -NUM_JOINTS = 14 -NUM_OUTPUT = NUM_JOINTS + 1 -H36M_NUM_JOINTS = 17 - -# draw options -JOINT_DRAW_SIZE = 3 -LIMB_DRAW_SIZE = 1 -NORMALISATION_COEFFICIENT = 1280*720 - -# test options -BATCH_SIZE = 4 diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py deleted file mode 100644 index 1f8a3a847..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/cpm.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
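# --- Illustrative note, not part of the original patch: how the constants in
# the config.py removed above fit together. The CPM backbone pools three times
# with stride 2 (factor STRIDE = 8), so the heat-map side is INPUT_SIZE / STRIDE,
# and NUM_OUTPUT adds one channel beyond the 14 joints, used elsewhere in this
# patch set as the background ("no joint") map. Values copied from the config.
INPUT_SIZE, STRIDE, OUTPUT_SIZE = 368, 8, 46
NUM_JOINTS, NUM_OUTPUT = 14, 15
assert INPUT_SIZE // STRIDE == OUTPUT_SIZE
assert NUM_OUTPUT == NUM_JOINTS + 1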
-from npu_bridge.npu_init import * - -import tensorflow as tf -import tensorflow.contrib.layers as layers - -__all__ = [ - 'inference_person', - 'inference_pose' -] - - -def inference_person(image): - with tf.variable_scope('PersonNet'): - conv1_1 = layers.conv2d( - image, 64, 3, 1, activation_fn=None, scope='conv1_1') - conv1_1 = tf.nn.relu(conv1_1) - conv1_2 = layers.conv2d( - conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') - conv1_2 = tf.nn.relu(conv1_2) - pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) - conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, - activation_fn=None, scope='conv2_1') - conv2_1 = tf.nn.relu(conv2_1) - conv2_2 = layers.conv2d( - conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') - conv2_2 = tf.nn.relu(conv2_2) - pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) - conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, - activation_fn=None, scope='conv3_1') - conv3_1 = tf.nn.relu(conv3_1) - conv3_2 = layers.conv2d( - conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') - conv3_2 = tf.nn.relu(conv3_2) - conv3_3 = layers.conv2d( - conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') - conv3_3 = tf.nn.relu(conv3_3) - conv3_4 = layers.conv2d( - conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') - conv3_4 = tf.nn.relu(conv3_4) - pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) - conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, - activation_fn=None, scope='conv4_1') - conv4_1 = tf.nn.relu(conv4_1) - conv4_2 = layers.conv2d( - conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') - conv4_2 = tf.nn.relu(conv4_2) - conv4_3 = layers.conv2d( - conv4_2, 512, 3, 1, activation_fn=None, scope='conv4_3') - conv4_3 = tf.nn.relu(conv4_3) - conv4_4 = layers.conv2d( - conv4_3, 512, 3, 1, activation_fn=None, scope='conv4_4') - conv4_4 = tf.nn.relu(conv4_4) - conv5_1 = layers.conv2d( - conv4_4, 512, 3, 1, activation_fn=None, scope='conv5_1') - conv5_1 = tf.nn.relu(conv5_1) - conv5_2_CPM = layers.conv2d( - conv5_1, 128, 3, 1, activation_fn=None, scope='conv5_2_CPM') - conv5_2_CPM = tf.nn.relu(conv5_2_CPM) - conv6_1_CPM = layers.conv2d( - conv5_2_CPM, 512, 1, 1, activation_fn=None, scope='conv6_1_CPM') - conv6_1_CPM = tf.nn.relu(conv6_1_CPM) - conv6_2_CPM = layers.conv2d( - conv6_1_CPM, 1, 1, 1, activation_fn=None, scope='conv6_2_CPM') - concat_stage2 = tf.concat([conv6_2_CPM, conv5_2_CPM], 3) - Mconv1_stage2 = layers.conv2d( - concat_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage2') - Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) - Mconv2_stage2 = layers.conv2d( - Mconv1_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage2') - Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) - Mconv3_stage2 = layers.conv2d( - Mconv2_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage2') - Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) - Mconv4_stage2 = layers.conv2d( - Mconv3_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage2') - Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) - Mconv5_stage2 = layers.conv2d( - Mconv4_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage2') - Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) - Mconv6_stage2 = layers.conv2d( - Mconv5_stage2, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage2') - Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) - Mconv7_stage2 = layers.conv2d( - Mconv6_stage2, 1, 1, 1, activation_fn=None, scope='Mconv7_stage2') - concat_stage3 = tf.concat([Mconv7_stage2, conv5_2_CPM], 3) - Mconv1_stage3 = layers.conv2d( - concat_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage3') - Mconv1_stage3 = 
tf.nn.relu(Mconv1_stage3) - Mconv2_stage3 = layers.conv2d( - Mconv1_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage3') - Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) - Mconv3_stage3 = layers.conv2d( - Mconv2_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage3') - Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) - Mconv4_stage3 = layers.conv2d( - Mconv3_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage3') - Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) - Mconv5_stage3 = layers.conv2d( - Mconv4_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage3') - Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) - Mconv6_stage3 = layers.conv2d( - Mconv5_stage3, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage3') - Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) - Mconv7_stage3 = layers.conv2d( - Mconv6_stage3, 1, 1, 1, activation_fn=None, - scope='Mconv7_stage3') - concat_stage4 = tf.concat([Mconv7_stage3, conv5_2_CPM], 3) - Mconv1_stage4 = layers.conv2d( - concat_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage4') - Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) - Mconv2_stage4 = layers.conv2d( - Mconv1_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage4') - Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) - Mconv3_stage4 = layers.conv2d( - Mconv2_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage4') - Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) - Mconv4_stage4 = layers.conv2d( - Mconv3_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage4') - Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) - Mconv5_stage4 = layers.conv2d( - Mconv4_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage4') - Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) - Mconv6_stage4 = layers.conv2d( - Mconv5_stage4, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage4') - Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) - Mconv7_stage4 = layers.conv2d( - Mconv6_stage4, 1, 1, 1, activation_fn=None, scope='Mconv7_stage4') - return Mconv7_stage4 - - -def _argmax_2d(tensor): - """ - Compute argmax on the 2nd and 3d dimensions of the tensor. - e.g. given an input tensor of size N x K x K x C, then it computes the (x,y) coordinates for - each of the N images and C channels, corresponding to the max for that image and channel. 
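# --- Illustrative sketch, not part of the original patch: a NumPy restatement
# of the _argmax_2d idea described above. The two spatial dimensions are
# flattened, argmax is taken per image and channel, and (x, y) is recovered
# with divmod; the output is N x 2 x C as in the docstring. The helper name
# and the toy heat-map below are only for this example.
import numpy as np

def argmax_2d_np(tensor):
    n, k, _, c = tensor.shape
    flat = tensor.reshape(n, k * k, c)        # N x K*K x C
    best = flat.argmax(axis=1)                # linear index per image/channel
    idx_y, idx_x = np.divmod(best, k)
    return np.stack([idx_x, idx_y], axis=1)   # N x 2 x C

hm = np.zeros((1, 46, 46, 14))
hm[0, 10, 20, 3] = 1.0                        # peak at y=10, x=20 in channel 3
assert tuple(argmax_2d_np(hm)[0, :, 3]) == (20, 10)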
- :param tensor: image of size N x K x K x C - :return: argmax in the format N x 2 x C (where C corresponds to NUM_JOINTS) - """ - # get size - shape = tensor.get_shape().as_list()[1] - n_channels = tf.shape(tensor)[-1] - - # process each channel - linearised_channel = tf.reshape(tensor, [-1, shape * shape, n_channels]) - best_channel = tf.argmax(linearised_channel, axis=1) - - idx_y = tf.expand_dims(tf.floordiv(best_channel, shape), axis=1) - idx_x = tf.expand_dims(tf.mod(best_channel, shape), axis=1) - argmax_channels = tf.concat([idx_x, idx_y], axis=1, name='output') - return argmax_channels - - -def _process_stage(heat_maps, hm_size): - """ - For each heat-map identify joint position and likelihood - :param heat_maps: input heat-maps - :param hm_size: size in which to return the coordinates - :return: 2d joints (BATCH_SIZE x 14 x 2) - likelihood for each joint (BATCH_SIZE x 14) - """ - rescaled = tf.image.resize_images(heat_maps[:, :, :, :-1], [hm_size, hm_size]) - uncertainty = tf.reduce_max(tf.reduce_mean(rescaled, axis=1), axis=1, name='prob') - return _argmax_2d(rescaled), uncertainty - - -def inference_pose(image, center_map, hm_size, stage=6): - with tf.variable_scope('PoseNet'): - pool_center_lower = layers.avg_pool2d(center_map, 9, 8, padding='SAME') - conv1_1 = layers.conv2d( - image, 64, 3, 1, activation_fn=None, scope='conv1_1') - conv1_1 = tf.nn.relu(conv1_1) - conv1_2 = layers.conv2d( - conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') - conv1_2 = tf.nn.relu(conv1_2) - pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) - conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, - activation_fn=None, scope='conv2_1') - conv2_1 = tf.nn.relu(conv2_1) - conv2_2 = layers.conv2d( - conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') - conv2_2 = tf.nn.relu(conv2_2) - pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) - conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, - activation_fn=None, scope='conv3_1') - conv3_1 = tf.nn.relu(conv3_1) - conv3_2 = layers.conv2d( - conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') - conv3_2 = tf.nn.relu(conv3_2) - conv3_3 = layers.conv2d( - conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') - conv3_3 = tf.nn.relu(conv3_3) - conv3_4 = layers.conv2d( - conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') - conv3_4 = tf.nn.relu(conv3_4) - pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) - conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, - activation_fn=None, scope='conv4_1') - conv4_1 = tf.nn.relu(conv4_1) - conv4_2 = layers.conv2d( - conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') - conv4_2 = tf.nn.relu(conv4_2) - conv4_3_CPM = layers.conv2d( - conv4_2, 256, 3, 1, activation_fn=None, scope='conv4_3_CPM') - conv4_3_CPM = tf.nn.relu(conv4_3_CPM) - conv4_4_CPM = layers.conv2d( - conv4_3_CPM, 256, 3, 1, activation_fn=None, scope='conv4_4_CPM') - conv4_4_CPM = tf.nn.relu(conv4_4_CPM) - conv4_5_CPM = layers.conv2d( - conv4_4_CPM, 256, 3, 1, activation_fn=None, scope='conv4_5_CPM') - conv4_5_CPM = tf.nn.relu(conv4_5_CPM) - conv4_6_CPM = layers.conv2d( - conv4_5_CPM, 256, 3, 1, activation_fn=None, scope='conv4_6_CPM') - conv4_6_CPM = tf.nn.relu(conv4_6_CPM) - conv4_7_CPM = layers.conv2d( - conv4_6_CPM, 128, 3, 1, activation_fn=None, scope='conv4_7_CPM') - conv4_7_CPM = tf.nn.relu(conv4_7_CPM) - conv5_1_CPM = layers.conv2d( - conv4_7_CPM, 512, 1, 1, activation_fn=None, scope='conv5_1_CPM') - conv5_1_CPM = tf.nn.relu(conv5_1_CPM) - conv5_2_CPM = layers.conv2d( - conv5_1_CPM, 15, 1, 1, activation_fn=None, scope='conv5_2_CPM') - 
concat_stage2 = tf.concat( - [conv5_2_CPM, conv4_7_CPM, pool_center_lower], 3) - Mconv1_stage2 = layers.conv2d( - concat_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage2') - Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) - Mconv2_stage2 = layers.conv2d( - Mconv1_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage2') - Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) - Mconv3_stage2 = layers.conv2d( - Mconv2_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage2') - Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) - Mconv4_stage2 = layers.conv2d( - Mconv3_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage2') - Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) - Mconv5_stage2 = layers.conv2d( - Mconv4_stage2, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage2') - Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) - Mconv6_stage2 = layers.conv2d( - Mconv5_stage2, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage2') - Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) - Mconv7_stage2 = layers.conv2d( - Mconv6_stage2, 15, 1, 1, activation_fn=None, scope='Mconv7_stage2') - if stage == 2: - return _process_stage(Mconv7_stage2, hm_size) - - concat_stage3 = tf.concat( - [Mconv7_stage2, conv4_7_CPM, pool_center_lower], 3) - Mconv1_stage3 = layers.conv2d( - concat_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage3') - Mconv1_stage3 = tf.nn.relu(Mconv1_stage3) - Mconv2_stage3 = layers.conv2d( - Mconv1_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage3') - Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) - Mconv3_stage3 = layers.conv2d( - Mconv2_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage3') - Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) - Mconv4_stage3 = layers.conv2d( - Mconv3_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage3') - Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) - Mconv5_stage3 = layers.conv2d( - Mconv4_stage3, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage3') - Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) - Mconv6_stage3 = layers.conv2d( - Mconv5_stage3, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage3') - Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) - Mconv7_stage3 = layers.conv2d( - Mconv6_stage3, 15, 1, 1, activation_fn=None, scope='Mconv7_stage3') - if stage == 3: - return _process_stage(Mconv7_stage3, hm_size) - - concat_stage4 = tf.concat( - [Mconv7_stage3, conv4_7_CPM, pool_center_lower], 3) - Mconv1_stage4 = layers.conv2d( - concat_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage4') - Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) - Mconv2_stage4 = layers.conv2d( - Mconv1_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage4') - Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) - Mconv3_stage4 = layers.conv2d( - Mconv2_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage4') - Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) - Mconv4_stage4 = layers.conv2d( - Mconv3_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage4') - Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) - Mconv5_stage4 = layers.conv2d( - Mconv4_stage4, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage4') - Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) - Mconv6_stage4 = layers.conv2d( - Mconv5_stage4, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage4') - Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) - Mconv7_stage4 = layers.conv2d( - Mconv6_stage4, 15, 1, 1, activation_fn=None, scope='Mconv7_stage4') - if stage == 4: - return _process_stage(Mconv7_stage4, hm_size) - - concat_stage5 = tf.concat( - [Mconv7_stage4, conv4_7_CPM, pool_center_lower], 3) - Mconv1_stage5 
= layers.conv2d( - concat_stage5, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage5') - Mconv1_stage5 = tf.nn.relu(Mconv1_stage5) - Mconv2_stage5 = layers.conv2d( - Mconv1_stage5, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage5') - Mconv2_stage5 = tf.nn.relu(Mconv2_stage5) - Mconv3_stage5 = layers.conv2d( - Mconv2_stage5, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage5') - Mconv3_stage5 = tf.nn.relu(Mconv3_stage5) - Mconv4_stage5 = layers.conv2d( - Mconv3_stage5, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage5') - Mconv4_stage5 = tf.nn.relu(Mconv4_stage5) - Mconv5_stage5 = layers.conv2d( - Mconv4_stage5, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage5') - Mconv5_stage5 = tf.nn.relu(Mconv5_stage5) - Mconv6_stage5 = layers.conv2d( - Mconv5_stage5, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage5') - Mconv6_stage5 = tf.nn.relu(Mconv6_stage5) - Mconv7_stage5 = layers.conv2d( - Mconv6_stage5, 15, 1, 1, activation_fn=None, scope='Mconv7_stage5') - if stage == 5: - return _process_stage(Mconv7_stage5, hm_size) - - concat_stage6 = tf.concat( - [Mconv7_stage5, conv4_7_CPM, pool_center_lower], 3) - Mconv1_stage6 = layers.conv2d( - concat_stage6, 128, 7, 1, activation_fn=None, - scope='Mconv1_stage6') - Mconv1_stage6 = tf.nn.relu(Mconv1_stage6) - Mconv2_stage6 = layers.conv2d( - Mconv1_stage6, 128, 7, 1, activation_fn=None, - scope='Mconv2_stage6') - Mconv2_stage6 = tf.nn.relu(Mconv2_stage6) - Mconv3_stage6 = layers.conv2d( - Mconv2_stage6, 128, 7, 1, activation_fn=None, - scope='Mconv3_stage6') - Mconv3_stage6 = tf.nn.relu(Mconv3_stage6) - Mconv4_stage6 = layers.conv2d( - Mconv3_stage6, 128, 7, 1, activation_fn=None, - scope='Mconv4_stage6') - Mconv4_stage6 = tf.nn.relu(Mconv4_stage6) - Mconv5_stage6 = layers.conv2d( - Mconv4_stage6, 128, 7, 1, activation_fn=None, - scope='Mconv5_stage6') - Mconv5_stage6 = tf.nn.relu(Mconv5_stage6) - Mconv6_stage6 = layers.conv2d( - Mconv5_stage6, 128, 1, 1, activation_fn=None, - scope='Mconv6_stage6') - Mconv6_stage6 = tf.nn.relu(Mconv6_stage6) - Mconv7_stage6 = layers.conv2d( - Mconv6_stage6, 15, 1, 1, activation_fn=None, - scope='Mconv7_stage6') - return _process_stage(Mconv7_stage6, hm_size) - - diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py deleted file mode 100644 index d95affa0d..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/draw.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from npu_bridge.npu_init import * -import cv2 -import numpy as np -from .config import JOINT_DRAW_SIZE -from .config import NORMALISATION_COEFFICIENT -import matplotlib.pyplot as plt -import math - -__all__ = [ - 'draw_limbs', - 'plot_pose' -] - - -def draw_limbs(image, pose_2d, visible): - """Draw the 2D pose without the occluded/not visible joints.""" - - _COLORS = [ - [0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0], - [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170], - [170, 0, 255] - ] - # _COLORS = [ - # [0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], - # [128, 0, 0], [0, 128, 0], [0, 0, 128], [255, 255, 255], - # [128, 128, 128] - # ] - _LIMBS = np.array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, - 9, 10, 11, 12, 12, 13]).reshape((-1, 2)) - - _NORMALISATION_FACTOR = int(math.floor(math.sqrt(image.shape[0] * image.shape[1] / NORMALISATION_COEFFICIENT))) - - for oid in range(pose_2d.shape[0]): - # for i in range(14): - # cv2.putText(image, str(i), (pose_2d[oid][i][1], pose_2d[oid][i][0]), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255)) - for lid, (p0, p1) in enumerate(_LIMBS): - - if not (visible[oid][p0] and visible[oid][p1]): - continue - y0, x0 = pose_2d[oid][p0] - y1, x1 = pose_2d[oid][p1] - cv2.circle(image, (x0, y0), JOINT_DRAW_SIZE *_NORMALISATION_FACTOR , _COLORS[lid], -1) - cv2.circle(image, (x1, y1), JOINT_DRAW_SIZE*_NORMALISATION_FACTOR , _COLORS[lid], -1) - cv2.line(image, (x0, y0), (x1, y1), - _COLORS[lid], 10 , 16) # LIMB_DRAW_SIZE*_NORMALISATION_FACTOR - - -def plot_pose(pose): - """Plot the 3D pose showing the joint connections.""" - import mpl_toolkits.mplot3d.axes3d as p3 - - _CONNECTION = [ - [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], - [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], - [15, 16]] - - def joint_color(j): - """ - TODO: 'j' shadows name 'j' from outer scope - """ - - colors = [(0, 0, 0), (255, 0, 255), (0, 0, 255), - (0, 255, 255), (255, 0, 0), (0, 255, 0)] - _c = 0 - if j in range(1, 4): - _c = 1 - if j in range(4, 7): - _c = 2 - if j in range(9, 11): - _c = 3 - if j in range(11, 14): - _c = 4 - if j in range(14, 17): - _c = 5 - return colors[_c] - - assert (pose.ndim == 2) - assert (pose.shape[0] == 3) - fig = plt.figure() - ax = fig.gca(projection='3d') - for c in _CONNECTION: - col = '#%02x%02x%02x' % joint_color(c[0]) - ax.plot([pose[0, c[0]], pose[0, c[1]]], - [pose[1, c[0]], pose[1, c[1]]], - [pose[2, c[0]], pose[2, c[1]]], c=col) - for j in range(pose.shape[1]): - col = '#%02x%02x%02x' % joint_color(j) - ax.scatter(pose[0, j], pose[1, j], pose[2, j], - c=col, marker='o', edgecolor=col) - smallest = pose.min() - largest = pose.max() - ax.set_xlim3d(smallest, largest) - ax.set_ylim3d(smallest, largest) - ax.set_zlim3d(smallest, largest) - - return fig - - - diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py deleted file mode 100644 index b8ffe330b..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/prob_model.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
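# --- Illustrative usage sketch, not part of the original patch: how the
# draw_limbs / plot_pose helpers removed above are typically driven. pose_2d
# and visibility follow the shapes documented in _pose_estimator.estimate():
# (num_people x 14 x 2) joint coordinates in (y, x) order plus a matching
# boolean array, and pose_3d is (num_people x 3 x 17). The frame size and the
# dummy joint positions below are arbitrary; the calls are shown commented out
# because the module itself is being deleted in this patch.
import numpy as np

frame = np.zeros((368, 654, 3), dtype=np.uint8)
pose_2d = np.full((1, 14, 2), 100, dtype=np.int32)   # one dummy person
visibility = np.ones((1, 14), dtype=bool)
# draw_limbs(frame, pose_2d, visibility)   # draws joints and limbs into frame
# fig = plot_pose(pose_3d[0])              # pose_3d from estimate(..., lifting=True)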
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from npu_bridge.npu_init import * -import os -import scipy.io as sio -import numpy as np -from .upright_fast import pick_e -from . import config - -__all__ = ['Prob3dPose'] - - -class Prob3dPose: - - def __init__(self, prob_model_path): - model_param = sio.loadmat(prob_model_path) - self.mu = np.reshape( - model_param['mu'], (model_param['mu'].shape[0], 3, -1)) - self.e = np.reshape(model_param['e'], (model_param['e'].shape[ - 0], model_param['e'].shape[1], 3, -1)) - self.sigma = model_param['sigma'] - self.cam = np.array( - [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]]) - - @staticmethod - def cost3d(model, gt): - """3d error in mm""" - out = np.sqrt(((gt - model) ** 2).sum(1)).mean(-1) - return out - - @staticmethod - def renorm_gt(gt): - """Compel gt data to have mean joint length of one""" - _POSE_TREE = np.asarray([ - [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], - [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], - [15, 16]]).T - scale = np.sqrt(((gt[:, :, _POSE_TREE[0]] - - gt[:, :, _POSE_TREE[1]]) ** 2).sum(2).sum(1)) - return gt / scale[:, np.newaxis, np.newaxis] - - @staticmethod - def build_model(a, e, s0): - """Build 3D model""" - assert (s0.shape[1] == 3) - assert (e.shape[2] == 3) - assert (a.shape[1] == e.shape[1]) - out = np.einsum('...i,...ijk', a, e) - out += s0 - return out - - @staticmethod - def build_and_rot_model(a, e, s0, r): - """ - Build model and rotate according to the identified rotation matrix - """ - from numpy.core.umath_tests import matrix_multiply - - r2 = Prob3dPose.upgrade_r(r.T).transpose((0, 2, 1)) - mod = Prob3dPose.build_model(a, e, s0) - mod = matrix_multiply(r2, mod) - return mod - - @staticmethod - def upgrade_r(r): - """ - Upgrades complex parameterisation of planar rotation to tensor - containing per frame 3x3 rotation matrices - """ - assert (r.ndim == 2) - # Technically optional assert, but if this fails data is probably - # transposed - assert (r.shape[1] == 2) - assert (np.all(np.isfinite(r))) - norm = np.sqrt((r[:, :2] ** 2).sum(1)) - assert (np.all(norm > 0)) - r /= norm[:, np.newaxis] - assert (np.all(np.isfinite(r))) - newr = np.zeros((r.shape[0], 3, 3)) - newr[:, :2, 0] = r[:, :2] - newr[:, 2, 2] = 1 - newr[:, 1::-1, 1] = r[:, :2] - newr[:, 0, 1] *= -1 - return newr - - @staticmethod - def centre(data_2d): - """center data according to each of the coordiante components""" - return (data_2d.T - data_2d.mean(1)).T - - @staticmethod - def centre_all(data): - """center all data""" - if data.ndim == 2: - return Prob3dPose.centre(data) - return (data.transpose(2, 0, 1) - data.mean(2)).transpose(1, 2, 0) - - @staticmethod - def normalise_data(d2, weights): - """Normalise data according to height""" - - # the joints with weight set to 0 should not be considered in the - # normalisation process - d2 = d2.reshape(d2.shape[0], -1, 2).transpose(0, 2, 1) - idx_consider = weights[0, 0].astype(np.bool) - if np.sum(weights[:, 0].sum(1) >= config.MIN_NUM_JOINTS) == 0: - raise Exception( - 'Not enough 2D joints identified to generate 3D pose') - d2[:, :, idx_consider] = Prob3dPose.centre_all(d2[:, :, 
idx_consider]) - - # Height normalisation (2 meters) - m2 = d2[:, 1, idx_consider].min(1) / 2.0 - m2 -= d2[:, 1, idx_consider].max(1) / 2.0 - crap = m2 == 0 - m2[crap] = 1.0 - d2[:, :, idx_consider] /= m2[:, np.newaxis, np.newaxis] - return d2, m2 - - @staticmethod - def transform_joints(pose_2d, visible_joints): - """ - Transform the set of joints according to what the probabilistic model - expects as input. - - It returns the new set of joints of each of the people and the set of - weights for the joints. - """ - - _H36M_ORDER = [8, 9, 10, 11, 12, 13, 1, 0, 5, 6, 7, 2, 3, 4] - _W_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] - - def swap_xy(poses): - tmp = np.copy(poses[:, :, 0]) - poses[:, :, 0] = poses[:, :, 1] - poses[:, :, 1] = tmp - return poses - - assert (pose_2d.ndim == 3) - new_pose = pose_2d.copy() - new_pose = swap_xy(new_pose) - new_pose = new_pose[:, _H36M_ORDER] - - # defining weights according to occlusions - weights = np.zeros((pose_2d.shape[0], 2, config.H36M_NUM_JOINTS)) - ordered_visibility = np.repeat( - visible_joints[:, _H36M_ORDER, np.newaxis], 2, 2 - ).transpose([0, 2, 1]) - weights[:, :, _W_POS] = ordered_visibility - return new_pose, weights - - def affine_estimate(self, w, depth_reg=0.085, weights=None, scale=10.0, - scale_mean=0.0016 * 1.8 * 1.2, scale_std=1.2 * 0, - cap_scale=-0.00129): - """ - Quick switch to allow reconstruction at unknown scale returns a,r - and scale - """ - weights = np.zeros((0, 0, 0)) if weights is None else weights - - s = np.empty((self.sigma.shape[0], self.sigma.shape[1] + 4)) # e,y,x,z - s[:, :4] = 10 ** -5 # Tiny but makes stuff well-posed - s[:, 0] = scale_std - s[:, 4:] = self.sigma - s[:, 4:-1] *= scale - - e2 = np.zeros((self.e.shape[0], self.e.shape[ - 1] + 4, 3, self.e.shape[3])) - e2[:, 1, 0] = 1.0 - e2[:, 2, 1] = 1.0 - e2[:, 3, 0] = 1.0 - # This makes the least_squares problem ill posed, as X,Z are - # interchangable - # Hence regularisation above to speed convergence and stop blow-up - e2[:, 0] = self.mu - e2[:, 4:] = self.e - t_m = np.zeros_like(self.mu) - - res, a, r = pick_e(w, e2, t_m, self.cam, s, weights=weights, - interval=0.01, depth_reg=depth_reg, - scale_prior=scale_mean) - - scale = a[:, :, 0] - reestimate = scale > cap_scale - m = self.mu * cap_scale - for i in range(scale.shape[0]): - if reestimate[i].sum() > 0: - ehat = e2[i:i + 1, 1:] - mhat = m[i:i + 1] - shat = s[i:i + 1, 1:] - (res2, a2, r2) = pick_e( - w[reestimate[i]], ehat, mhat, self.cam, shat, - weights=weights[reestimate[i]], - interval=0.01, depth_reg=depth_reg, - scale_prior=scale_mean - ) - res[i:i + 1, reestimate[i]] = res2 - a[i:i + 1, reestimate[i], 1:] = a2 - a[i:i + 1, reestimate[i], 0] = cap_scale - r[i:i + 1, :, reestimate[i]] = r2 - scale = a[:, :, 0] - a = a[:, :, 1:] / a[:, :, 0][:, :, np.newaxis] - return res, e2[:, 1:], a, r, scale - - def better_rec(self, w, model, s=1, weights=1, damp_z=1): - """Quick switch to allow reconstruction at unknown scale - returns a,r and scale""" - from numpy.core.umath_tests import matrix_multiply - proj = matrix_multiply(self.cam[np.newaxis], model) - proj[:, :2] = (proj[:, :2] * s + w * weights) / (s + weights) - proj[:, 2] *= damp_z - out = matrix_multiply(self.cam.T[np.newaxis], proj) - return out - - def create_rec(self, w2, weights, res_weight=1): - """Reconstruct 3D pose given a 2D pose""" - _SIGMA_SCALING = 5.2 - - res, e, a, r, scale = self.affine_estimate( - w2, scale=_SIGMA_SCALING, weights=weights, - depth_reg=0, cap_scale=-0.001, scale_mean=-0.003 - ) - - remaining_dims = 3 * 
w2.shape[2] - e.shape[1] - assert (remaining_dims >= 0) - llambda = -np.log(self.sigma) - lgdet = np.sum(llambda[:, :-1], 1) + llambda[:, -1] * remaining_dims - score = (res * res_weight + lgdet[:, np.newaxis] * (scale ** 2)) - best = np.argmin(score, 0) - index = np.arange(best.shape[0]) - a2 = a[best, index] - r2 = r[best, :, index].T - rec = Prob3dPose.build_and_rot_model(a2, e[best], self.mu[best], r2) - rec *= -np.abs(scale[best, index])[:, np.newaxis, np.newaxis] - - rec = self.better_rec(w2, rec, 1, 1.55 * weights, 1) * -1 - rec = Prob3dPose.renorm_gt(rec) - rec *= 0.97 - return rec - - def compute_3d(self, pose_2d, weights): - """Reconstruct 3D poses given 2D estimations""" - - _J_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] - _SCALE_3D = 1174.88312988 - - if pose_2d.shape[1] != config.H36M_NUM_JOINTS: - # need to call the linear regressor - reg_joints = np.zeros( - (pose_2d.shape[0], config.H36M_NUM_JOINTS, 2)) - for oid, singe_pose in enumerate(pose_2d): - reg_joints[oid, _J_POS] = singe_pose - - norm_pose, _ = Prob3dPose.normalise_data(reg_joints, weights) - else: - norm_pose, _ = Prob3dPose.normalise_data(pose_2d, weights) - - pose_3d = self.create_rec(norm_pose, weights) * _SCALE_3D - return pose_3d - diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py deleted file mode 100644 index da0050947..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/process.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import division -from npu_bridge.npu_init import * - -import skimage.io -import skimage.transform -import scipy.ndimage as ndimage -import scipy.ndimage.filters as filters -from scipy.stats import multivariate_normal - -import os -import json -import numpy as np -from . 
import config -import cv2 -from itertools import compress - -__all__ = [ - 'detect_objects_heatmap', - 'detect_objects_heatmap', - 'gaussian_kernel', - 'gaussian_heatmap', - 'prepare_input_posenet', - 'detect_parts_heatmaps', - 'detect_parts_from_likelihoods', - 'import_json', - 'generate_labels', - 'generate_center_map', - 'rescale', - 'crop_image' -] - - -def detect_objects_heatmap(heatmap): - data = 256 * heatmap - data_max = filters.maximum_filter(data, 3) - maxima = (data == data_max) - data_min = filters.minimum_filter(data, 3) - diff = ((data_max - data_min) > 0.3) - maxima[diff == 0] = 0 - - labeled, num_objects = ndimage.label(maxima) - slices = ndimage.find_objects(labeled) - objects = np.zeros((num_objects, 2), dtype=np.int32) - pidx = 0 - for (dy, dx) in slices: - pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2] - if heatmap[pos[0], pos[1]] > config.CENTER_TR: - objects[pidx, :] = pos - pidx += 1 - return objects[:pidx] - - -def gaussian_kernel(h, w, sigma_h, sigma_w): - yx = np.mgrid[-h // 2:h // 2, -w // 2:w // 2] ** 2 - return np.exp(-yx[0, :, :] / sigma_h ** 2 - yx[1, :, :] / sigma_w ** 2) - - -def gaussian_heatmap(h, w, pos_x, pos_y, sigma_h=1, sigma_w=1, init=None): - """ - Compute the heat-map of size (w x h) with a gaussian distribution fit in - position (pos_x, pos_y) and a convariance matix defined by the related - sigma values. - The resulting heat-map can be summed to a given heat-map init. - """ - init = init if init is not None else [] - - cov_matrix = np.eye(2) * ([sigma_h**2, sigma_w**2]) - - x, y = np.mgrid[0:h, 0:w] - pos = np.dstack((x, y)) - rv = multivariate_normal([pos_x, pos_y], cov_matrix) - - tmp = rv.pdf(pos) - hmap = np.multiply( - tmp, np.sqrt(np.power(2 * np.pi, 2) * np.linalg.det(cov_matrix)) - ) - idx = np.where(hmap.flatten() <= np.exp(-4.6052)) - hmap.flatten()[idx] = 0 - - if np.size(init) == 0: - return hmap - - assert (np.shape(init) == hmap.shape) - hmap += init - idx = np.where(hmap.flatten() > 1) - hmap.flatten()[idx] = 1 - return hmap - - -def prepare_input_posenet(image, objects, size_person, size, - batch_size, sigma=25, border=400): - result = np.zeros((batch_size, size[0], size[1], 4)) - padded_image = np.zeros( - (1, size_person[0] + border, size_person[1] + border, 4)) - padded_image[0, border // 2:-border // 2, - border // 2:-border // 2, :3] = image - if objects.shape[0] > batch_size: - objects = objects[:batch_size] - for oid, (yc, xc) in enumerate(objects): - dh, dw = size[0] // 2, size[1] // 2 - y0, x0, y1, x1 = np.array( - [yc - dh, xc - dw, yc + dh, xc + dw]) + border // 2 - result[oid, :, :, :4] = padded_image[:, y0:y1, x0:x1, :] - result[oid, :, :, 3] = gaussian_kernel(size[0], size[1], sigma, sigma) - return np.split(result, [3], 3) - - -def detect_parts_heatmaps(heatmaps, centers, size, num_parts=14): - """ - Given heat-maps find the position of each joint by means of n argmax - function - """ - parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32) - visible = np.zeros((len(centers), num_parts), dtype=bool) - for oid, (yc, xc) in enumerate(centers): - part_hmap = skimage.transform.resize( - np.clip(heatmaps[oid], -1, 1), size) - for pid in range(num_parts): - y, x = np.unravel_index(np.argmax(part_hmap[:, :, pid]), size) - parts[oid, pid] = y + yc - size[0] // 2, x + xc - size[1] // 2 - visible[oid, pid] = np.mean( - part_hmap[:, :, pid]) > config.VISIBLE_PART - return parts, visible - - -def detect_parts_from_likelihoods(poses, centers, likelihoods, num_parts=14): - """ - Given heat-maps find 
the position of each joint by means of n argmax - function - """ - if len(centers) > config.BATCH_SIZE: - centers = centers[:config.BATCH_SIZE] - parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32) - visible = np.zeros((len(centers), num_parts), dtype=bool) - for oid, (yc, xc) in enumerate(centers): - for pid in range(num_parts): - x, y = poses[oid, :, pid] - parts[oid, pid] = y + yc - config.INPUT_SIZE // 2, x + xc - config.INPUT_SIZE // 2 - visible[oid, pid] = likelihoods[oid, pid] > config.VISIBLE_PART - return parts, visible - - -def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'): - """Get the json file containing the dataset. - We want the data to be shuffled, however the training has to be repeatable. - This means that once shuffled the order has to me mantained.""" - with open(path) as data_file: - data_this = json.load(data_file) - data_this = np.array(data_this['root']) - num_samples = len(data_this) - - if os.path.exists(order): - idx = np.load(order) - else: - idx = np.random.permutation(num_samples).tolist() - np.save(order, idx) - - is_not_validation = [not data_this[i]['isValidation'] - for i in range(num_samples)] - keep_data_idx = list(compress(idx, is_not_validation)) - - data = data_this[keep_data_idx] - return data, len(keep_data_idx) - - -def generate_labels(image_shape, joint_positions, num_other_people, - joints_other_people, offset): - """ - Given as input a set of joint positions and the size of the input image - it generates - a set of heat-maps of the same size. It generates both heat-maps used as - labels for the first stage (label_1st_lower) and for all the other stages - (label_lower). - """ - _FILTER_JOINTS = np.array([9, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5]) - - img_height, img_width, _ = image_shape - heat_maps_single_p = np.zeros( - (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) - heat_maps_other_p = np.zeros( - (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) - - # generate first set of heat-maps - for i in range(config.NUM_JOINTS): - # the set of joints can be different fromt the one in the json file - curr_joint = joint_positions[_FILTER_JOINTS[i]] - skip = (curr_joint[0] < 0 or curr_joint[1] < 0 or - curr_joint[0] >= img_width or curr_joint[1] >= img_height) - if not skip: - heat_maps_single_p[i] = gaussian_heatmap( - config.INPUT_SIZE, config.INPUT_SIZE, - curr_joint[ - 1] - offset[1], curr_joint[0] - offset[0], - config.SIGMA, config.SIGMA) - - heat_maps_other_p[i] = gaussian_heatmap( - config.INPUT_SIZE, config.INPUT_SIZE, - curr_joint[ - 1] - offset[1], curr_joint[0] - offset[0], - config.SIGMA, config.SIGMA) - - heat_maps_single_p[-1] = np.maximum( - 1 - np.max(heat_maps_single_p[:-1], axis=0), - np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) - heat_maps_single_p = np.transpose(heat_maps_single_p, (1, 2, 0)) - - # generate second set of heat-maps for other people in the image - for p in range(int(num_other_people)): - for i in range(config.NUM_JOINTS): - # the set of joints can be different fromt the one in the json file - try: - if num_other_people == 1: - curr_joint = joints_other_people[_FILTER_JOINTS[i]] - else: - curr_joint = joints_other_people[p][_FILTER_JOINTS[i]] - skip = ( - curr_joint[0] < 0 or curr_joint[1] < 0 or - curr_joint[0] >= img_width or curr_joint[1] >= img_height) - except IndexError: - skip = True - if not skip: - heat_maps_other_p[i] = gaussian_heatmap( - config.INPUT_SIZE, config.INPUT_SIZE, - curr_joint[1] - offset[1], curr_joint[0] - offset[0], - 
config.SIGMA, config.SIGMA, init=heat_maps_other_p[i]) - - heat_maps_other_p[-1] = np.maximum( - 1 - np.max(heat_maps_other_p[:-1], axis=0), - np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) - - heat_maps_other_p = np.transpose(heat_maps_other_p, (1, 2, 0)) - - # rescaling heat-maps accoring to the right shape - labels_single = rescale(heat_maps_single_p, config.OUTPUT_SIZE) - labels_people = rescale(heat_maps_other_p, config.OUTPUT_SIZE) - return labels_people, labels_single - - -def generate_center_map(center_pos, img_shape): - """ - Given the position of the person and the size of the input image it - generates - a heat-map where a gaissian distribution is fit in the position of the - person in the image. - """ - img_height = img_shape - img_width = img_shape - center_map = gaussian_heatmap( - img_height, img_width, center_pos[1], center_pos[0], - config.SIGMA_CENTER, config.SIGMA_CENTER) - return center_map - - -def rescale(data, new_size): - """Rescale data to a fixed dimension, regardless the number of channels. - Data has to be in the format (h,w,c).""" - if data.ndim > 2: - assert data.shape[2] < data.shape[0] - assert data.shape[2] < data.shape[1] - resized_data = cv2.resize( - data, (new_size, new_size), interpolation=cv2.INTER_CUBIC) - return resized_data - - -def crop_image(image, obj_pose): - """ - Crop the image in order to have the person at the center and the final - image size - is the same as the expected CNN input size. - Returns the cropped image and the offset that is used to update the joint - positions. - """ - offset_left = int(obj_pose[0] - config.INPUT_SIZE // 2) - offset_up = int(obj_pose[1] - config.INPUT_SIZE // 2) - # just for checking that it's inside the image - offset_right = int(image.shape[1] - obj_pose[0] - config.INPUT_SIZE // 2) - offset_bottom = int(image.shape[0] - obj_pose[1] - config.INPUT_SIZE // 2) - - pad_left, pad_right, pad_up, pad_bottom = 0, 0, 0, 0 - if offset_left < 0: - pad_left = -offset_left - if offset_right < 0: - pad_right = -offset_right - if offset_up < 0: - pad_up = -offset_up - if offset_bottom < 0: - pad_bottom = -offset_bottom - padded_image = np.lib.pad( - image, ((pad_up, pad_bottom), (pad_left, pad_right), (0, 0)), - 'constant', constant_values=((0, 0), (0, 0), (0, 0))) - - cropped_image = padded_image[ - offset_up + pad_up: offset_up + pad_up + config.INPUT_SIZE, - offset_left + pad_left: offset_left + pad_left + config.INPUT_SIZE] - - return cropped_image, np.array([offset_left, offset_up]) diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py deleted file mode 100644 index cbfd5ec22..000000000 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages /lifting/utils/upright_fast.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
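[Editorial note — illustrative sketch, not part of the patch.] The crop_image function above returns the cropped image together with the np.array([offset_left, offset_up]) offset that callers use to move joint annotations into the cropped frame, as generate_labels does when it subtracts offset[0]/offset[1] from the joint coordinates. A minimal NumPy sketch of that shift; the (num_joints x 2) layout in (x, y) order is an assumption for this example:

import numpy as np

def shift_joints_into_crop(joint_positions, offset):
    # joint_positions: (num_joints, 2) array in (x, y) order (assumed here)
    # offset: np.array([offset_left, offset_up]) as returned by crop_image
    joints = np.asarray(joint_positions, dtype=np.float32)
    return joints - offset[np.newaxis, :]

# Hypothetical usage:
#   cropped_image, offset = crop_image(image, obj_pose)
#   joints_in_crop = shift_joints_into_crop(joints, offset)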
-from npu_bridge.npu_init import * -import numpy as np -import scipy - -__all__ = [ - 'upgrade_r', - 'update_cam', - 'estimate_a_and_r_with_res', - 'estimate_a_and_r_with_res_weights', - 'pick_e' -] - - -def upgrade_r(r): - """Upgrades complex parameterisation of planar rotation to tensor containing - per frame 3x3 rotation matrices""" - newr = np.zeros((3, 3)) - newr[:2, 0] = r - newr[2, 2] = 1 - newr[1::-1, 1] = r - newr[0, 1] *= -1 - return newr - - -def update_cam(cam): - new_cam = cam[[0, 2, 1]].copy() - new_cam = new_cam[:, [0, 2, 1]] - return new_cam - - -def estimate_a_and_r_with_res( - w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, - residue, Ps, depth_reg, scale_prior): - """ - TODO: Missing the following parameters in docstring: - - w, e, s0, camera_r, Lambda, check, a, res, proj_e, depth_reg, - scale_prior - - TODO: The following parameters are not used: - - s0, weights - - So local optima are a problem in general. - However: - - 1. This problem is convex in a but not in r, and - - 2. each frame can be solved independently. - - So for each frame, we can do a grid search in r and take the globally - optimal solution. - - In practice, we just brute force over 100 different estimates of r, and - take the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a - given r. - - Arguments: - - w is a 3d measurement matrix of form frames*2*points - - e is a 3d set of basis vectors of from basis*3*points - - s0 is the 3d rest shape of form 3*points - - Lambda are the regularisor coefficients on the coefficients of the - weights typically generated using PPCA - - interval is how far round the circle we should check for break points - we check every interval*2*pi radians - - Returns: - - a (basis coefficients) and r (representation of rotations as a complex - number) - """ - frames = w.shape[0] - points = w.shape[2] - basis = e.shape[0] - r = np.empty(2) - Ps_reshape = Ps.reshape(2 * points) - w_reshape = w.reshape((frames, points * 2)) - - for i in range(check.size): - c = check[i] - r[0] = np.cos(c) - r[1] = np.sin(c) - grot = camera_r.dot(upgrade_r(r)) - rot = grot[:2] - res[:, :points * 2] = w_reshape - res[:, :points * 2] -= Ps_reshape - proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( - e.shape[0], 2 * points) - - if Lambda.size != 0: - proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) - res[:, 2 * points:].fill(0) - res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] - proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] - # depth regularizer not used - proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * - depth_reg) * grot[2]).dot(e) - # we let the person change scale - res[:, 2 * points] = scale_prior - - """ - TODO: PLEASE REVIEW THE FOLLOWING CODE.... 
- overwrite_a and overwrite_b ARE UNEXPECTED ARGUMENTS OF - scipy.linalg.lstsq - """ - a[i], residue[i], _, _ = scipy.linalg.lstsq( - proj_e.T, res.T, overwrite_a=True, overwrite_b=True) - - # find and return best coresponding solution - best = np.argmin(residue, 0) - assert (best.shape[0] == frames) - theta = check[best] - index = (best, np.arange(frames)) - aa = a.transpose(0, 2, 1)[index] - retres = residue[index] - r = np.empty((2, frames)) - r[0] = np.sin(theta) - r[1] = np.cos(theta) - return aa, r, retres - - -def estimate_a_and_r_with_res_weights( - w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, - residue, Ps, depth_reg, scale_prior): - """ - TODO: Missing the following parameters in docstring: - - w, e, s0, camera)r, Lambda, check, a, res, proj_e, residue, - Ps, depth_reg, scale_prior - - So local optima are a problem in general. - However: - - 1. This problem is convex in a but not in r, and - - 2. each frame can be solved independently. - - So for each frame, we can do a grid search in r and take the globally - optimal solution. - - In practice, we just brute force over 100 different estimates of r, and - take - the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a given r. - - Arguments: - - w is a 3d measurement matrix of form frames*2*points - - e is a 3d set of basis vectors of from basis*3*points - - s0 is the 3d rest shape of form 3*points - - Lambda are the regularisor coefficients on the coefficients of the - weights - typically generated using PPCA - - interval is how far round the circle we should check for break points - we check every interval*2*pi radians - - Returns: - - a (basis coefficients) and r (representation of rotations as a complex - number) - """ - frames = w.shape[0] - points = w.shape[2] - basis = e.shape[0] - r = np.empty(2) - Ps_reshape = Ps.reshape(2 * points) - w_reshape = w.reshape((frames, points * 2)) - p_copy = np.empty_like(proj_e) - - for i in range(check.size): - c = check[i] - r[0] = np.sin(c) - r[1] = np.cos(c) - grot = camera_r.dot(upgrade_r(r).T) - rot = grot[:2] - rot.dot(s0, Ps) # TODO: remove? - res[:, :points * 2] = w_reshape - res[:, :points * 2] -= Ps_reshape - proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( - e.shape[0], 2 * points) - - if Lambda.size != 0: - proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) - res[:, 2 * points:].fill(0) - res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] - proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] - proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * - depth_reg) * grot[2]).dot(e) - res[:, 2 * points] = scale_prior - if weights.size != 0: - res[:, :points * 2] *= weights - for j in range(frames): - p_copy[:] = proj_e - p_copy[:, :points * 2] *= weights[j] - a[i, :, j], comp_residual, _, _ = np.linalg.lstsq( - p_copy.T, res[j].T) - if not comp_residual: - # equations are over-determined - residue[i, j] = 1e-5 - else: - residue[i, j] = comp_residual - # find and return best coresponding solution - best = np.argmin(residue, 0) - index = (best, np.arange(frames)) - theta = check[best] - aa = a.transpose(0, 2, 1)[index] - retres = residue[index] - r = np.empty((2, frames)) - r[0] = np.sin(theta) - r[1] = np.cos(theta) - return aa, r, retres - - -def pick_e(w, e, s0, camera_r=None, Lambda=None, - weights=None, scale_prior=-0.0014, interval=0.01, depth_reg=0.0325): - """Brute force over charts from the manifold to find the best one. 
- Returns best chart index and its a and r coefficients - Returns assignment, and a and r coefficents""" - - camera_r = np.asarray([[1, 0, 0], [0, 0, -1], [0, 1, 0]] - ) if camera_r is None else camera_r - Lambda = np.ones((0, 0)) if Lambda is None else Lambda - weights = np.ones((0, 0, 0)) if weights is None else weights - - charts = e.shape[0] - frames = w.shape[0] - basis = e.shape[1] - points = e.shape[3] - assert (s0.shape[0] == charts) - r = np.empty((charts, 2, frames)) - a = np.empty((charts, frames, e.shape[1])) - score = np.empty((charts, frames)) - check = np.arange(0, 1, interval) * 2 * np.pi - cache_a = np.empty((check.size, basis, frames)) - residue = np.empty((check.size, frames)) - - if Lambda.size != 0: - res = np.zeros((frames, points * 2 + basis + points)) - proj_e = np.zeros((basis, 2 * points + basis + points)) - else: - res = np.empty((frames, points * 2)) - proj_e = np.empty((basis, 2 * points)) - Ps = np.empty((2, points)) - - if weights.size == 0: - for i in range(charts): - if Lambda.size != 0: - a[i], r[i], score[i] = estimate_a_and_r_with_res( - w, e[i], s0[i], camera_r, - Lambda[i], check, cache_a, weights, - res, proj_e, residue, Ps, - depth_reg, scale_prior) - else: - a[i], r[i], score[i] = estimate_a_and_r_with_res( - w, e[i], s0[i], camera_r, Lambda, - check, cache_a, weights, - res, proj_e, residue, Ps, - depth_reg, scale_prior) - else: - w2 = weights.reshape(weights.shape[0], -1) - for i in range(charts): - if Lambda.size != 0: - a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( - w, e[i], s0[i], camera_r, - Lambda[i], check, cache_a, w2, - res, proj_e, residue, Ps, - depth_reg, scale_prior) - else: - a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( - w, e[i], s0[i], camera_r, Lambda, - check, cache_a, w2, - res, proj_e, residue, Ps, - depth_reg, scale_prior) - - remaining_dims = 3 * w.shape[2] - e.shape[1] - assert (np.all(score > 0)) - assert (remaining_dims >= 0) - # Zero problems in log space due to un-regularised first co-efficient - l = Lambda.copy() - l[Lambda == 0] = 1 - llambda = -np.log(l) - score /= 2 - return score, a, r - -- Gitee From 76413ef70a6ebec1472ec28f9cd8ed03c4f0dc65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:25:33 +0000 Subject: [PATCH 35/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20packages?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From bcab4e154c8837fa42713f6fa3e80ad6df5d90f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:25:42 +0000 Subject: [PATCH 36/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20lifting?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages/lifting/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep 
b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 98fb431d164f6d8f288dee412c64b43b967f401d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:25:52 +0000 Subject: [PATCH 37/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/packages/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From a0af39ba461dfe205c4f21c4cfcf4945deb392ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:26:17 +0000 Subject: [PATCH 38/43] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20utils?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages/lifting/utils/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 19756755f0d5c7e4736dc0f48275092109be3f77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:26:23 +0000 Subject: [PATCH 39/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/packages/lifting/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages/lifting/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 20f38fa3b0e8c34c6aa3bd632ad595cb18091e7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:27:01 +0000 Subject: [PATCH 40/43] upload --- .../packages/lifting/__init__.py | 17 ++ .../packages/lifting/_pose_estimator.py | 187 ++++++++++++++++++ 2 files changed, 204 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py new file mode 100644 index 000000000..3bb056e45 --- /dev/null +++ 
b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from ._pose_estimator import * +from . import utils + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py new file mode 100644 index 000000000..986ea7bbf --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/_pose_estimator.py @@ -0,0 +1,187 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from . import utils +import cv2 +import numpy as np +import tensorflow as tf +import abc + +ABC = abc.ABCMeta('ABC', (object,), {}) + +__all__ = [ + 'PoseEstimatorInterface', + 'PoseEstimator' +] + + +class PoseEstimatorInterface(ABC): + + @abc.abstractmethod + def initialise(self, args): + pass + + @abc.abstractmethod + def estimate(self, image): + return + + @abc.abstractmethod + def train(self, image, labels): + return + + @abc.abstractmethod + def close(self): + pass + + +class PoseEstimator(PoseEstimatorInterface): + + def __init__(self, image_size, session_path, prob_model_path): + """Initialising the graph in tensorflow. 
+ INPUT: + image_size: Size of the image in the format (w x h x 3)""" + + self.session = None + self.poseLifting = utils.Prob3dPose(prob_model_path) + self.sess = -1 + self.orig_img_size = np.array(image_size) + self.scale = utils.config.INPUT_SIZE / (self.orig_img_size[0] * 1.0) + self.img_size = np.round( + self.orig_img_size * self.scale).astype(np.int32) + self.image_in = None + self.heatmap_person_large = None + self.pose_image_in = None + self.pose_centermap_in = None + self.pred_2d_pose = None + self.likelihoods = None + self.session_path = session_path + + def initialise(self): + """Load saved model in the graph + INPUT: + sess_path: path to the dir containing the tensorflow saved session + OUTPUT: + sess: tensorflow session""" + # initialize graph structrue + tf.reset_default_graph() + + with tf.variable_scope('CPM'): + # placeholders for person network + self.image_in = tf.placeholder( + tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 3]) + self.label_in = tf.placeholder( + tf.float32, [None, utils.config.INPUT_SIZE, self.img_size[1], 1]) + + heatmap_person = utils.inference_person(self.image_in) + + self.heatmap_person_large = tf.image.resize_images( + heatmap_person, [utils.config.INPUT_SIZE, self.img_size[1]]) + + # placeholders for pose network + self.pose_image_in = tf.placeholder( + tf.float32, + [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 3]) + + self.pose_centermap_in = tf.placeholder( + tf.float32, + [utils.config.BATCH_SIZE, utils.config.INPUT_SIZE, utils.config.INPUT_SIZE, 1]) + + self.pred_2d_pose, self.likelihoods = utils.inference_pose( + self.pose_image_in, self.pose_centermap_in, + utils.config.INPUT_SIZE) + + # set up loss and optimizer + self.loss = tf.reduce_mean(tf.abs(self.heatmap_person_large - self.label_in)) + self.optimizer = npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate=0.0000001)).minimize(self.loss) + + # load pretraining model + sess = tf.Session(config=npu_config_proto()) + sess.run(tf.global_variables_initializer()) + variables = tf.contrib.framework.get_variables_to_restore() + variables_to_resotre = [v for v in variables if v.name.split('/')[-1][:4] != 'Adam' and v.name[:4] != 'beta'] + self.saver = tf.train.Saver(variables_to_resotre) + self.saver.restore(sess, self.session_path) + self.session = sess + + def train(self, image, labels): + # input model,back propagation and then output loss + b_image = np.array(image / 255.0 - 0.5, dtype=np.float32) + labels = labels[:, :, :, np.newaxis] + + # self.session.run(self.optimizer, {self.image_in: b_image, self.label_in: labels}) + _, loss, heatmap_pred = self.session.run([self.optimizer, self.loss, self.heatmap_person_large], + feed_dict={self.image_in: b_image, self.label_in: labels}) + return loss, heatmap_pred + + def estimate(self, image, lifting=False): + """ + Estimate 2d and 3d poses on the image. 
+ INPUT: + image: RGB image in the format (w x h x 3) + sess: tensorflow session + OUTPUT: + pose_2d: 2D pose for each of the people in the image in the format + (num_ppl x num_joints x 2) + visibility: vector containing a bool + value for each joint representing the visibility of the joint in + the image (could be due to occlusions or the joint is not in the + image) + pose_3d: 3D pose for each of the people in the image in the + format (num_ppl x 3 x num_joints) + hmap_person: heatmap + """ + # test model + sess = self.session + + image = cv2.resize(image, (0, 0), fx=self.scale, + fy=self.scale, interpolation=cv2.INTER_CUBIC) + b_image = np.array(image[np.newaxis] / 255.0 - 0.5, dtype=np.float32) + + hmap_person_viz = sess.run(self.heatmap_person_large, { + self.image_in: b_image}) + hmap_person = np.squeeze(hmap_person_viz) + + centers = utils.detect_objects_heatmap(hmap_person) + b_pose_image, b_pose_cmap = utils.prepare_input_posenet( + b_image[0], centers, + [utils.config.INPUT_SIZE, image.shape[1]], + [utils.config.INPUT_SIZE, utils.config.INPUT_SIZE], + batch_size=utils.config.BATCH_SIZE) + + feed_dict = { + self.pose_image_in: b_pose_image, + self.pose_centermap_in: b_pose_cmap + } + + # Estimate 2D poses + pred_2d_pose, pred_likelihood = sess.run([self.pred_2d_pose, + self.likelihoods], + feed_dict) + + estimated_2d_pose, visibility = utils.detect_parts_from_likelihoods(pred_2d_pose, + centers, + pred_likelihood) + + pose_2d = np.round(estimated_2d_pose / self.scale).astype(np.int32) + + # Estimate 3D poses + if lifting: + transformed_pose2d, weights = self.poseLifting.transform_joints( + estimated_2d_pose.copy(), visibility) + pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights) + return pose_2d, visibility, pose_3d + + return pose_2d, hmap_person + def close(self): + self.session.close() -- Gitee From c6fea191a9175daf6b3cb4cf39e238cf2eff2804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:27:18 +0000 Subject: [PATCH 41/43] upload --- .../packages/lifting/utils/__init__.py | 21 + .../packages/lifting/utils/config.py | 51 +++ .../packages/lifting/utils/cpm.py | 408 ++++++++++++++++++ .../packages/lifting/utils/draw.py | 112 +++++ .../packages/lifting/utils/prob_model.py | 270 ++++++++++++ .../packages/lifting/utils/process.py | 310 +++++++++++++ .../packages/lifting/utils/upright_fast.py | 302 +++++++++++++ 7 files changed, 1474 insertions(+) create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py create mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py new file mode 100644 index 000000000..cbbdebfd8 --- /dev/null +++ 
b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +from .prob_model import * +from .draw import * +from .cpm import * +from .process import * +from . import config +from . import upright_fast + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py new file mode 100644 index 000000000..f9b54b984 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/config.py @@ -0,0 +1,51 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +__all__ = [ + 'VISIBLE_PART', + 'MIN_NUM_JOINTS', + 'CENTER_TR', + 'SIGMA', + 'STRIDE', + 'SIGMA_CENTER', + 'INPUT_SIZE', + 'OUTPUT_SIZE', + 'NUM_JOINTS', + 'NUM_OUTPUT', + 'H36M_NUM_JOINTS', + 'JOINT_DRAW_SIZE', + 'LIMB_DRAW_SIZE' +] + +# threshold +VISIBLE_PART = 1e-3 +MIN_NUM_JOINTS = 5 +CENTER_TR = 0.4 + +# net attributes +SIGMA = 7 +STRIDE = 8 +SIGMA_CENTER = 21 +INPUT_SIZE = 368 +OUTPUT_SIZE = 46 +NUM_JOINTS = 14 +NUM_OUTPUT = NUM_JOINTS + 1 +H36M_NUM_JOINTS = 17 + +# draw options +JOINT_DRAW_SIZE = 3 +LIMB_DRAW_SIZE = 1 +NORMALISATION_COEFFICIENT = 1280*720 + +# test options +BATCH_SIZE = 4 diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py new file mode 100644 index 000000000..1f8a3a847 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py @@ -0,0 +1,408 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
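[Editorial note — illustrative sketch, not part of the patch.] The constants in config.py above are mutually consistent: the three 2x2 max-pooling layers in cpm.py reduce resolution by STRIDE = 8, so the 368-pixel input side maps to the 46-pixel heat-map side, and the network output adds one background channel to the 14 joint channels. A trivial sanity check using the literal values copied from config.py:

INPUT_SIZE, STRIDE, OUTPUT_SIZE = 368, 8, 46   # values from config.py above
NUM_JOINTS, NUM_OUTPUT = 14, 15

assert INPUT_SIZE // STRIDE == OUTPUT_SIZE     # 368 // 8 == 46
assert NUM_OUTPUT == NUM_JOINTS + 1            # 14 joints plus one background map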
+from npu_bridge.npu_init import * + +import tensorflow as tf +import tensorflow.contrib.layers as layers + +__all__ = [ + 'inference_person', + 'inference_pose' +] + + +def inference_person(image): + with tf.variable_scope('PersonNet'): + conv1_1 = layers.conv2d( + image, 64, 3, 1, activation_fn=None, scope='conv1_1') + conv1_1 = tf.nn.relu(conv1_1) + conv1_2 = layers.conv2d( + conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') + conv1_2 = tf.nn.relu(conv1_2) + pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) + conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, + activation_fn=None, scope='conv2_1') + conv2_1 = tf.nn.relu(conv2_1) + conv2_2 = layers.conv2d( + conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') + conv2_2 = tf.nn.relu(conv2_2) + pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) + conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, + activation_fn=None, scope='conv3_1') + conv3_1 = tf.nn.relu(conv3_1) + conv3_2 = layers.conv2d( + conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') + conv3_2 = tf.nn.relu(conv3_2) + conv3_3 = layers.conv2d( + conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') + conv3_3 = tf.nn.relu(conv3_3) + conv3_4 = layers.conv2d( + conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') + conv3_4 = tf.nn.relu(conv3_4) + pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) + conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, + activation_fn=None, scope='conv4_1') + conv4_1 = tf.nn.relu(conv4_1) + conv4_2 = layers.conv2d( + conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') + conv4_2 = tf.nn.relu(conv4_2) + conv4_3 = layers.conv2d( + conv4_2, 512, 3, 1, activation_fn=None, scope='conv4_3') + conv4_3 = tf.nn.relu(conv4_3) + conv4_4 = layers.conv2d( + conv4_3, 512, 3, 1, activation_fn=None, scope='conv4_4') + conv4_4 = tf.nn.relu(conv4_4) + conv5_1 = layers.conv2d( + conv4_4, 512, 3, 1, activation_fn=None, scope='conv5_1') + conv5_1 = tf.nn.relu(conv5_1) + conv5_2_CPM = layers.conv2d( + conv5_1, 128, 3, 1, activation_fn=None, scope='conv5_2_CPM') + conv5_2_CPM = tf.nn.relu(conv5_2_CPM) + conv6_1_CPM = layers.conv2d( + conv5_2_CPM, 512, 1, 1, activation_fn=None, scope='conv6_1_CPM') + conv6_1_CPM = tf.nn.relu(conv6_1_CPM) + conv6_2_CPM = layers.conv2d( + conv6_1_CPM, 1, 1, 1, activation_fn=None, scope='conv6_2_CPM') + concat_stage2 = tf.concat([conv6_2_CPM, conv5_2_CPM], 3) + Mconv1_stage2 = layers.conv2d( + concat_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage2') + Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) + Mconv2_stage2 = layers.conv2d( + Mconv1_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage2') + Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) + Mconv3_stage2 = layers.conv2d( + Mconv2_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage2') + Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) + Mconv4_stage2 = layers.conv2d( + Mconv3_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage2') + Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) + Mconv5_stage2 = layers.conv2d( + Mconv4_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage2') + Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) + Mconv6_stage2 = layers.conv2d( + Mconv5_stage2, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage2') + Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) + Mconv7_stage2 = layers.conv2d( + Mconv6_stage2, 1, 1, 1, activation_fn=None, scope='Mconv7_stage2') + concat_stage3 = tf.concat([Mconv7_stage2, conv5_2_CPM], 3) + Mconv1_stage3 = layers.conv2d( + concat_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage3') + Mconv1_stage3 = 
tf.nn.relu(Mconv1_stage3) + Mconv2_stage3 = layers.conv2d( + Mconv1_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage3') + Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) + Mconv3_stage3 = layers.conv2d( + Mconv2_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage3') + Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) + Mconv4_stage3 = layers.conv2d( + Mconv3_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage3') + Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) + Mconv5_stage3 = layers.conv2d( + Mconv4_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage3') + Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) + Mconv6_stage3 = layers.conv2d( + Mconv5_stage3, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage3') + Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) + Mconv7_stage3 = layers.conv2d( + Mconv6_stage3, 1, 1, 1, activation_fn=None, + scope='Mconv7_stage3') + concat_stage4 = tf.concat([Mconv7_stage3, conv5_2_CPM], 3) + Mconv1_stage4 = layers.conv2d( + concat_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage4') + Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) + Mconv2_stage4 = layers.conv2d( + Mconv1_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage4') + Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) + Mconv3_stage4 = layers.conv2d( + Mconv2_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage4') + Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) + Mconv4_stage4 = layers.conv2d( + Mconv3_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage4') + Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) + Mconv5_stage4 = layers.conv2d( + Mconv4_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage4') + Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) + Mconv6_stage4 = layers.conv2d( + Mconv5_stage4, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage4') + Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) + Mconv7_stage4 = layers.conv2d( + Mconv6_stage4, 1, 1, 1, activation_fn=None, scope='Mconv7_stage4') + return Mconv7_stage4 + + +def _argmax_2d(tensor): + """ + Compute argmax on the 2nd and 3d dimensions of the tensor. + e.g. given an input tensor of size N x K x K x C, then it computes the (x,y) coordinates for + each of the N images and C channels, corresponding to the max for that image and channel. 
+ :param tensor: image of size N x K x K x C + :return: argmax in the format N x 2 x C (where C corresponds to NUM_JOINTS) + """ + # get size + shape = tensor.get_shape().as_list()[1] + n_channels = tf.shape(tensor)[-1] + + # process each channel + linearised_channel = tf.reshape(tensor, [-1, shape * shape, n_channels]) + best_channel = tf.argmax(linearised_channel, axis=1) + + idx_y = tf.expand_dims(tf.floordiv(best_channel, shape), axis=1) + idx_x = tf.expand_dims(tf.mod(best_channel, shape), axis=1) + argmax_channels = tf.concat([idx_x, idx_y], axis=1, name='output') + return argmax_channels + + +def _process_stage(heat_maps, hm_size): + """ + For each heat-map identify joint position and likelihood + :param heat_maps: input heat-maps + :param hm_size: size in which to return the coordinates + :return: 2d joints (BATCH_SIZE x 14 x 2) + likelihood for each joint (BATCH_SIZE x 14) + """ + rescaled = tf.image.resize_images(heat_maps[:, :, :, :-1], [hm_size, hm_size]) + uncertainty = tf.reduce_max(tf.reduce_mean(rescaled, axis=1), axis=1, name='prob') + return _argmax_2d(rescaled), uncertainty + + +def inference_pose(image, center_map, hm_size, stage=6): + with tf.variable_scope('PoseNet'): + pool_center_lower = layers.avg_pool2d(center_map, 9, 8, padding='SAME') + conv1_1 = layers.conv2d( + image, 64, 3, 1, activation_fn=None, scope='conv1_1') + conv1_1 = tf.nn.relu(conv1_1) + conv1_2 = layers.conv2d( + conv1_1, 64, 3, 1, activation_fn=None, scope='conv1_2') + conv1_2 = tf.nn.relu(conv1_2) + pool1_stage1 = layers.max_pool2d(conv1_2, 2, 2) + conv2_1 = layers.conv2d(pool1_stage1, 128, 3, 1, + activation_fn=None, scope='conv2_1') + conv2_1 = tf.nn.relu(conv2_1) + conv2_2 = layers.conv2d( + conv2_1, 128, 3, 1, activation_fn=None, scope='conv2_2') + conv2_2 = tf.nn.relu(conv2_2) + pool2_stage1 = layers.max_pool2d(conv2_2, 2, 2) + conv3_1 = layers.conv2d(pool2_stage1, 256, 3, 1, + activation_fn=None, scope='conv3_1') + conv3_1 = tf.nn.relu(conv3_1) + conv3_2 = layers.conv2d( + conv3_1, 256, 3, 1, activation_fn=None, scope='conv3_2') + conv3_2 = tf.nn.relu(conv3_2) + conv3_3 = layers.conv2d( + conv3_2, 256, 3, 1, activation_fn=None, scope='conv3_3') + conv3_3 = tf.nn.relu(conv3_3) + conv3_4 = layers.conv2d( + conv3_3, 256, 3, 1, activation_fn=None, scope='conv3_4') + conv3_4 = tf.nn.relu(conv3_4) + pool3_stage1 = layers.max_pool2d(conv3_4, 2, 2) + conv4_1 = layers.conv2d(pool3_stage1, 512, 3, 1, + activation_fn=None, scope='conv4_1') + conv4_1 = tf.nn.relu(conv4_1) + conv4_2 = layers.conv2d( + conv4_1, 512, 3, 1, activation_fn=None, scope='conv4_2') + conv4_2 = tf.nn.relu(conv4_2) + conv4_3_CPM = layers.conv2d( + conv4_2, 256, 3, 1, activation_fn=None, scope='conv4_3_CPM') + conv4_3_CPM = tf.nn.relu(conv4_3_CPM) + conv4_4_CPM = layers.conv2d( + conv4_3_CPM, 256, 3, 1, activation_fn=None, scope='conv4_4_CPM') + conv4_4_CPM = tf.nn.relu(conv4_4_CPM) + conv4_5_CPM = layers.conv2d( + conv4_4_CPM, 256, 3, 1, activation_fn=None, scope='conv4_5_CPM') + conv4_5_CPM = tf.nn.relu(conv4_5_CPM) + conv4_6_CPM = layers.conv2d( + conv4_5_CPM, 256, 3, 1, activation_fn=None, scope='conv4_6_CPM') + conv4_6_CPM = tf.nn.relu(conv4_6_CPM) + conv4_7_CPM = layers.conv2d( + conv4_6_CPM, 128, 3, 1, activation_fn=None, scope='conv4_7_CPM') + conv4_7_CPM = tf.nn.relu(conv4_7_CPM) + conv5_1_CPM = layers.conv2d( + conv4_7_CPM, 512, 1, 1, activation_fn=None, scope='conv5_1_CPM') + conv5_1_CPM = tf.nn.relu(conv5_1_CPM) + conv5_2_CPM = layers.conv2d( + conv5_1_CPM, 15, 1, 1, activation_fn=None, scope='conv5_2_CPM') + 
concat_stage2 = tf.concat( + [conv5_2_CPM, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage2 = layers.conv2d( + concat_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage2') + Mconv1_stage2 = tf.nn.relu(Mconv1_stage2) + Mconv2_stage2 = layers.conv2d( + Mconv1_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage2') + Mconv2_stage2 = tf.nn.relu(Mconv2_stage2) + Mconv3_stage2 = layers.conv2d( + Mconv2_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage2') + Mconv3_stage2 = tf.nn.relu(Mconv3_stage2) + Mconv4_stage2 = layers.conv2d( + Mconv3_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage2') + Mconv4_stage2 = tf.nn.relu(Mconv4_stage2) + Mconv5_stage2 = layers.conv2d( + Mconv4_stage2, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage2') + Mconv5_stage2 = tf.nn.relu(Mconv5_stage2) + Mconv6_stage2 = layers.conv2d( + Mconv5_stage2, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage2') + Mconv6_stage2 = tf.nn.relu(Mconv6_stage2) + Mconv7_stage2 = layers.conv2d( + Mconv6_stage2, 15, 1, 1, activation_fn=None, scope='Mconv7_stage2') + if stage == 2: + return _process_stage(Mconv7_stage2, hm_size) + + concat_stage3 = tf.concat( + [Mconv7_stage2, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage3 = layers.conv2d( + concat_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage3') + Mconv1_stage3 = tf.nn.relu(Mconv1_stage3) + Mconv2_stage3 = layers.conv2d( + Mconv1_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage3') + Mconv2_stage3 = tf.nn.relu(Mconv2_stage3) + Mconv3_stage3 = layers.conv2d( + Mconv2_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage3') + Mconv3_stage3 = tf.nn.relu(Mconv3_stage3) + Mconv4_stage3 = layers.conv2d( + Mconv3_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage3') + Mconv4_stage3 = tf.nn.relu(Mconv4_stage3) + Mconv5_stage3 = layers.conv2d( + Mconv4_stage3, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage3') + Mconv5_stage3 = tf.nn.relu(Mconv5_stage3) + Mconv6_stage3 = layers.conv2d( + Mconv5_stage3, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage3') + Mconv6_stage3 = tf.nn.relu(Mconv6_stage3) + Mconv7_stage3 = layers.conv2d( + Mconv6_stage3, 15, 1, 1, activation_fn=None, scope='Mconv7_stage3') + if stage == 3: + return _process_stage(Mconv7_stage3, hm_size) + + concat_stage4 = tf.concat( + [Mconv7_stage3, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage4 = layers.conv2d( + concat_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage4') + Mconv1_stage4 = tf.nn.relu(Mconv1_stage4) + Mconv2_stage4 = layers.conv2d( + Mconv1_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage4') + Mconv2_stage4 = tf.nn.relu(Mconv2_stage4) + Mconv3_stage4 = layers.conv2d( + Mconv2_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage4') + Mconv3_stage4 = tf.nn.relu(Mconv3_stage4) + Mconv4_stage4 = layers.conv2d( + Mconv3_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage4') + Mconv4_stage4 = tf.nn.relu(Mconv4_stage4) + Mconv5_stage4 = layers.conv2d( + Mconv4_stage4, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage4') + Mconv5_stage4 = tf.nn.relu(Mconv5_stage4) + Mconv6_stage4 = layers.conv2d( + Mconv5_stage4, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage4') + Mconv6_stage4 = tf.nn.relu(Mconv6_stage4) + Mconv7_stage4 = layers.conv2d( + Mconv6_stage4, 15, 1, 1, activation_fn=None, scope='Mconv7_stage4') + if stage == 4: + return _process_stage(Mconv7_stage4, hm_size) + + concat_stage5 = tf.concat( + [Mconv7_stage4, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage5 
= layers.conv2d( + concat_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage5') + Mconv1_stage5 = tf.nn.relu(Mconv1_stage5) + Mconv2_stage5 = layers.conv2d( + Mconv1_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage5') + Mconv2_stage5 = tf.nn.relu(Mconv2_stage5) + Mconv3_stage5 = layers.conv2d( + Mconv2_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage5') + Mconv3_stage5 = tf.nn.relu(Mconv3_stage5) + Mconv4_stage5 = layers.conv2d( + Mconv3_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage5') + Mconv4_stage5 = tf.nn.relu(Mconv4_stage5) + Mconv5_stage5 = layers.conv2d( + Mconv4_stage5, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage5') + Mconv5_stage5 = tf.nn.relu(Mconv5_stage5) + Mconv6_stage5 = layers.conv2d( + Mconv5_stage5, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage5') + Mconv6_stage5 = tf.nn.relu(Mconv6_stage5) + Mconv7_stage5 = layers.conv2d( + Mconv6_stage5, 15, 1, 1, activation_fn=None, scope='Mconv7_stage5') + if stage == 5: + return _process_stage(Mconv7_stage5, hm_size) + + concat_stage6 = tf.concat( + [Mconv7_stage5, conv4_7_CPM, pool_center_lower], 3) + Mconv1_stage6 = layers.conv2d( + concat_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv1_stage6') + Mconv1_stage6 = tf.nn.relu(Mconv1_stage6) + Mconv2_stage6 = layers.conv2d( + Mconv1_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv2_stage6') + Mconv2_stage6 = tf.nn.relu(Mconv2_stage6) + Mconv3_stage6 = layers.conv2d( + Mconv2_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv3_stage6') + Mconv3_stage6 = tf.nn.relu(Mconv3_stage6) + Mconv4_stage6 = layers.conv2d( + Mconv3_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv4_stage6') + Mconv4_stage6 = tf.nn.relu(Mconv4_stage6) + Mconv5_stage6 = layers.conv2d( + Mconv4_stage6, 128, 7, 1, activation_fn=None, + scope='Mconv5_stage6') + Mconv5_stage6 = tf.nn.relu(Mconv5_stage6) + Mconv6_stage6 = layers.conv2d( + Mconv5_stage6, 128, 1, 1, activation_fn=None, + scope='Mconv6_stage6') + Mconv6_stage6 = tf.nn.relu(Mconv6_stage6) + Mconv7_stage6 = layers.conv2d( + Mconv6_stage6, 15, 1, 1, activation_fn=None, + scope='Mconv7_stage6') + return _process_stage(Mconv7_stage6, hm_size) + + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py new file mode 100644 index 000000000..d95affa0d --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/draw.py @@ -0,0 +1,112 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
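[Editorial note — illustrative sketch, not part of the patch.] In cpm.py above, each stage's output is decoded by _process_stage, which resizes the heat-maps to hm_size and then uses _argmax_2d to turn a flattened per-channel argmax back into (x, y) coordinates. The same decoding step written in plain NumPy for a single image of K x K x C heat-maps:

import numpy as np

def argmax_2d_numpy(hmaps):
    # hmaps: (K, K, C) array; returns a (2, C) array of (x, y) peak positions
    k, _, c = hmaps.shape
    flat = hmaps.reshape(k * k, c)     # row-major flatten, matching tf.reshape above
    best = flat.argmax(axis=0)         # flattened index of each channel's maximum
    idx_y = best // k                  # row (y) index
    idx_x = best % k                   # column (x) index
    return np.stack([idx_x, idx_y], axis=0)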
+from npu_bridge.npu_init import * +import cv2 +import numpy as np +from .config import JOINT_DRAW_SIZE +from .config import NORMALISATION_COEFFICIENT +import matplotlib.pyplot as plt +import math + +__all__ = [ + 'draw_limbs', + 'plot_pose' +] + + +def draw_limbs(image, pose_2d, visible): + """Draw the 2D pose without the occluded/not visible joints.""" + + _COLORS = [ + [0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0], + [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170], + [170, 0, 255] + ] + # _COLORS = [ + # [0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], + # [128, 0, 0], [0, 128, 0], [0, 0, 128], [255, 255, 255], + # [128, 128, 128] + # ] + _LIMBS = np.array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, + 9, 10, 11, 12, 12, 13]).reshape((-1, 2)) + + _NORMALISATION_FACTOR = int(math.floor(math.sqrt(image.shape[0] * image.shape[1] / NORMALISATION_COEFFICIENT))) + + for oid in range(pose_2d.shape[0]): + # for i in range(14): + # cv2.putText(image, str(i), (pose_2d[oid][i][1], pose_2d[oid][i][0]), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255)) + for lid, (p0, p1) in enumerate(_LIMBS): + + if not (visible[oid][p0] and visible[oid][p1]): + continue + y0, x0 = pose_2d[oid][p0] + y1, x1 = pose_2d[oid][p1] + cv2.circle(image, (x0, y0), JOINT_DRAW_SIZE *_NORMALISATION_FACTOR , _COLORS[lid], -1) + cv2.circle(image, (x1, y1), JOINT_DRAW_SIZE*_NORMALISATION_FACTOR , _COLORS[lid], -1) + cv2.line(image, (x0, y0), (x1, y1), + _COLORS[lid], 10 , 16) # LIMB_DRAW_SIZE*_NORMALISATION_FACTOR + + +def plot_pose(pose): + """Plot the 3D pose showing the joint connections.""" + import mpl_toolkits.mplot3d.axes3d as p3 + + _CONNECTION = [ + [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], + [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], + [15, 16]] + + def joint_color(j): + """ + TODO: 'j' shadows name 'j' from outer scope + """ + + colors = [(0, 0, 0), (255, 0, 255), (0, 0, 255), + (0, 255, 255), (255, 0, 0), (0, 255, 0)] + _c = 0 + if j in range(1, 4): + _c = 1 + if j in range(4, 7): + _c = 2 + if j in range(9, 11): + _c = 3 + if j in range(11, 14): + _c = 4 + if j in range(14, 17): + _c = 5 + return colors[_c] + + assert (pose.ndim == 2) + assert (pose.shape[0] == 3) + fig = plt.figure() + ax = fig.gca(projection='3d') + for c in _CONNECTION: + col = '#%02x%02x%02x' % joint_color(c[0]) + ax.plot([pose[0, c[0]], pose[0, c[1]]], + [pose[1, c[0]], pose[1, c[1]]], + [pose[2, c[0]], pose[2, c[1]]], c=col) + for j in range(pose.shape[1]): + col = '#%02x%02x%02x' % joint_color(j) + ax.scatter(pose[0, j], pose[1, j], pose[2, j], + c=col, marker='o', edgecolor=col) + smallest = pose.min() + largest = pose.max() + ax.set_xlim3d(smallest, largest) + ax.set_ylim3d(smallest, largest) + ax.set_zlim3d(smallest, largest) + + return fig + + + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py new file mode 100644 index 000000000..b8ffe330b --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/prob_model.py @@ -0,0 +1,270 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from npu_bridge.npu_init import * +import os +import scipy.io as sio +import numpy as np +from .upright_fast import pick_e +from . import config + +__all__ = ['Prob3dPose'] + + +class Prob3dPose: + + def __init__(self, prob_model_path): + model_param = sio.loadmat(prob_model_path) + self.mu = np.reshape( + model_param['mu'], (model_param['mu'].shape[0], 3, -1)) + self.e = np.reshape(model_param['e'], (model_param['e'].shape[ + 0], model_param['e'].shape[1], 3, -1)) + self.sigma = model_param['sigma'] + self.cam = np.array( + [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]]) + + @staticmethod + def cost3d(model, gt): + """3d error in mm""" + out = np.sqrt(((gt - model) ** 2).sum(1)).mean(-1) + return out + + @staticmethod + def renorm_gt(gt): + """Compel gt data to have mean joint length of one""" + _POSE_TREE = np.asarray([ + [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], + [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], + [15, 16]]).T + scale = np.sqrt(((gt[:, :, _POSE_TREE[0]] - + gt[:, :, _POSE_TREE[1]]) ** 2).sum(2).sum(1)) + return gt / scale[:, np.newaxis, np.newaxis] + + @staticmethod + def build_model(a, e, s0): + """Build 3D model""" + assert (s0.shape[1] == 3) + assert (e.shape[2] == 3) + assert (a.shape[1] == e.shape[1]) + out = np.einsum('...i,...ijk', a, e) + out += s0 + return out + + @staticmethod + def build_and_rot_model(a, e, s0, r): + """ + Build model and rotate according to the identified rotation matrix + """ + from numpy.core.umath_tests import matrix_multiply + + r2 = Prob3dPose.upgrade_r(r.T).transpose((0, 2, 1)) + mod = Prob3dPose.build_model(a, e, s0) + mod = matrix_multiply(r2, mod) + return mod + + @staticmethod + def upgrade_r(r): + """ + Upgrades complex parameterisation of planar rotation to tensor + containing per frame 3x3 rotation matrices + """ + assert (r.ndim == 2) + # Technically optional assert, but if this fails data is probably + # transposed + assert (r.shape[1] == 2) + assert (np.all(np.isfinite(r))) + norm = np.sqrt((r[:, :2] ** 2).sum(1)) + assert (np.all(norm > 0)) + r /= norm[:, np.newaxis] + assert (np.all(np.isfinite(r))) + newr = np.zeros((r.shape[0], 3, 3)) + newr[:, :2, 0] = r[:, :2] + newr[:, 2, 2] = 1 + newr[:, 1::-1, 1] = r[:, :2] + newr[:, 0, 1] *= -1 + return newr + + @staticmethod + def centre(data_2d): + """center data according to each of the coordiante components""" + return (data_2d.T - data_2d.mean(1)).T + + @staticmethod + def centre_all(data): + """center all data""" + if data.ndim == 2: + return Prob3dPose.centre(data) + return (data.transpose(2, 0, 1) - data.mean(2)).transpose(1, 2, 0) + + @staticmethod + def normalise_data(d2, weights): + """Normalise data according to height""" + + # the joints with weight set to 0 should not be considered in the + # normalisation process + d2 = d2.reshape(d2.shape[0], -1, 2).transpose(0, 2, 1) + idx_consider = weights[0, 0].astype(np.bool) + if np.sum(weights[:, 0].sum(1) >= config.MIN_NUM_JOINTS) == 0: + raise Exception( + 'Not enough 2D joints identified to generate 3D pose') + d2[:, :, idx_consider] = Prob3dPose.centre_all(d2[:, :, 
idx_consider]) + + # Height normalisation (2 meters) + m2 = d2[:, 1, idx_consider].min(1) / 2.0 + m2 -= d2[:, 1, idx_consider].max(1) / 2.0 + crap = m2 == 0 + m2[crap] = 1.0 + d2[:, :, idx_consider] /= m2[:, np.newaxis, np.newaxis] + return d2, m2 + + @staticmethod + def transform_joints(pose_2d, visible_joints): + """ + Transform the set of joints according to what the probabilistic model + expects as input. + + It returns the new set of joints of each of the people and the set of + weights for the joints. + """ + + _H36M_ORDER = [8, 9, 10, 11, 12, 13, 1, 0, 5, 6, 7, 2, 3, 4] + _W_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] + + def swap_xy(poses): + tmp = np.copy(poses[:, :, 0]) + poses[:, :, 0] = poses[:, :, 1] + poses[:, :, 1] = tmp + return poses + + assert (pose_2d.ndim == 3) + new_pose = pose_2d.copy() + new_pose = swap_xy(new_pose) + new_pose = new_pose[:, _H36M_ORDER] + + # defining weights according to occlusions + weights = np.zeros((pose_2d.shape[0], 2, config.H36M_NUM_JOINTS)) + ordered_visibility = np.repeat( + visible_joints[:, _H36M_ORDER, np.newaxis], 2, 2 + ).transpose([0, 2, 1]) + weights[:, :, _W_POS] = ordered_visibility + return new_pose, weights + + def affine_estimate(self, w, depth_reg=0.085, weights=None, scale=10.0, + scale_mean=0.0016 * 1.8 * 1.2, scale_std=1.2 * 0, + cap_scale=-0.00129): + """ + Quick switch to allow reconstruction at unknown scale returns a,r + and scale + """ + weights = np.zeros((0, 0, 0)) if weights is None else weights + + s = np.empty((self.sigma.shape[0], self.sigma.shape[1] + 4)) # e,y,x,z + s[:, :4] = 10 ** -5 # Tiny but makes stuff well-posed + s[:, 0] = scale_std + s[:, 4:] = self.sigma + s[:, 4:-1] *= scale + + e2 = np.zeros((self.e.shape[0], self.e.shape[ + 1] + 4, 3, self.e.shape[3])) + e2[:, 1, 0] = 1.0 + e2[:, 2, 1] = 1.0 + e2[:, 3, 0] = 1.0 + # This makes the least_squares problem ill posed, as X,Z are + # interchangable + # Hence regularisation above to speed convergence and stop blow-up + e2[:, 0] = self.mu + e2[:, 4:] = self.e + t_m = np.zeros_like(self.mu) + + res, a, r = pick_e(w, e2, t_m, self.cam, s, weights=weights, + interval=0.01, depth_reg=depth_reg, + scale_prior=scale_mean) + + scale = a[:, :, 0] + reestimate = scale > cap_scale + m = self.mu * cap_scale + for i in range(scale.shape[0]): + if reestimate[i].sum() > 0: + ehat = e2[i:i + 1, 1:] + mhat = m[i:i + 1] + shat = s[i:i + 1, 1:] + (res2, a2, r2) = pick_e( + w[reestimate[i]], ehat, mhat, self.cam, shat, + weights=weights[reestimate[i]], + interval=0.01, depth_reg=depth_reg, + scale_prior=scale_mean + ) + res[i:i + 1, reestimate[i]] = res2 + a[i:i + 1, reestimate[i], 1:] = a2 + a[i:i + 1, reestimate[i], 0] = cap_scale + r[i:i + 1, :, reestimate[i]] = r2 + scale = a[:, :, 0] + a = a[:, :, 1:] / a[:, :, 0][:, :, np.newaxis] + return res, e2[:, 1:], a, r, scale + + def better_rec(self, w, model, s=1, weights=1, damp_z=1): + """Quick switch to allow reconstruction at unknown scale + returns a,r and scale""" + from numpy.core.umath_tests import matrix_multiply + proj = matrix_multiply(self.cam[np.newaxis], model) + proj[:, :2] = (proj[:, :2] * s + w * weights) / (s + weights) + proj[:, 2] *= damp_z + out = matrix_multiply(self.cam.T[np.newaxis], proj) + return out + + def create_rec(self, w2, weights, res_weight=1): + """Reconstruct 3D pose given a 2D pose""" + _SIGMA_SCALING = 5.2 + + res, e, a, r, scale = self.affine_estimate( + w2, scale=_SIGMA_SCALING, weights=weights, + depth_reg=0, cap_scale=-0.001, scale_mean=-0.003 + ) + + remaining_dims = 3 * 
w2.shape[2] - e.shape[1] + assert (remaining_dims >= 0) + llambda = -np.log(self.sigma) + lgdet = np.sum(llambda[:, :-1], 1) + llambda[:, -1] * remaining_dims + score = (res * res_weight + lgdet[:, np.newaxis] * (scale ** 2)) + best = np.argmin(score, 0) + index = np.arange(best.shape[0]) + a2 = a[best, index] + r2 = r[best, :, index].T + rec = Prob3dPose.build_and_rot_model(a2, e[best], self.mu[best], r2) + rec *= -np.abs(scale[best, index])[:, np.newaxis, np.newaxis] + + rec = self.better_rec(w2, rec, 1, 1.55 * weights, 1) * -1 + rec = Prob3dPose.renorm_gt(rec) + rec *= 0.97 + return rec + + def compute_3d(self, pose_2d, weights): + """Reconstruct 3D poses given 2D estimations""" + + _J_POS = [1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16] + _SCALE_3D = 1174.88312988 + + if pose_2d.shape[1] != config.H36M_NUM_JOINTS: + # need to call the linear regressor + reg_joints = np.zeros( + (pose_2d.shape[0], config.H36M_NUM_JOINTS, 2)) + for oid, singe_pose in enumerate(pose_2d): + reg_joints[oid, _J_POS] = singe_pose + + norm_pose, _ = Prob3dPose.normalise_data(reg_joints, weights) + else: + norm_pose, _ = Prob3dPose.normalise_data(pose_2d, weights) + + pose_3d = self.create_rec(norm_pose, weights) * _SCALE_3D + return pose_3d + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py new file mode 100644 index 000000000..ab25ff010 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/process.py @@ -0,0 +1,310 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import division +from npu_bridge.npu_init import * + +import skimage.io +import skimage.transform +import scipy.ndimage as ndimage +import scipy.ndimage.filters as filters +from scipy.stats import multivariate_normal + +import os +import json +import numpy as np +from . 
import config
+import cv2
+from itertools import compress
+
+__all__ = [
+    'detect_objects_heatmap',
+    'gaussian_kernel',
+    'gaussian_heatmap',
+    'prepare_input_posenet',
+    'detect_parts_heatmaps',
+    'detect_parts_from_likelihoods',
+    'import_json',
+    'generate_labels',
+    'generate_center_map',
+    'rescale',
+    'crop_image'
+]
+
+
+def detect_objects_heatmap(heatmap):
+    data = 256 * heatmap
+    data_max = filters.maximum_filter(data, 3)
+    maxima = (data == data_max)
+    data_min = filters.minimum_filter(data, 3)
+    diff = ((data_max - data_min) > 0.3)
+    maxima[diff == 0] = 0
+
+    labeled, num_objects = ndimage.label(maxima)
+    slices = ndimage.find_objects(labeled)
+    objects = np.zeros((num_objects, 2), dtype=np.int32)
+    pidx = 0
+    for (dy, dx) in slices:
+        pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2]
+        if heatmap[pos[0], pos[1]] > config.CENTER_TR:
+            objects[pidx, :] = pos
+            pidx += 1
+    return objects[:pidx]
+
+
+def gaussian_kernel(h, w, sigma_h, sigma_w):
+    yx = np.mgrid[-h // 2:h // 2, -w // 2:w // 2] ** 2
+    return np.exp(-yx[0, :, :] / sigma_h ** 2 - yx[1, :, :] / sigma_w ** 2)
+
+
+def gaussian_heatmap(h, w, pos_x, pos_y, sigma_h=1, sigma_w=1, init=None):
+    """
+    Compute the heat-map of size (w x h) with a gaussian distribution fit in
+    position (pos_x, pos_y) and a covariance matrix defined by the related
+    sigma values.
+    The resulting heat-map can be summed to a given heat-map init.
+    """
+    init = init if init is not None else []
+
+    cov_matrix = np.eye(2) * ([sigma_h**2, sigma_w**2])
+
+    x, y = np.mgrid[0:h, 0:w]
+    pos = np.dstack((x, y))
+    rv = multivariate_normal([pos_x, pos_y], cov_matrix)
+
+    tmp = rv.pdf(pos)
+    hmap = np.multiply(
+        tmp, np.sqrt(np.power(2 * np.pi, 2) * np.linalg.det(cov_matrix))
+    )
+    idx = np.where(hmap.flatten() <= np.exp(-4.6052))
+    hmap.flatten()[idx] = 0
+
+    if np.size(init) == 0:
+        return hmap
+
+    assert (np.shape(init) == hmap.shape)
+    hmap += init
+    idx = np.where(hmap.flatten() > 1)
+    hmap.flatten()[idx] = 1
+    return hmap
+
+
+def prepare_input_posenet(image, objects, size_person, size,
+                          batch_size, sigma=25, border=400):
+    result = np.zeros((batch_size, size[0], size[1], 4))
+    padded_image = np.zeros(
+        (1, size_person[0] + border, size_person[1] + border, 4))
+    padded_image[0, border // 2:-border // 2,
+                 border // 2:-border // 2, :3] = image
+    if objects.shape[0] > batch_size:
+        objects = objects[:batch_size]
+    for oid, (yc, xc) in enumerate(objects):
+        dh, dw = size[0] // 2, size[1] // 2
+        y0, x0, y1, x1 = np.array(
+            [yc - dh, xc - dw, yc + dh, xc + dw]) + border // 2
+        result[oid, :, :, :4] = padded_image[:, y0:y1, x0:x1, :]
+        result[oid, :, :, 3] = gaussian_kernel(size[0], size[1], sigma, sigma)
+    return np.split(result, [3], 3)
+
+
+def detect_parts_heatmaps(heatmaps, centers, size, num_parts=14):
+    """
+    Given heat-maps find the position of each joint by means of an argmax
+    function
+    """
+    parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32)
+    visible = np.zeros((len(centers), num_parts), dtype=bool)
+    for oid, (yc, xc) in enumerate(centers):
+        part_hmap = skimage.transform.resize(
+            np.clip(heatmaps[oid], -1, 1), size)
+        for pid in range(num_parts):
+            y, x = np.unravel_index(np.argmax(part_hmap[:, :, pid]), size)
+            parts[oid, pid] = y + yc - size[0] // 2, x + xc - size[1] // 2
+            visible[oid, pid] = np.mean(
+                part_hmap[:, :, pid]) > config.VISIBLE_PART
+    return parts, visible
+
+
+def detect_parts_from_likelihoods(poses, centers, likelihoods, num_parts=14):
+    """
+    Given heat-maps find 
the position of each joint by means of n argmax + function + """ + if len(centers) > config.BATCH_SIZE: + centers = centers[:config.BATCH_SIZE] + parts = np.zeros((len(centers), num_parts, 2), dtype=np.int32) + visible = np.zeros((len(centers), num_parts), dtype=bool) + for oid, (yc, xc) in enumerate(centers): + for pid in range(num_parts): + x, y = poses[oid, :, pid] + parts[oid, pid] = y + yc - config.INPUT_SIZE // 2, x + xc - config.INPUT_SIZE // 2 + visible[oid, pid] = likelihoods[oid, pid] > config.VISIBLE_PART + return parts, visible + + +def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'): + """Get the json file containing the dataset. + We want the data to be shuffled, however the training has to be repeatable. + This means that once shuffled the order has to me mantained.""" + with open(path) as data_file: + data_this = json.load(data_file) + data_this = np.array(data_this['root']) + num_samples = len(data_this) + + if os.path.exists(order): + idx = np.load(order) + else: + idx = np.random.permutation(num_samples).tolist() + np.save(order, idx) + + is_not_validation = [not data_this[i]['isValidation'] + for i in range(num_samples)] + keep_data_idx = list(compress(idx, is_not_validation)) + + data = data_this[keep_data_idx] + return data, len(keep_data_idx) + + +def generate_labels(image_shape, joint_positions, num_other_people, + joints_other_people, offset): + """ + Given as input a set of joint positions and the size of the input image + it generates + a set of heat-maps of the same size. It generates both heat-maps used as + labels for the first stage (label_1st_lower) and for all the other stages + (label_lower). + """ + _FILTER_JOINTS = np.array([9, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5]) + + img_height, img_width, _ = image_shape + heat_maps_single_p = np.zeros( + (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) + heat_maps_other_p = np.zeros( + (config.NUM_OUTPUT, config.INPUT_SIZE, config.INPUT_SIZE)) + + # generate first set of heat-maps + for i in range(config.NUM_JOINTS): + # the set of joints can be different fromt the one in the json file + curr_joint = joint_positions[_FILTER_JOINTS[i]] + skip = (curr_joint[0] < 0 or curr_joint[1] < 0 or + curr_joint[0] >= img_width or curr_joint[1] >= img_height) + if not skip: + heat_maps_single_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[ + 1] - offset[1], curr_joint[0] - offset[0], + config.SIGMA, config.SIGMA) + + heat_maps_other_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[ + 1] - offset[1], curr_joint[0] - offset[0], + config.SIGMA, config.SIGMA) + + heat_maps_single_p[-1] = np.maximum( + 1 - np.max(heat_maps_single_p[:-1], axis=0), + np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) + heat_maps_single_p = np.transpose(heat_maps_single_p, (1, 2, 0)) + + # generate second set of heat-maps for other people in the image + for p in range(int(num_other_people)): + for i in range(config.NUM_JOINTS): + # the set of joints can be different fromt the one in the json file + try: + if num_other_people == 1: + curr_joint = joints_other_people[_FILTER_JOINTS[i]] + else: + curr_joint = joints_other_people[p][_FILTER_JOINTS[i]] + skip = ( + curr_joint[0] < 0 or curr_joint[1] < 0 or + curr_joint[0] >= img_width or curr_joint[1] >= img_height) + except IndexError: + skip = True + if not skip: + heat_maps_other_p[i] = gaussian_heatmap( + config.INPUT_SIZE, config.INPUT_SIZE, + curr_joint[1] - offset[1], curr_joint[0] - offset[0], + 
config.SIGMA, config.SIGMA, init=heat_maps_other_p[i]) + + heat_maps_other_p[-1] = np.maximum( + 1 - np.max(heat_maps_other_p[:-1], axis=0), + np.zeros((config.INPUT_SIZE, config.INPUT_SIZE))) + + heat_maps_other_p = np.transpose(heat_maps_other_p, (1, 2, 0)) + + # rescaling heat-maps accoring to the right shape + labels_single = rescale(heat_maps_single_p, config.OUTPUT_SIZE) + labels_people = rescale(heat_maps_other_p, config.OUTPUT_SIZE) + return labels_people, labels_single + + +def generate_center_map(center_pos, img_shape): + """ + Given the position of the person and the size of the input image it + generates + a heat-map where a gaissian distribution is fit in the position of the + person in the image. + """ + img_height = img_shape + img_width = img_shape + center_map = gaussian_heatmap( + img_height, img_width, center_pos[1], center_pos[0], + config.SIGMA_CENTER, config.SIGMA_CENTER) + return center_map + + +def rescale(data, new_size): + """Rescale data to a fixed dimension, regardless the number of channels. + Data has to be in the format (h,w,c).""" + if data.ndim > 2: + assert data.shape[2] < data.shape[0] + assert data.shape[2] < data.shape[1] + resized_data = cv2.resize( + data, (new_size, new_size), interpolation=cv2.INTER_CUBIC) + return resized_data + + +def crop_image(image, obj_pose): + """ + Crop the image in order to have the person at the center and the final + image size + is the same as the expected CNN input size. + Returns the cropped image and the offset that is used to update the joint + positions. + """ + offset_left = int(obj_pose[0] - config.INPUT_SIZE // 2) + offset_up = int(obj_pose[1] - config.INPUT_SIZE // 2) + # just for checking that it's inside the image + offset_right = int(image.shape[1] - obj_pose[0] - config.INPUT_SIZE // 2) + offset_bottom = int(image.shape[0] - obj_pose[1] - config.INPUT_SIZE // 2) + + pad_left, pad_right, pad_up, pad_bottom = 0, 0, 0, 0 + if offset_left < 0: + pad_left = -offset_left + if offset_right < 0: + pad_right = -offset_right + if offset_up < 0: + pad_up = -offset_up + if offset_bottom < 0: + pad_bottom = -offset_bottom + padded_image = np.lib.pad( + image, ((pad_up, pad_bottom), (pad_left, pad_right), (0, 0)), + 'constant', constant_values=((0, 0), (0, 0), (0, 0))) + + cropped_image = padded_image[ + offset_up + pad_up: offset_up + pad_up + config.INPUT_SIZE, + offset_left + pad_left: offset_left + pad_left + config.INPUT_SIZE] + + return cropped_image, np.array([offset_left, offset_up]) + diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py new file mode 100644 index 000000000..cbfd5ec22 --- /dev/null +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/upright_fast.py @@ -0,0 +1,302 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
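+
+# Fast rotation/coefficient search used by Prob3dPose: pick_e() scans a grid
+# of planar rotation angles for every chart of the pose manifold and, for
+# each candidate rotation, solves a linear least-squares problem for the
+# basis coefficients (estimate_a_and_r_with_res / _with_res_weights),
+# returning per-frame scores together with the best coefficients a and
+# rotations r.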
+from npu_bridge.npu_init import * +import numpy as np +import scipy + +__all__ = [ + 'upgrade_r', + 'update_cam', + 'estimate_a_and_r_with_res', + 'estimate_a_and_r_with_res_weights', + 'pick_e' +] + + +def upgrade_r(r): + """Upgrades complex parameterisation of planar rotation to tensor containing + per frame 3x3 rotation matrices""" + newr = np.zeros((3, 3)) + newr[:2, 0] = r + newr[2, 2] = 1 + newr[1::-1, 1] = r + newr[0, 1] *= -1 + return newr + + +def update_cam(cam): + new_cam = cam[[0, 2, 1]].copy() + new_cam = new_cam[:, [0, 2, 1]] + return new_cam + + +def estimate_a_and_r_with_res( + w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, + residue, Ps, depth_reg, scale_prior): + """ + TODO: Missing the following parameters in docstring: + - w, e, s0, camera_r, Lambda, check, a, res, proj_e, depth_reg, + scale_prior + + TODO: The following parameters are not used: + - s0, weights + + So local optima are a problem in general. + However: + + 1. This problem is convex in a but not in r, and + + 2. each frame can be solved independently. + + So for each frame, we can do a grid search in r and take the globally + optimal solution. + + In practice, we just brute force over 100 different estimates of r, and + take the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a + given r. + + Arguments: + + w is a 3d measurement matrix of form frames*2*points + + e is a 3d set of basis vectors of from basis*3*points + + s0 is the 3d rest shape of form 3*points + + Lambda are the regularisor coefficients on the coefficients of the + weights typically generated using PPCA + + interval is how far round the circle we should check for break points + we check every interval*2*pi radians + + Returns: + + a (basis coefficients) and r (representation of rotations as a complex + number) + """ + frames = w.shape[0] + points = w.shape[2] + basis = e.shape[0] + r = np.empty(2) + Ps_reshape = Ps.reshape(2 * points) + w_reshape = w.reshape((frames, points * 2)) + + for i in range(check.size): + c = check[i] + r[0] = np.cos(c) + r[1] = np.sin(c) + grot = camera_r.dot(upgrade_r(r)) + rot = grot[:2] + res[:, :points * 2] = w_reshape + res[:, :points * 2] -= Ps_reshape + proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( + e.shape[0], 2 * points) + + if Lambda.size != 0: + proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) + res[:, 2 * points:].fill(0) + res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + # depth regularizer not used + proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * + depth_reg) * grot[2]).dot(e) + # we let the person change scale + res[:, 2 * points] = scale_prior + + """ + TODO: PLEASE REVIEW THE FOLLOWING CODE.... 
+ overwrite_a and overwrite_b ARE UNEXPECTED ARGUMENTS OF + scipy.linalg.lstsq + """ + a[i], residue[i], _, _ = scipy.linalg.lstsq( + proj_e.T, res.T, overwrite_a=True, overwrite_b=True) + + # find and return best coresponding solution + best = np.argmin(residue, 0) + assert (best.shape[0] == frames) + theta = check[best] + index = (best, np.arange(frames)) + aa = a.transpose(0, 2, 1)[index] + retres = residue[index] + r = np.empty((2, frames)) + r[0] = np.sin(theta) + r[1] = np.cos(theta) + return aa, r, retres + + +def estimate_a_and_r_with_res_weights( + w, e, s0, camera_r, Lambda, check, a, weights, res, proj_e, + residue, Ps, depth_reg, scale_prior): + """ + TODO: Missing the following parameters in docstring: + - w, e, s0, camera)r, Lambda, check, a, res, proj_e, residue, + Ps, depth_reg, scale_prior + + So local optima are a problem in general. + However: + + 1. This problem is convex in a but not in r, and + + 2. each frame can be solved independently. + + So for each frame, we can do a grid search in r and take the globally + optimal solution. + + In practice, we just brute force over 100 different estimates of r, and + take + the best pair (r,a*(r)) where a*(r) is the optimal minimiser of a given r. + + Arguments: + + w is a 3d measurement matrix of form frames*2*points + + e is a 3d set of basis vectors of from basis*3*points + + s0 is the 3d rest shape of form 3*points + + Lambda are the regularisor coefficients on the coefficients of the + weights + typically generated using PPCA + + interval is how far round the circle we should check for break points + we check every interval*2*pi radians + + Returns: + + a (basis coefficients) and r (representation of rotations as a complex + number) + """ + frames = w.shape[0] + points = w.shape[2] + basis = e.shape[0] + r = np.empty(2) + Ps_reshape = Ps.reshape(2 * points) + w_reshape = w.reshape((frames, points * 2)) + p_copy = np.empty_like(proj_e) + + for i in range(check.size): + c = check[i] + r[0] = np.sin(c) + r[1] = np.cos(c) + grot = camera_r.dot(upgrade_r(r).T) + rot = grot[:2] + rot.dot(s0, Ps) # TODO: remove? + res[:, :points * 2] = w_reshape + res[:, :points * 2] -= Ps_reshape + proj_e[:, :2 * points] = rot.dot(e).transpose(1, 0, 2).reshape( + e.shape[0], 2 * points) + + if Lambda.size != 0: + proj_e[:, 2 * points:2 * points + basis] = np.diag(Lambda[:Lambda.shape[0] - 1]) + res[:, 2 * points:].fill(0) + res[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, :points * 2] *= Lambda[Lambda.shape[0] - 1] + proj_e[:, 2 * points + basis:] = ((Lambda[Lambda.shape[0] - 1] * + depth_reg) * grot[2]).dot(e) + res[:, 2 * points] = scale_prior + if weights.size != 0: + res[:, :points * 2] *= weights + for j in range(frames): + p_copy[:] = proj_e + p_copy[:, :points * 2] *= weights[j] + a[i, :, j], comp_residual, _, _ = np.linalg.lstsq( + p_copy.T, res[j].T) + if not comp_residual: + # equations are over-determined + residue[i, j] = 1e-5 + else: + residue[i, j] = comp_residual + # find and return best coresponding solution + best = np.argmin(residue, 0) + index = (best, np.arange(frames)) + theta = check[best] + aa = a.transpose(0, 2, 1)[index] + retres = residue[index] + r = np.empty((2, frames)) + r[0] = np.sin(theta) + r[1] = np.cos(theta) + return aa, r, retres + + +def pick_e(w, e, s0, camera_r=None, Lambda=None, + weights=None, scale_prior=-0.0014, interval=0.01, depth_reg=0.0325): + """Brute force over charts from the manifold to find the best one. 
+ Returns best chart index and its a and r coefficients + Returns assignment, and a and r coefficents""" + + camera_r = np.asarray([[1, 0, 0], [0, 0, -1], [0, 1, 0]] + ) if camera_r is None else camera_r + Lambda = np.ones((0, 0)) if Lambda is None else Lambda + weights = np.ones((0, 0, 0)) if weights is None else weights + + charts = e.shape[0] + frames = w.shape[0] + basis = e.shape[1] + points = e.shape[3] + assert (s0.shape[0] == charts) + r = np.empty((charts, 2, frames)) + a = np.empty((charts, frames, e.shape[1])) + score = np.empty((charts, frames)) + check = np.arange(0, 1, interval) * 2 * np.pi + cache_a = np.empty((check.size, basis, frames)) + residue = np.empty((check.size, frames)) + + if Lambda.size != 0: + res = np.zeros((frames, points * 2 + basis + points)) + proj_e = np.zeros((basis, 2 * points + basis + points)) + else: + res = np.empty((frames, points * 2)) + proj_e = np.empty((basis, 2 * points)) + Ps = np.empty((2, points)) + + if weights.size == 0: + for i in range(charts): + if Lambda.size != 0: + a[i], r[i], score[i] = estimate_a_and_r_with_res( + w, e[i], s0[i], camera_r, + Lambda[i], check, cache_a, weights, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + a[i], r[i], score[i] = estimate_a_and_r_with_res( + w, e[i], s0[i], camera_r, Lambda, + check, cache_a, weights, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + w2 = weights.reshape(weights.shape[0], -1) + for i in range(charts): + if Lambda.size != 0: + a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( + w, e[i], s0[i], camera_r, + Lambda[i], check, cache_a, w2, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + else: + a[i], r[i], score[i] = estimate_a_and_r_with_res_weights( + w, e[i], s0[i], camera_r, Lambda, + check, cache_a, w2, + res, proj_e, residue, Ps, + depth_reg, scale_prior) + + remaining_dims = 3 * w.shape[2] - e.shape[1] + assert (np.all(score > 0)) + assert (remaining_dims >= 0) + # Zero problems in log space due to un-regularised first co-efficient + l = Lambda.copy() + l[Lambda == 0] = 1 + llambda = -np.log(l) + score /= 2 + return score, a, r + -- Gitee From 60b601621a51fbdb7d9e1929736c6b92e5329eef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:27:24 +0000 Subject: [PATCH 42/43] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/LiftingFromTheDeep=5FID0891=5Ffor=5FTensorf?= =?UTF-8?q?low/packages/lifting/utils/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages/lifting/utils/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From cf756a3ca7696f7f66a7860e67bb49084fcff6e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 22 Mar 2022 10:28:25 +0000 Subject: [PATCH 43/43] update TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py. 
--- .../packages/lifting/utils/cpm.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py index 1f8a3a847..ecbb95beb 100644 --- a/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py +++ b/TensorFlow/contrib/cv/LiftingFromTheDeep_ID0891_for_Tensorflow/packages/lifting/utils/cpm.py @@ -404,5 +404,3 @@ def inference_pose(image, center_map, hm_size, stage=6): Mconv6_stage6, 15, 1, 1, activation_fn=None, scope='Mconv7_stage6') return _process_stage(Mconv7_stage6, hm_size) - - -- Gitee
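A minimal end-to-end sketch of how the 3D-lifting utilities added in this patch series fit together, assuming the TF1.x / older-NumPy environment these patches target and a pre-trained prob_model_params.mat file; the file path, array shapes and random joint values below are illustrative placeholders, and in practice the 2D joints and visibility flags come from the 2D heat-map stage.

    import numpy as np
    from packages.lifting.utils.prob_model import Prob3dPose
    from packages.lifting.utils.draw import plot_pose

    # Hypothetical input: one person with 14 detected 2D joints in (y, x)
    # order plus per-joint visibility flags (normally produced by the 2D
    # heat-map stage, e.g. detect_parts_heatmaps).
    pose_2d = np.random.randint(0, 368, size=(1, 14, 2)).astype(np.float64)
    visibility = np.ones((1, 14), dtype=bool)

    poser = Prob3dPose('./data/prob_model/prob_model_params.mat')
    # Re-order the joints to the H3.6M convention and build per-joint
    # weights, then lift to 3D; the result has shape (people, 3, 17).
    trans_pose, weights = Prob3dPose.transform_joints(pose_2d, visibility)
    pose_3d = poser.compute_3d(trans_pose, weights)

    fig = plot_pose(pose_3d[0])  # (3, 17) array -> matplotlib figure
    fig.savefig('pose_3d.png')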