From 86d48f73e871000fbbf04dc042627d86dfa7e41f Mon Sep 17 00:00:00 2001 From: yongqingli Date: Wed, 7 Dec 2022 22:03:57 +0800 Subject: [PATCH 1/5] commit --- .../cv/PifPaf_for_TensorFlow2/README.md | 190 +++++ .../contrib/cv/PifPaf_for_TensorFlow2/eval.py | 110 +++ .../cv/PifPaf_for_TensorFlow2/export_pb.py | 136 ++++ .../PifPaf_for_TensorFlow2/export_tflite.py | 78 ++ .../hyperpose/Config/__init__.py | 578 ++++++++++++++ .../hyperpose/Config/config_pifpaf.py | 92 +++ .../hyperpose/Config/config_pretrain.py | 47 ++ .../hyperpose/Config/define.py | 73 ++ .../hyperpose/Dataset/__init__.py | 140 ++++ .../hyperpose/Dataset/base_dataset.py | 320 ++++++++ .../hyperpose/Dataset/common.py | 135 ++++ .../hyperpose/Dataset/dmadapt_dataset.py | 50 ++ .../Dataset/imagenet_dataset/__init__.py | 33 + .../Dataset/imagenet_dataset/dataset.py | 113 +++ .../Dataset/mpii_dataset/__init__.py | 34 + .../hyperpose/Dataset/mpii_dataset/dataset.py | 272 +++++++ .../hyperpose/Dataset/mpii_dataset/define.py | 169 ++++ .../hyperpose/Dataset/mpii_dataset/format.py | 225 ++++++ .../Dataset/mpii_dataset/generate.py | 68 ++ .../hyperpose/Dataset/mpii_dataset/prepare.py | 85 ++ .../hyperpose/Dataset/mpii_dataset/utils.py | 91 +++ .../Dataset/mscoco_dataset/__init__.py | 34 + .../Dataset/mscoco_dataset/dataset.py | 226 ++++++ .../Dataset/mscoco_dataset/define.py | 154 ++++ .../Dataset/mscoco_dataset/format.py | 209 +++++ .../Dataset/mscoco_dataset/generate.py | 73 ++ .../Dataset/mscoco_dataset/prepare.py | 138 ++++ .../hyperpose/Dataset/multi_dataset.py | 117 +++ .../hyperpose/Model/__init__.py | 514 ++++++++++++ .../hyperpose/Model/augmentor.py | 102 +++ .../hyperpose/Model/backbones.py | 730 ++++++++++++++++++ .../hyperpose/Model/base_model.py | 78 ++ .../hyperpose/Model/common.py | 306 ++++++++ .../hyperpose/Model/domainadapt.py | 86 +++ .../hyperpose/Model/examine.py | 64 ++ .../hyperpose/Model/human.py | 183 +++++ .../hyperpose/Model/metrics.py | 113 +++ .../hyperpose/Model/pifpaf/__init__.py | 37 + .../hyperpose/Model/pifpaf/define.py | 136 ++++ .../hyperpose/Model/pifpaf/eval.py | 275 +++++++ .../hyperpose/Model/pifpaf/model.py | 313 ++++++++ .../hyperpose/Model/pifpaf/processor.py | 620 +++++++++++++++ .../hyperpose/Model/pifpaf/utils.py | 425 ++++++++++ .../hyperpose/Model/pretrain.py | 204 +++++ .../hyperpose/Model/processor.py | 148 ++++ .../hyperpose/Model/train.py | 626 +++++++++++++++ .../hyperpose/__init__.py | 31 + .../PifPaf_for_TensorFlow2/measure_flops.py | 55 ++ .../PifPaf_for_TensorFlow2/modelzoo_level.txt | 8 + .../PifPaf_for_TensorFlow2/official_test.py | 110 +++ .../cv/PifPaf_for_TensorFlow2/pretrain.py | 81 ++ .../cv/PifPaf_for_TensorFlow2/python_demo.py | 128 +++ .../PifPaf_for_TensorFlow2/python_demo_pb.py | 170 ++++ .../PifPaf_for_TensorFlow2/requirements.txt | 36 + .../scripts/auto-format.sh | 44 ++ .../scripts/check_docker_run.py | 88 +++ .../scripts/download-openpifpaf-model.sh | 43 ++ .../scripts/downloader.py | 68 ++ .../scripts/test_docker.py | 96 +++ .../cv/PifPaf_for_TensorFlow2/setup.py | 64 ++ .../test/train_full_1p.sh | 160 ++++ .../test/train_performance_1p.sh | 160 ++++ .../cv/PifPaf_for_TensorFlow2/train.py | 167 ++++ 63 files changed, 10459 insertions(+) create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/eval.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_pb.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_tflite.py create mode 100644 
TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pifpaf.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pretrain.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/define.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/base_dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/common.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/dmadapt_dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/define.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/format.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/generate.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/prepare.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/utils.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/define.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/format.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/generate.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/prepare.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/multi_dataset.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/augmentor.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/backbones.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/base_model.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/common.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/domainadapt.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/examine.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/human.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/metrics.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/define.py create mode 100644 
TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/eval.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/model.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/processor.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/utils.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pretrain.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/processor.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/train.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__init__.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/measure_flops.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/modelzoo_level.txt create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/official_test.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/pretrain.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo_pb.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/requirements.txt create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/auto-format.sh create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/check_docker_run.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/download-openpifpaf-model.sh create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/downloader.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/test_docker.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/setup.py create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md new file mode 100644 index 000000000..c3f7f9d15 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md @@ -0,0 +1,190 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [训练结果](#训练结果.md) +- [高级参考](#高级参考.md) +

+<h2 id="基本信息.md">基本信息</h2>

+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):姿势检测**
+
+**版本(Version):1.2**
+
+**修改时间(Modified):2021.4.6**
+
+**框架(Framework):TensorFlow 2.6.5**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910 AI处理器**
+
+**应用级别(Categories):Official**
+
+**描述(Description):构建一个联合检测关键点并形成时空关键点关联的通用框架,使其成为首个实时的姿态检测和跟踪算法;网络采用通用的体系结构,使用复合场(Composite Fields)来检测并构建时空姿态**
+

+<h2 id="概述.md">概述</h2>

+ + OpenPifPaf模型最早是由Sven Kreiss, Lorenzo Bertoni, Alexandre Alahi在2019年提出的,并在2021年进一步完善为OpenPifPaf,提出了复合强度场(CIF)和复合关联场(CAF),使得网络在能够进行图像姿态识别的基础上还能够进行时空姿势跟踪,并进行实时的姿态预测和跟踪 + +- 参考论文: + + https://github.com/openpifpaf/openpifpaf + +- 参考实现: + + https://github.com/tensorlayer/hyperpose + + +​ + + +- 通过Git获取对应commit\_id的代码方法如下: + + ``` + git clone {repository_url} # 克隆仓库的代码 + cd {repository_name} # 切换到模型的代码仓目录 + git checkout {branch} # 切换到对应分支 + git reset --hard {commit_id} # 代码设置到对应的commit_id + cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换 + ``` + +## 默认配置 + +- 训练数据集预处理: + + - 图像的分辨率:72dpi + - 图像输入格式:jpg + +- 测试数据集预处理 + + - 图像的分辨率:72dpi + - 图像输入格式:jpg + +- 训练超参 + + - Batch size: 4 + - Train epoch: 70 + - Train step: 1000000 + + +## 支持特性 + +| 特性列表 | 是否支持 | +|-------|------| +| 分布式训练 | 否 | +| 混合精度 | 是 | +| 并行数据 | 是 | + +## 混合精度训练 + +昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。 + +## 开启混合精度 + +脚本已默认开启混合精度,设置precision_mode参数的脚本参考如下。 + + ``` + custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add() + custom_op.name = 'NpuOptimizer' + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(str(args.precision_mode)) + ``` + +
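+
+## 配置接口示例
+
+本仓库的训练、评估入口均通过 hyperpose 的 Config 接口完成配置。以下为一段示意代码(函数名取自本仓库 hyperpose/Config/__init__.py,模型名 test_pif 仅为示例,实际训练入口的完整流程以 train.py 为准),展示“默认配置”中的超参如何通过该接口设置:
+
+```
+from hyperpose import Config, Model, Dataset
+
+# 模型与数据集的基本配置
+Config.set_model_name("test_pif")                   # 模型名,决定 ./save_dir/test_pif/ 下的各输出目录
+Config.set_model_type(Config.MODEL.Pifpaf)          # 使用 Pifpaf 模型
+Config.set_model_backbone(Config.BACKBONE.Default)  # 默认 backbone
+Config.set_dataset_type(Config.DATA.MSCOCO)         # mscoco2017 数据集
+Config.set_dataset_path("../mscoco2017")            # 数据集路径,请按实际路径修改
+
+# 训练超参(与上文“默认配置”一致)
+Config.set_batch_size(4)
+Config.set_learning_rate(1e-4)
+
+config = Config.get_config()           # 汇总生成 config 对象
+model = Model.get_model(config)        # 按 config 构建模型
+dataset = Dataset.get_dataset(config)  # 按 config 构建数据集
+```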

+<h2 id="训练环境准备.md">训练环境准备</h2>

+ +1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。 +2. 宿主机上需要使用SSH远程登录GPU和NPU处理器进行训练 + + + +

+<h2 id="快速上手.md">快速上手</h2>

+ +- 数据集准备 +1. 模型训练使用mscoco2017数据集,数据集请用户自行获取。 + +## 模型训练 + +- 单击“立即下载”,并选择合适的下载方式下载源码包。 + +- 启动训练之前,首先要配置程序运行相关环境变量,以NPU为例。 + + 环境变量配置信息参见: + + [NPU环境变量设置]( https://gitee.com/ascend/modelzoo/wikis/%E6%A8%A1%E5%9E%8B%E6%8C%91%E6%88%98%E8%B5%9B_%E6%A8%A1%E5%9E%8B%E5%BC%80%E5%8F%91%E7%8E%AF%E5%A2%83%E4%BD%BF%E7%94%A8%E6%8C%87%E5%AF%BC%E4%B9%A6/NPU%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E9%85%8D%E7%BD%AE%E4%BF%A1%E6%81%AF) + +- 单卡训练 + + 1. 配置训练参数。 + + 首先在脚本test/train_full_1p.sh 中,配置batch_size、steps、epochs、data_path等参数,请用户根据实际路径配置data_path,或者在启动训练的命令行中以参数形式下发。 + + ``` + batch_size=4 + steps=1000000 + epochs=70 + data_path="../mscoco2017" + ``` + + 2. 启动训练。 + + 启动单卡训练 (脚本为/test/train_full_1p.sh ) + + ``` + bash train_full_1p.sh --data_path=../mscoco2017 + ``` + +
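+
+  3. (可选)启动评估。
+
+     训练结束后,可使用仓库自带的 eval.py 在验证集上评估精度。以下为示意命令(参数名取自 eval.py,--model_name 需与训练时使用的模型名一致):
+
+     ```
+     python3 eval.py --model_type=Pifpaf --model_backbone=Default --model_name=default_name --dataset_type=MSCOCO --dataset_path=../mscoco2017 --eval_num=10000
+     ```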

+<h2 id="训练结果.md">训练结果</h2>

+
+- 精度结果比对
+
+|精度指标项|GPU实测|NPU实测|
+|---|---|---|
+|loss|1692.60|1731.94|
+
+- 性能结果比对
+
+|性能指标|GPU实测|NPU实测|
+|---|---|---|
+|FPS|6.56|0.13|
+
+对比GPU和NPU的精度与性能结果:在精度上,NPU与GPU训练相同步数时loss值差距不超过5%(1731.94 对 1692.60,约2.3%);在性能上,NPU的FPS(0.13)明显低于GPU(6.56),可能原因是数据转移用时过长(数据移动与映射的时间占总时间的45.9%)。
+
+

+<h2 id="高级参考.md">高级参考</h2>

+
+## 脚本和示例代码
+
+```
+├── train.py                               //训练与测试代码
+├── README.md                              //代码说明文档
+├── hyperpose
+│   ├──hyperpose.Model.processor.py        //模型预处理代码
+│   ├──hyperpose.Model.pifpaf.model.py     //pifpaf模型代码
+│   ├──hyperpose.Model.backbones.py        //模型主体框架
+├── pretrain.py                            //预训练代码
+├── requirements.txt                       //训练python依赖列表
+├── test
+│   ├──train_performance_1p.sh             //单卡训练验证性能启动脚本
+│   ├──train_full_1p.sh                    //单卡全量训练启动脚本
+
+```
+
+## 脚本参数
+
+```
+--data_path        数据集路径,默认:hyperpose/data
+--batch_size       每个NPU的batch size,默认:4
+--learing_rata     初始学习率,默认:0.0001
+--steps            每个epoch训练步数,默认:1000000
+--epochs           训练epoch数量,默认:70
+```
+
+## 训练过程
+
+1. 通过“模型训练”中的训练指令启动单卡训练。
+
+2. 参考脚本的模型存储路径为./save_dir/test_pif/model_dir/。
+
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/eval.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/eval.py
new file mode 100644
index 000000000..930c36532
--- /dev/null
+++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/eval.py
@@ -0,0 +1,110 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import sys +import math +import json +import time +import argparse +import matplotlib +import multiprocessing +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='FastPose.') + parser.add_argument("--model_type", + type=str, + default="Openpose", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,PoseProposal") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vgg19, Resnet18, Resnet50") + parser.add_argument("--model_name", + type=str, + default="default_name", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: coco ") + parser.add_argument("--dataset_version", + type=str, + default="2017", + help="dataset version, only use for MSCOCO and available for version 2014 and 2017 ") + parser.add_argument("--dataset_path", + type=str, + default="data", + help="dataset path,to determine the path to load the dataset") + parser.add_argument('--train_type', + type=str, + default="Single_train", + help='train type, available options: Single_train, Parallel_train') + parser.add_argument('--kf_optimizer', + type=str, + default='Pair_avg', + help='kung fu parallel optimizor,available options: Sync_sgd, Sync_avg, Pair_avg') + parser.add_argument('--eval_num', + type=int, + default=10000, + help='number of evaluation') + parser.add_argument('--vis_num', + type=int, + default=60, + help='number of visible evaluation') + parser.add_argument('--multiscale', + type=bool, + default=True, + help='enable multiscale_search') + + + args=parser.parse_args() + Config.set_model_name(args.model_name) + Config.set_model_type(Config.MODEL[args.model_type]) + Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) + Config.set_dataset_type(Config.DATA[args.dataset_type]) + Config.set_dataset_path(args.dataset_path) + Config.set_dataset_version(args.dataset_version) + + config=Config.get_config() + model=Model.get_model(config) + evaluate=Model.get_evaluate(config) + dataset=Dataset.get_dataset(config) + + evaluate(model,dataset,vis_num=args.vis_num,total_eval_num=args.eval_num,enable_multiscale_search=args.multiscale) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_pb.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_pb.py new file mode 100644 index 000000000..d973dff6d --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_pb.py @@ -0,0 +1,136 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import argparse +import tensorflow as tf +from hyperpose import Config, Model +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + +def analyze_inputs_outputs(graph): + ops = graph.get_operations() + outputs_set = set(ops) + inputs = [] + for op in ops: + if len(op.inputs) == 0 and op.type == 'Placeholder': + inputs.append(op) + else: + for input_tensor in op.inputs: + if input_tensor.op in outputs_set: + outputs_set.remove(input_tensor.op) + outputs = list(outputs_set) + # Control nodes shall not be considered. + # input like: "import/x" -> x + # output like: "import/Identity", "import/Identity_1" -> Identity, Identity_1 + inputs = [x.name.split('/')[-1] for x in inputs if '_control_node' not in x.name] + outputs = [x.name.split('/')[-1] for x in outputs if '_control_node' not in x.name] + return (inputs, outputs) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='export fastpose models to pb format.') + parser.add_argument("--model_type", + type=str, + default="Pifpaf", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,PoseProposal") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vgg19, Resnet18, Resnet50") + parser.add_argument("--model_name", + type=str, + default="default_name", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: coco ") + parser.add_argument("--output_dir", + type=str, + default="save_dir", + help="which dir to output the exported pb model") + parser.add_argument("--export_batch_size", + type=int, + default=1, + help="the expected input image batch_size of the converted model, set to None to support dynamic shape" + ) + parser.add_argument("--export_h", + type=int, + default=368, + help="the expected input image height of the converted model, set to None to support dynamic shape" + ) + parser.add_argument("--export_w", + type=int, + default=432, + help="the expected input image width of the converted model, set to None to support dynamic shape") + + + args=parser.parse_args() + Config.set_model_name(args.model_name) + Config.set_model_type(Config.MODEL[args.model_type]) + Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) + config=Config.get_config() + export_model=Model.get_model(config) + + export_batch_size=args.export_batch_size + export_h,export_w=args.export_h,args.export_w + print(f"export_batch_size={export_batch_size}\texport_h={export_h}\texport_w={export_w}") + input_path=f"{config.model.model_dir}/newest_model.npz" + 
output_dir=f"{args.output_dir}/{config.model.model_name}" + output_path=f"{output_dir}/frozen_{config.model.model_name}.pb" + print(f"Exporting model {config.model.model_name} from {input_path}...") + if(not os.path.exists(output_dir)): + print("Creating output_dir...") + os.mkdir(output_dir) + if(not os.path.exists(input_path)): + print("Input model file doesn't exist!") + print("Conversion aborted!") + else: + export_model.load_weights(input_path, format='npz_dict') + export_model.eval() + if(export_model.data_format=="channels_last"): + input_signature=tf.TensorSpec(shape=(export_batch_size,export_h,export_w,3)) + else: + input_signature=tf.TensorSpec(shape=(export_batch_size,3,export_h,export_w)) + concrete_function=export_model.infer.get_concrete_function(x=input_signature) + frozen_graph=convert_variables_to_constants_v2(concrete_function) + frozen_graph_def=frozen_graph.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_graph_def,logdir=output_dir,name=f"frozen_{args.model_name}.pb",\ + as_text=False) + print(f"Exporting pb file finished! output file: {output_path}") + + with tf.Graph().as_default() as graph: + tf.import_graph_def(frozen_graph_def) + input_names, output_names = analyze_inputs_outputs(graph) + print(f'Exported graph INPUT nodes: {input_names}') + print(f'Exported graph OUTPUT nodes: {output_names}') diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_tflite.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_tflite.py new file mode 100644 index 000000000..625878a02 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/export_tflite.py @@ -0,0 +1,78 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import npu_device as npu +npu.open().as_default() + +import pathlib +import tensorflow as tf +from functools import partial +from hyperpose import Config,Model,Dataset + +#load model weights from hyperpose +Config.set_model_name("new_pifpaf") +Config.set_model_type(Config.MODEL.Pifpaf) +Config.set_dataset_type(Config.DATA.MSCOCO) +config=Config.get_config() +model=Model.get_model(config) +model.load_weights(f"{config.model.model_dir}/newest_model.npz") +model.eval() +#construct representative dataset used for quantization(here using the first 100 validate images) +scale_image_func=partial(Model.common.scale_image,hin=model.hin,win=model.win,scale_rate=0.95) +def decode_image(image_file,image_id): + image = tf.io.read_file(image_file) + image = tf.image.decode_jpeg(image, channels=3) # get RGB with 0~1 + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + scaled_image,pad = tf.py_function(scale_image_func,[image],[tf.float32,tf.float32]) + return scaled_image +dataset=Dataset.get_dataset(config) +val_dataset=dataset.get_eval_dataset() +rep_dataset=val_dataset.enumerate() +rep_dataset=rep_dataset.filter(lambda i,image_data : i<=100) +rep_dataset=rep_dataset.map(lambda i,image_data: image_data) +rep_dataset=rep_dataset.map(decode_image).batch(1) +print(f"test rep_dataset:{rep_dataset}") +#covert to tf-lite using int8-only quantization +input_signature=tf.TensorSpec(shape=(None,3,None,None)) +converter=tf.lite.TFLiteConverter.from_concrete_functions([model.infer.get_concrete_function(x=input_signature)]) +converter.optimizations=[tf.lite.Optimize.DEFAULT] +converter.representative_dataset=rep_dataset +converter.target_spec.supported_ops=[tf.lite.OpsSet.TFLITE_BUILTINS_INT8] +converter.inference_input_type = tf.uint8 +converter.inference_output_type = tf.uint8 +tflite_model_quant = converter.convert() +print("model quantized using uint8 quantization!") +#save the converted quantization model +save_path=f"./save_dir/{config.model.model_name}.tflite" +tf.io.write_file(save_path,tflite_model_quant) +#print(f"export tflite file finished! output file: {save_path}") + + + + diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py new file mode 100644 index 000000000..970aaacc9 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py @@ -0,0 +1,578 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import logging +import matplotlib +matplotlib.use("Agg") +from copy import deepcopy +from easydict import EasyDict as edict +from .define import * +from .config_pretrain import pretrain +update_config,update_train,update_eval,update_test,update_model,update_data,update_log=edict(),edict(),edict(),edict(),edict(),edict(),edict() + +#default train +update_train.optim_type=OPTIM.Adam +update_train.kungfu_option = KUNGFU.Sync_avg + +#defualt model config +update_model.model_type=MODEL.Openpose +#userdef model +update_model.custom_parts = None +update_model.custom_limbs = None +update_model.custom_augmentor = None +update_model.custom_preprocessor = None +update_model.custom_postprocessor = None +update_model.custom_visualizer = None + +#default dataset config +#official dataset +update_data.official_flag=True +#userdef dataset +update_data.userdef_dataset=None +#useradd dataset +update_data.useradd_flag=False +update_data.useradd_scale_rate=1 +update_data.useradd_train_img_paths=None +update_data.useradd_train_targets=None +#domain adaption dataset +update_data.domainadapt_flag=False +update_data.domainadapt_scale_rate=1 +update_data.domainadapt_train_img_paths=None +#default pretrain config +update_pretrain=edict() + + +#get configure api +def get_config(): + '''get the config object with all the configuration information + + get the config object based on the previous setting functions, + the config object will be passed to the functions of Model and Dataset module to + construct the system. + + only the setting functions called before this get_config function is valid, thus + use this function after all configuration done. + + Parameters + ---------- + None + + Returns + ------- + config object + an edict object contains all the configuration information. 
+ + ''' + # import basic configurations + if(update_model.model_type==MODEL.Openpose): + from .config_opps import model,train,eval,test,data,log + elif(update_model.model_type==MODEL.LightweightOpenpose): + from .config_lopps import model,train,eval,test,data,log + elif(update_model.model_type==MODEL.MobilenetThinOpenpose): + from .config_mbtopps import model,train,eval,test,data,log + elif(update_model.model_type==MODEL.PoseProposal): + from .config_ppn import model,train,eval,test,data,log + elif(update_model.model_type==MODEL.Pifpaf): + from .config_pifpaf import model,train,eval,test,data,log + # merge settings with basic configurations + model.update(update_model) + train.update(update_train) + eval.update(update_eval) + test.update(update_test) + data.update(update_data) + log.update(update_log) + pretrain.update(update_pretrain) + # assemble configure + config=edict() + config.model=model + config.train=train + config.eval=eval + config.test=test + config.data=data + config.log=log + config.pretrain=pretrain + # path configure + import tensorflow as tf + import tensorlayer as tl + tl.files.exists_or_mkdir(config.model.model_dir, verbose=True) # to save model files + tl.files.exists_or_mkdir(config.train.vis_dir, verbose=True) # to save visualization results + tl.files.exists_or_mkdir(config.eval.vis_dir, verbose=True) # to save visualization results + tl.files.exists_or_mkdir(config.test.vis_dir, verbose=True) # to save visualization results + tl.files.exists_or_mkdir(config.data.vis_dir, verbose=True) # to save visualization results + tl.files.exists_or_mkdir(config.pretrain.pretrain_model_dir,verbose=True) + # device configure + # FIXME: replace experimental tf functions when in tf 2.1 version + tf.debugging.set_log_device_placement(False) + tf.config.set_soft_device_placement(True) + for gpu in tf.config.experimental.get_visible_devices("GPU"): + tf.config.experimental.set_memory_growth(gpu,True) + + # logging configure + + # logging file path init + tl.files.exists_or_mkdir(os.path.dirname(config.log.log_path),verbose=True) + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN) + tl.logging.set_verbosity(tl.logging.WARN) + + # Info logging configure + info_logger = logging.getLogger(name="INFO") + info_logger.setLevel(logging.INFO) + # stream handler + info_cHandler = logging.StreamHandler() + info_cFormat = logging.Formatter("[%(name)s]: %(message)s") + info_cHandler.setFormatter(info_cFormat) + info_logger.addHandler(info_cHandler) + # file handler + info_fHandler = logging.FileHandler(config.log.log_path,mode="a") + info_fFormat = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") + info_fHandler.setFormatter(info_fFormat) + info_logger.addHandler(info_fHandler) + + # Dataset logging configure + data_logger = logging.getLogger(name="DATA") + data_logger.setLevel(logging.INFO) + # stream handler + data_cHandler = logging.StreamHandler() + data_cFormat = logging.Formatter("[%(name)s] %(levelname)s: %(message)s") + data_cHandler.setFormatter(data_cFormat) + data_logger.addHandler(data_cHandler) + # file handler + data_fHandler = logging.FileHandler(config.log.log_path,mode="a") + data_fFormat = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") + data_fHandler.setFormatter(data_fFormat) + data_logger.addHandler(data_fHandler) + + # Model logging configure + model_logger = logging.getLogger(name="MODEL") + model_logger.setLevel(logging.INFO) + # stream handler + model_cHandler = logging.StreamHandler() + model_cFormat = 
logging.Formatter("[%(name)s] %(levelname)s: %(message)s") + model_cHandler.setFormatter(model_cFormat) + model_logger.addHandler(model_cHandler) + # file handler + model_fHandler = logging.FileHandler(config.log.log_path,mode="a") + model_fFormat = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") + model_fHandler.setFormatter(model_fFormat) + model_logger.addHandler(model_fHandler) + + # Train logging configure + train_logger = logging.getLogger(name="TRAIN") + train_logger.setLevel(logging.INFO) + # stream handler + train_cHandler = logging.StreamHandler() + train_cFormat = logging.Formatter("[%(name)s] %(levelname)s: %(message)s") + train_cHandler.setFormatter(train_cFormat) + train_logger.addHandler(train_cHandler) + # file handler + train_fHandler = logging.FileHandler(config.log.log_path,mode="a") + train_fFormat = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") + train_fHandler.setFormatter(train_fFormat) + train_logger.addHandler(train_fHandler) + + info("Configuration initialized!") + return config + +#set configure api +#model configure api +def set_model_arch(model_arch): + '''set user defined model architecture + + replace default model architecture with user-defined model architecture, use it in the following training and evaluation + + Parameters + ---------- + arg1 : tensorlayer.models.MODEL + An object of a model class inherit from tensorlayer.models.MODEL class, + should implement forward function and cal_loss function to make it compatible with the existing pipeline + + The forward funtion should follow the signature below: +| openpose models: def forward(self,x,is_train=False) ,return conf_map,paf_map,stage_confs,stage_pafs +| poseproposal models: def forward(self,x,is_train=False), return pc,pi,px,py,pw,ph,pe + + The cal_loss function should follow the signature below: + +| openpose models: def cal_loss(self,stage_confs,stage_pafs,gt_conf,gt_paf,mask), return loss,loss_confs,loss_pafs +| poseproposal models: def cal_loss(self,tc,tx,ty,tw,th,te,te_mask,pc,pi,px,py,pw,ph,pe): + return loss_rsp,loss_iou,loss_coor,loss_size,loss_limb + + Returns + ------- + None + + ''' + update_model.model_arch=model_arch + +def set_model_type(model_type): + '''set preset model architecture + + configure the model architecture as one of the desired preset model architectures + + Parameters + ---------- + arg1 : Config.MODEL + a enum value of enum class Config.MODEL, available options: +| Config.MODEL.Openpose (original Openpose) +| Config.MODEL.LightweightOpenpose (lightweight variant version of Openpose,real-time on cpu) +| Config.MODEL.PoseProposal (pose proposal network) +| Config.MODEL.MobilenetThinOpenpose (lightweight variant version of openpose) + + Returns + ------- + None + ''' + update_model.model_type=model_type + + +def set_model_backbone(model_backbone): + '''set preset model backbones + + set current model backbone to other common backbones + different backbones have different computation complexity + this enable dynamicly adapt the model architecture to approriate size. 
+ + Parameters + ---------- + arg1 : Config.BACKBONE + a enum value of enum class Config.BACKBONE + available options: +| Config.BACKBONE.DEFUALT (default backbone of the architecture) +| Config.BACKBONE.MobilenetV1 +| Config.BACKBONE.MobilenetV2 +| Config.BACKBONE.Vggtiny +| Config.BACKBONE.Vgg16 +| Config.BACKBONE.Vgg19 +| Config.BACKBONE.Resnet18 +| Config.BACKBONE.Resnet50 + + Returns + ------- + None + ''' + update_model.model_backbone=model_backbone + +def set_data_format(data_format): + '''set model dataformat + + set the channel order of current model: + +| "channels_first" dataformat is faster in deployment +| "channels_last" dataformat is more common + the integrated pipeline will automaticly adapt to the chosen data format + + Parameters + ---------- + arg1 : string + available input: +| 'channels_first': data_shape N*C*H*W +| 'channels_last': data_shape N*H*W*C + + Returns + ------- + None + ''' + update_model.data_format=data_format + +def set_model_name(model_name): + '''set the name of model + + the models are distinguished by their names,so it is necessary to set model's name when train multiple models at the same time. + each model's ckpt data and log are saved on the 'save_dir/model_name' directory, the following directory are determined: + +| directory to save model ./save_dir/model_name/model_dir +| directory to save train result ./save_dir/model_name/train_vis_dir +| directory to save evaluate result ./save_dir/model_name/eval_vis_dir +| directory to save dataset visualize result ./save_dir/model_name/data_vis_dir +| file path to save train log ./save_dir/model_name/log.txt + + Parameters + ---------- + arg1 : string + name of the model + + Returns + ------- + None + ''' + update_model.model_name=model_name + update_model.model_dir = f"./save_dir/{update_model.model_name}/model_dir" + update_train.vis_dir = f"./save_dir/{update_model.model_name}/train_vis_dir" + update_eval.vis_dir=f"./save_dir/{update_model.model_name}/eval_vis_dir" + update_test.vis_dir=f"./save_dir/{update_model.model_name}/test_vis_dir" + update_data.vis_dir=f"./save_dir/{update_model.model_name}/data_vis_dir" + update_log.log_path= f"./save_dir/{update_model.model_name}/log.txt" + +#train configure api +def set_train_type(train_type): + '''set single_train or parallel train + + default using single train, which train the model on one GPU. + set parallel train will use Kungfu library to accelerate training on multiple GPU. + + to use parallel train better, it is also allow to set parallel training optimizor by set_kungfu_option. 
+ + Parameters + ---------- + arg1 : Config.TRAIN + a enum value of enum class Config.TRAIN,available options: +| Config.TRAIN.Single_train +| Config.TRAIN.Parallel_train + + Returns + ------- + None + ''' + update_train.train_type=train_type + +def set_optim_type(optim_type): + update_train.optim_type=optim_type + +def set_learning_rate(learning_rate): + '''set the learning rate in training + + Parameters + ---------- + arg1 : float + learning rate + + Returns + ------- + None + ''' + update_train.lr_init=learning_rate + +def set_batch_size(batch_size): + '''set the batch size in training + + Parameters + ---------- + arg1 : int + batch_size + + Returns + ------- + None + ''' + update_train.batch_size=batch_size + + +def set_kungfu_option(kungfu_option): + '''set the optimizor of parallel training + + kungfu distribute training library needs to wrap tensorflow optimizor in + kungfu optimizor, this function is to choose kungfu optimizor wrap type + + Parameters + ---------- + arg1 : Config.KUNGFU + a enum value of enum class Config.KUNGFU + available options: +| Config.KUNGFU.Sync_sgd (SynchronousSGDOptimizer, hyper-parameter-robus) +| Config.KUNGFU.Sync_avg (SynchronousAveragingOptimizer) +| Config.KUNGFU.Pair_avg (PairAveragingOptimizer, communication-efficient) + + Returns + ------- + None + ''' + update_train.kungfu_option=kungfu_option + +#data configure api +def set_dataset_type(dataset_type): + '''set the dataset for train and evaluate + + set which dataset to use, the process of downlaoding, decoding, reformatting of different type + of dataset is automatic. + the evaluation metric of different dataset follows their official metric, + for COCO is MAP, for MPII is MPCH. + + This API also receive user-defined dataset class, which should implement the following functions +| __init__: take the config object with all configuration to init the dataset +| get_parts: return a enum class which defines the key point definition of the dataset +| get_limbs: return a [2*num_limbs] array which defines the limb definition of the dataset +| get_colors: return a list which defines the visualization color of the limbs +| get_train_dataset: return a tensorflow dataset which contains elements for training. each element should contains an image path and a target dict decoded in bytes by _pickle +| get_eval_dataset: return a tensorflow dataset which contains elements for evaluating. each element should contains an image path and an image id +| official_eval: if want to evaluate on this user-defined dataset, evalutation function should be implemented. + one can refer the Dataset.mpii_dataset and Dataset.mscoco_dataset for detailed information. 
+ + Parameters + ---------- + arg1 : Config.DATA + a enum value of enum class Config.DATA or user-defined dataset + available options: +| Config.DATA.MSCOCO +| Config.DATA.MPII +| user-defined dataset + + Returns + ------- + None + ''' + update_data.dataset_type=dataset_type + +def set_dataset_version(dataset_version): + update_data.dataset_version=dataset_version + +def set_useradd_data(useradd_train_img_paths,useradd_train_targets,useradd_scale_rate=1): + update_data.useradd_flag=True + update_data.useradd_train_img_paths=useradd_train_img_paths + update_data.useradd_train_targets=useradd_train_targets + update_data.useradd_scale_rate=useradd_scale_rate + +def set_userdef_dataset(userdef_dataset): + update_data.dataset_type=DATA.USERDEF + update_data.userdef_dataset=userdef_dataset + +def set_multiple_dataset(multiple_dataset_configs): + update_data.dataset_type=DATA.MULTIPLE + update_data.multiple_dataset_configs=multiple_dataset_configs + +def set_domainadapt_dataset(domainadapt_train_img_paths,domainadapt_scale_rate=1): + update_data.domainadapt_flag=True + update_data.domainadapt_train_img_paths=domainadapt_train_img_paths + update_data.domainadapt_scale_rate=domainadapt_scale_rate + +def set_official_dataset(official_flag): + update_data.official_flag=official_flag + +def set_dataset_path(dataset_path): + '''set the path of the dataset + + set the path of the directory where dataset is,if the dataset doesn't exist in this directory, + then it will be automaticly download in this directory and decoded. + + Parameters + ---------- + arg1 : String + a string indicates the path of the dataset, + default: ./data + + Returns + ------- + None + ''' + update_data.dataset_path=dataset_path + +def set_dataset_filter(dataset_filter): + '''set the user defined dataset filter + + set the dataset filter as the input function. + to uniformly format different dataset, + Hyperpose organize the annotations of one image in one dataset in the similiar meta classes. + for COCO dataset, it is COCOMeta; for MPII dataset, it is MPIIMeta. + Meta classes will have some common information such as image_id, joint_list etc, + they also have some dataset-specific imformation, such as mask, is_crowd, headbbx_list etc. + + the dataset_fiter will perform on the Meta objects of the corresponding dataset, if + it returns True, the image and annotaions the Meta object related will be kept, + otherwise it will be filtered out. Please refer the Dataset.xxxMeta classes for better use. + + Parameters + ---------- + arg1 : function + a function receive a meta object as input, return a bool value indicates whether + the meta should be kept or filtered out. return Ture for keeping and False for depricating the object. 
+ default: None + + Returns + ------- + None + ''' + update_data.dataset_filter=dataset_filter + +# interval APIs +# configure log interval +def set_log_interval(log_interval): + '''set the frequency of logging + + set the how many iteration intervals between two log information + + Parameters + ---------- + arg1 : Int + a int value indicates the iteration number bwteen two logs + default: 1 + + Returns + ------- + None + ''' + if(log_interval is not None): + update_log.log_interval=log_interval + +# configure save_interval +def set_save_interval(save_interval): + if(save_interval is not None): + update_train.save_interval = save_interval + +# configure vis_interval +def set_vis_interval(vis_interval): + if(vis_interval is not None): + update_train.vis_interval = vis_interval + +# custome module interfaces +# custom parts +def set_custom_parts(custom_parts): + update_model.custom_parts = custom_parts + +# custom limbs +def set_custom_limbs(custom_limbs): + update_model.custom_limbs = custom_limbs + +# custom augmentor +def set_custom_augmentor(augmentor): + update_model.augmentor = augmentor + +# custom preprocessor +def set_custom_preprocessor(preprocessor): + update_model.preprocessor = preprocessor + +# custom postprocessor +def set_custom_postprocessor(postprocessor): + update_model.postprocessor = postprocessor + +# custom visualizer +def set_custom_visualizer(visualizer): + update_model.visualizer = visualizer + + +def set_pretrain(enable): + update_pretrain.enable=enable + +def set_pretrain_dataset_path(pretrain_dataset_path): + update_pretrain.pretrain_dataset_path=pretrain_dataset_path + +def info(msg): + info_logger = logging.getLogger("INFO") + info_logger.info(msg) \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pifpaf.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pifpaf.py new file mode 100644 index 000000000..897d69387 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pifpaf.py @@ -0,0 +1,92 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
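+# Note: the values below are the PifPaf defaults. Config.get_config() merges them with
+# any Config.set_xxx() settings made beforehand, so individual fields can be overridden
+# from the launch script. With the default 368x432 input, hout/wout of 46x54 correspond
+# to an output stride of 8 (368/8=46, 432/8=54).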
+ + +import npu_device as npu +npu.open().as_default() + +import os +from .define import MODEL,DATA,TRAIN,BACKBONE +from easydict import EasyDict as edict + +#model configuration +model = edict() +# number of keypoints + 1 for background +model.n_pos = 17 +model.num_channels=128 +# input size during training , 240 +model.hin = 368 +model.win = 432 +# output size during training (default 46) +model.hout = 46 +model.wout = 54 +model.model_type = MODEL.Pifpaf +model.model_name = "default_name" +model.model_backbone=BACKBONE.Default +model.data_format = "channels_first" +# save directory +model.model_dir = f"./save_dir/{model.model_name}/model_dir" + +#train configuration +train=edict() +train.batch_size = 4 +train.save_interval = 5 +train.vis_interval = 10000 +# total number of step +train.n_step = 1000000 +# initial learning rate +train.lr_init = 1e-4 +# decay lr factor +train.lr_decay_factor = 0.2 +train.lr_decay_steps=[777920,848640] +train.lr_decay_duration=35360 +train.weight_decay_factor = 1e-5 +train.train_type=TRAIN.Single_train +train.vis_dir=f"./save_dir/{model.model_name}/train_vis_dir" + +#eval configuration +eval =edict() +eval.batch_size=4 +eval.vis_dir= f"./save_dir/{model.model_name}/eval_vis_dir" + +#test configuration +test =edict() +test.vis_dir=f"./save_dir/{model.model_name}/test_vis_dir" + +#data configuration +data = edict() +data.dataset_type = DATA.MSCOCO # coco, custom, coco_and_custom +data.dataset_version = "2017" # MSCOCO version 2014 or 2017 +data.dataset_path = "./data" +data.dataset_filter=None +data.vis_dir=f"./save_dir/data_vis_dir" + +#log configuration +log = edict() +log.log_interval = 100 +log.log_path= f"./save_dir/{model.model_name}/log.txt" diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pretrain.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pretrain.py new file mode 100644 index 000000000..80b675821 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/config_pretrain.py @@ -0,0 +1,47 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
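+# Note: backbone pretraining is disabled by default (pretrain.enable=False below).
+# It can be switched on with Config.set_pretrain(True), and the ImageNet directory
+# can be changed with Config.set_pretrain_dataset_path(); otherwise these defaults apply.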
+ + +import npu_device as npu +npu.open().as_default() + +from easydict import EasyDict as edict + +pretrain=edict() +pretrain.enable=False +pretrain.lr_init=5e-4 +pretrain.batch_size=32 +pretrain.total_step=370000000 +pretrain.log_interval=100 +pretrain.val_interval=5000 +pretrain.save_interval=5000 +pretrain.weight_decay_factor=1e-5 +pretrain.pretrain_dataset_path="./data/imagenet" +pretrain.pretrain_model_dir="./save_dir/pretrain_backbone" +pretrain.val_num=20000 +pretrain.lr_decay_step=170000 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/define.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/define.py new file mode 100644 index 000000000..f2c6e50cc --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/define.py @@ -0,0 +1,73 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +from enum import Enum + +class BACKBONE(Enum): + Default=0 + Mobilenetv1=1 + Mobilenetv2=2 + MobilenetDilated=3 + MobilenetThin=4 + MobilenetSmall=5 + Vggtiny=6 + Vgg19=7 + Vgg16=8 + Resnet18=9 + Resnet50=10 + +class MODEL(Enum): + Openpose=0 + LightweightOpenpose=1 + PoseProposal=2 + MobilenetThinOpenpose=3 + Pifpaf=4 + +class DATA(Enum): + MSCOCO=0 + MPII=1 + USERDEF=2 + MULTIPLE=3 + +class TRAIN(Enum): + Single_train=0 + Parallel_train=1 + +class KUNGFU(Enum): + Sync_sgd=0 + Sync_avg=1 + Pair_avg=2 + +class OPTIM(Enum): + Adam=0 + RMSprop=1 + SGD=2 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__init__.py new file mode 100644 index 000000000..3803c3ff6 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__init__.py @@ -0,0 +1,140 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +from .common import TRAIN,MODEL,DATA +from .base_dataset import Base_dataset +from .multi_dataset import Multi_dataset +from .mpii_dataset import MPII_dataset +from .mscoco_dataset import MSCOCO_dataset +from .imagenet_dataset import Imagenet_dataset +from .dmadapt_dataset import Domainadapt_dataset +from .common import log_data as log + +def get_dataset(config): + '''get dataset object based on the config object + + consturct and return a dataset object based on the config. + No matter what the bottom dataset type is, the APIs of the returned dataset object are uniform, they are + the following APIs: + + visualize: visualize annotations of the train dataset and save it in "data_vir_dir" + get_dataset_type: return the type of the bottom dataset. + get_train_dataset: return a uniform tensorflow dataset object for training. + get_val_dataset: return a uniform tensorflow dataset object for evaluating. + official_eval: perform official evaluation on this dataset. + + + The construction pipeline of this dataset object is below: + + 1.check whether the dataset file(official zip or mat) is under data_path, + if it isn't, download it from official website automaticly + + 2.decode the official dataset file, organize the annotations in corresponding Meta classes, + conveniet for processing. + + 3.based on annotation, split train and evaluat part for furthur use. + + if user defined thier own dataset_filter, it will be executed in the train dataset or evaluate dataset generating procedure. + + use the APIs of this returned dataset object, the difference of different dataset is minimized. + + Parameters + ---------- + arg1 : config object + the config object return by Config.get_config() function, which includes all the configuration information. 
+ + Returns + ------- + dataset + a dataset object with unifrom APIs: + visualize, get_dataset_type, get_train_dataset, get_val_dataset,official_eval + ''' + model_type=config.model.model_type + dataset_type=config.data.dataset_type + if(dataset_type==DATA.MSCOCO): + log("Using MSCOCO dataset!") + if(model_type==MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose or model_type==MODEL.Openpose): + from .mscoco_dataset.define import opps_input_converter as input_kpt_cvter + from .mscoco_dataset.define import opps_output_converter as output_kpt_cvter + elif(model_type==MODEL.PoseProposal): + from .mscoco_dataset.define import ppn_input_converter as input_kpt_cvter + from .mscoco_dataset.define import ppn_output_converter as output_kpt_cvter + elif(model_type==MODEL.Pifpaf): + from .mscoco_dataset.define import pifpaf_input_converter as input_kpt_cvter + from .mscoco_dataset.define import pifpaf_output_converter as output_kpt_cvter + dataset=MSCOCO_dataset(config,input_kpt_cvter,output_kpt_cvter) + dataset.prepare_dataset() + elif(dataset_type==DATA.MPII): + log("Using MPII dataset!") + if(model_type==MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose or model_type==MODEL.Openpose): + from .mpii_dataset.define import opps_input_converter as input_kpt_cvter + from .mpii_dataset.define import opps_output_converter as output_kpt_cvter + elif(model_type==MODEL.PoseProposal): + from .mpii_dataset.define import ppn_input_converter as input_kpt_cvter + from .mpii_dataset.define import ppn_output_converter as output_kpt_cvter + dataset=MPII_dataset(config,input_kpt_cvter,output_kpt_cvter) + dataset.prepare_dataset() + elif(dataset_type==DATA.USERDEF): + log("Using user-defined dataset!") + userdef_dataset=config.data.userdef_dataset + dataset=userdef_dataset(config) + elif(dataset_type==DATA.MULTIPLE): + log("Using multiple-combined dataset!") + combined_dataset_list=[] + multiple_dataset_configs=config.data.multiple_dataset_configs + log(f"Total {len(multiple_dataset_configs)} datasets settled, initializing combined datasets individualy....") + for dataset_idx,dataset_config in enumerate(multiple_dataset_configs): + log(f"Initializing combined dataset {dataset_idx},config:{dataset_config.data}...") + combined_dataset_list.append(get_dataset(dataset_config)) + log("Initialization finished") + dataset=Multi_dataset(config,combined_dataset_list) + else: + raise NotImplementedError(f"invalid dataset_type:{dataset_type}") + return dataset + + +def get_pretrain_dataset(config): + return Imagenet_dataset(config) + +def get_domainadapt_dataset(config): + return Domainadapt_dataset(config.domainadapt_train_img_paths) + +def enum2dataset(dataset_type): + if(dataset_type==DATA.MSCOCO): + return MSCOCO_dataset + elif(dataset_type==DATA.MPII): + return MPII_dataset + elif(dataset_type==DATA.MULTIPLE): + raise NotImplementedError("Multiple dataset shouldn't be nested!") + else: + raise NotImplementedError("Unknow dataset!") diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/base_dataset.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/base_dataset.py new file mode 100644 index 000000000..83e72b576 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/base_dataset.py @@ -0,0 +1,320 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
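# Illustrative sketch (not part of the patch): driving the uniform dataset APIs that
# get_dataset() above returns. Config.get_config() is the entry point the docstring
# refers to; the exact call sequence here is an assumption, not code from this repo.
from hyperpose import Config, Dataset

config = Config.get_config()
dataset = Dataset.get_dataset(config)
dataset.visualize(vis_num=10)             # save annotated samples for inspection
train_ds = dataset.get_train_dataset()    # tf.data.Dataset of (image_path, pickled target)
eval_ds = dataset.get_eval_dataset()      # tf.data.Dataset of (image_path, image_id)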
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import random +import numpy as np +import _pickle as cPickle +import tensorflow as tf +from .common import DATA +from .common import log_data as log +from .dmadapt_dataset import Domainadapt_dataset + +class Base_dataset: + def __init__(self,config,input_kpt_cvter=lambda x: x,output_kpt_cvter=lambda x: x): + # dataset basic configure + self.official_flag=config.data.official_flag + self.dataset_type=config.data.dataset_type + self.dataset_path=config.data.dataset_path + self.dataset_filter=config.data.dataset_filter + self.input_kpt_cvter=input_kpt_cvter + self.output_kpt_cvter=output_kpt_cvter + self.train_datasize=0 + self.eval_datasize=0 + self.test_datasize=0 + # user-define dataset configure + self.useradd_flag=config.data.useradd_flag + self.useradd_scale_rate=config.data.useradd_scale_rate + self.useradd_train_img_paths=config.data.useradd_train_img_paths + self.useradd_train_targets=config.data.useradd_train_targets + # domain adaptation + self.domainadapt_flag=config.data.domainadapt_flag + self.domainadapt_train_img_paths = config.data.domainadapt_train_img_paths + self.domainadapt_dataset = None + if(self.domainadapt_flag): + self.domainadapt_dataset = Domainadapt_dataset(self.domainadapt_train_img_paths) + + def visualize(self,vis_num=10): + raise NotImplementedError("virtual class Base_dataset function: visualize not implemented!") + + def set_dataset_version(self,dataset_version): + self.dataset_version=self.dataset_version + + def get_parts(self): + raise NotImplementedError("virtual class Base_dataset function: get_parts not implemented!") + + def get_colors(self): + raise NotImplementedError("virtual class Base_dataset function: get_colors not implemented!") + + def generate_train_data(self): + raise NotImplementedError("virtual class Base_dataset function: get_train_dataset not implemented!") + + def generate_eval_data(self): + raise NotImplementedError("virtual class Base_dataset function: get_eval_dataset not implemented!") + + def get_dataset_type(self): + return DATA.USERDEF + + def get_train_datasize(self): + #make sure cal this API to get datasize after calling get_train_dataset + return self.train_datasize + + def get_eval_datasize(self): + #make sure cal this API to get datasize after calling get_eval_dataset + return self.eval_datasize + + def get_test_datasize(self): + #make sure cal this API to get datasize after 
calling get_test_dataset + return self.test_datasize + + def get_train_dataset(self,in_list=False,need_total_num=False): + '''provide uniform tensorflow dataset for training + + return a tensorflow dataset based on MPII dataset, each iter contains two following object + + 1.image_path + a image path string encoded in utf-8 mode + + 2.target + bytes of a dict object encoded by _pickle, should be decode by "_pickle.loads(target.numpy())" + the dict contains the following key-value pair: + + 2.1 key: "kpt" + value: a list of keypoint annotations, each annotation corresponds to a person and is a list of + keypoints of the person, each keypoint is represent in the [x,y,v] mode, v=0 is unvisible and unanotated, + v=1 is unvisible but annotated, v=2 is visible and annotated. + 2.2 key: "mask" + value: None(MPII doesn't provide any mask information) + 2.3 key: "bbx" + value: a list of bbx annotation of the image, each bbx is in the [x,y,w,h] form. + 2.4 key: "labeled"(optional) + value: a bool value used for damain adaptation, 0 stands for the unlabeled target domain, 1 stands for the labeled src domain + + example use + 1.use tensorflow map function to convert the target format + map_function(image_path,target): + + image = tf.io.read_file(image_path) + image, target, mask=tf.py_function(defined_pyfunction, [image, target], [tf.float32, tf.float32, tf.float32]) + 2.process the target to your own format when in need in defined_pyfunction + defined_pyfunction(image, target): + + target = _pickle.loads(target.numpy()) + annos = target["kpt"] + mask = target["mask"] + bbxs = target["bbxs"] + processing + 3. for image,target in train_dataset + + for more detail use, one can refer the training pipeline of models. + + Parameters + ---------- + None + + Returns + ------- + tensorflow dataset object + a unifrom formated tensorflow dataset object for training + ''' + train_img_paths_list,train_targets_list=[],[] + #official data + if(self.official_flag): + log("Generating official training data...") + official_img_paths_list,official_targets_list=self.generate_train_data() + assert len(official_img_paths_list)==len(official_targets_list) + train_img_paths_list+=official_img_paths_list + train_targets_list+=official_targets_list + log(f"{len(train_img_paths_list)} official training data added!") + #user defined data + if(self.useradd_flag): + log("adding user defined training data...") + assert len(self.useradd_train_img_paths)==len(self.useradd_train_targets) + train_img_paths_list+=self.useradd_train_img_paths*self.useradd_scale_rate + train_targets_list+=self.useradd_train_targets*self.useradd_scale_rate + log(f"{len(self.useradd_train_img_paths)} user define training data added! repeat time:{self.useradd_scale_rate}") + #filter non-exist image and target + log("filtering non-exist images and targets") + filter_train_img_paths,filter_train_targets=[],[] + filter_num=0 + for train_img_path,train_target in zip(train_img_paths_list,train_targets_list): + if(os.path.exists(train_img_path)): + filter_train_img_paths.append(train_img_path) + filter_train_targets.append(train_target) + else: + filter_num+=1 + train_img_paths_list=filter_train_img_paths + train_targets_list=filter_train_targets + log(f"filtering finished! 
total {len(train_img_paths_list)} images and targets left, {filter_num} invalid found.") + #input conversion + log("converting input keypoint...") + for target_idx in range(0,len(train_targets_list)): + target=train_targets_list[target_idx] + #keypoint conversion + kpts=target["kpt"] + for p_idx in range(0,len(kpts)): + kpts[p_idx]=self.input_kpt_cvter(np.array(kpts[p_idx])) + target["kpt"]=kpts + train_targets_list[target_idx]=target + log("conversion finished!") + #shuffle all data + log("shuffling all training data...") + shuffle_list=[{"image_path":img_path,"target":target} for img_path,target in zip(train_img_paths_list,train_targets_list)] + random.shuffle(shuffle_list) + train_img_paths_list=[shuffle_dict["image_path"] for shuffle_dict in shuffle_list] + train_targets_list=[shuffle_dict["target"] for shuffle_dict in shuffle_list] + log("shuffling data finished, generating tensorflow dataset...") + log(f"total {len(train_img_paths_list)} training data generated!") + + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(train_img_paths_list) == len(train_targets_list) + for _input, _target in zip(train_img_paths_list, train_targets_list): + yield _input.encode('utf-8'), cPickle.dumps(_target) + + train_dataset = tf.data.Dataset.from_generator(generator, output_types=(tf.string, tf.string)) + #update datasize + self.train_datasize=len(train_img_paths_list) + log(f"train dataset generation finished!") + if(in_list): + return train_img_paths_list,train_targets_list + else: + return train_dataset + + def get_eval_dataset(self,in_list=False): + '''provide uniform tensorflow dataset for evaluating + + return a tensorflow dataset based on MPII dataset, each iter contains two following object + + 1.image_path: + a image path string encoded in utf-8 mode + + 2.image_id: + a image id string encoded in utf-8 mode + + example use: + for image_path,image_id in eval_dataset + + for more detail use, one can refer the evaluating pipeline of models. + + Parameters + ---------- + None + + Returns + ------- + tensorflow dataset object + a unifrom formated tensorflow dataset object for evaluating + ''' + log("generating official evaluating data...") + eval_img_files_list,eval_img_ids_list=self.generate_eval_data() + log(f"total {len(eval_img_files_list)} official evaluating data generated!") + #filter non-exist eval images and targets + log("filtering non-exist images and targets") + filter_img_files,filter_img_ids=[],[] + filter_num=0 + for img_file,img_id in zip(eval_img_files_list,eval_img_ids_list): + if(os.path.exists(img_file)): + filter_img_files.append(img_file) + filter_img_ids.append(img_id) + else: + filter_num+=1 + eval_img_files_list=filter_img_files + eval_img_ids_list=filter_img_ids + log(f"filtering finished! 
total {len(eval_img_files_list)} images and targets left, {filter_num} invalid found.") + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(eval_img_files_list)==len(eval_img_ids_list) + for img_file,img_id in zip(eval_img_files_list,eval_img_ids_list): + yield img_file.encode("utf-8"),img_id + + eval_dataset = tf.data.Dataset.from_generator(generator,output_types=(tf.string,tf.int32)) + #update datasize + self.eval_datasize=len(eval_img_files_list) + log(f"eval dataset generation finished!") + if(in_list): + return eval_img_files_list,eval_img_ids_list + else: + return eval_dataset + + def get_test_dataset(self,in_list=False): + log("generating official test dataset...") + test_img_files_list,test_img_ids_list=self.generate_test_data() + log(f"total {len(test_img_files_list)} official test data generated!") + #filter non-exist test images and targets + filter_img_files_list,filter_img_ids_list=[],[] + filter_num=0 + for img_file,img_id in zip(test_img_files_list,test_img_ids_list): + if(os.path.exists(img_file)): + filter_img_files_list.append(img_file) + filter_img_ids_list.append(img_id) + else: + filter_num+=1 + test_img_files_list=filter_img_files_list + test_img_ids_list=filter_img_ids_list + log(f"filtering finished! total {len(test_img_files_list)} images and targets left, {filter_num} invalid found.") + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(test_img_files_list)==len(test_img_ids_list) + for img_file,img_id in zip(test_img_files_list,test_img_ids_list): + yield img_file.encode("utf-8"),img_id + + test_dataset=tf.data.Dataset.from_generator(generator,output_types=(tf.string,tf.int32)) + #update datasize + self.test_datasize=len(test_img_files_list) + log("test dataset generation finished!") + if(in_list): + return test_img_files_list,test_img_ids_list + else: + return test_dataset + + def get_dmadapt_train_dataset(self): + return self.domainadapt_dataset.get_train_dataset() + + def official_eval(self,pd_json,eval_dir=f"./eval_dir"): + raise NotImplementedError("virtual class Base_dataset function: official_eval not implemented!") + + def set_input_kpt_cvter(self,input_kpt_cvter): + self.input_kpt_cvter=input_kpt_cvter + + def set_output_kpt_cvter(self,output_kpt_cvter): + self.output_kpt_cvter=output_kpt_cvter + + def get_input_kpt_cvter(self): + return self.input_kpt_cvter + + def get_output_kpt_cvter(self): + return self.output_kpt_cvter + \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/common.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/common.py new file mode 100644 index 000000000..92912e3c8 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/common.py @@ -0,0 +1,135 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
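# Illustrative sketch (not part of the patch): consuming the (image_path, target) pairs
# that Base_dataset.get_train_dataset() above yields, following the map_function /
# tf.py_function pattern its docstring describes. The decoding details and the
# person-count "label" are placeholders, not the models' real training targets.
import _pickle as cPickle
import numpy as np
import tensorflow as tf

def decode_target(image, target_bytes):
    target = cPickle.loads(target_bytes.numpy())     # dict with "kpt", "mask", "bbxs", ...
    return image, np.float32(len(target["kpt"]))     # placeholder: number of people

def map_fn(image_path, target_bytes):
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    return tf.py_function(decode_target, [image, target_bytes], [tf.float32, tf.float32])

# train_ds = dataset.get_train_dataset().map(map_fn, num_parallel_calls=tf.data.AUTOTUNE)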
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import logging +import os +import cv2 +import numpy as np +import zipfile +from enum import Enum +import _pickle as cpickle +import tensorflow as tf +import matplotlib.pyplot as plt +from ..Config.define import TRAIN,MODEL,DATA,KUNGFU +import multiprocessing + +def unzip(path_to_zip_file, directory_to_extract_to): + zip_ref = zipfile.ZipFile(path_to_zip_file, 'r') + zip_ref.extractall(directory_to_extract_to) + zip_ref.close() + +def imread_rgb_float(image_path,data_format="channels_first"): + image=cv2.cvtColor(cv2.imread(image_path),cv2.COLOR_BGR2RGB).astype(np.float32)/255.0 + if(data_format=="channels_first"): + image=np.transpose(image,[2,0,1]) + return image.copy() + +def imwrite_rgb_float(image,image_path,data_format="channels_first"): + if(data_format=="channels_first"): + image=np.transpose(image,[1,2,0]) + image=cv2.cvtColor(image,cv2.COLOR_RGB2BGR) + image=np.clip(image*255.0,0,255).astype(np.uint8) + return cv2.imwrite(image_path,image) + +def file_log(log_file,msg): + log_file.write(msg+"\n") + print(msg) + +def visualize(vis_dir,vis_num,dataset,parts,colors,dataset_name="default"): + log_file=open(os.path.join(vis_dir,"visualize_info.txt"),mode="w") + for vis_id,(img_file,annos) in enumerate(dataset,start=1): + if(vis_id>=vis_num): + break + image=cv2.cvtColor(cv2.imread(img_file.numpy().decode("utf-8"),cv2.IMREAD_COLOR),cv2.COLOR_BGR2RGB) + radius=int(np.round(min(image.shape[0],image.shape[1])/40)) + annos=cpickle.loads(annos.numpy()) + ori_img=image + vis_img=image.copy() + kpts_list=annos["kpt"] + file_log(log_file,f"visualizing image:{img_file} with {len(kpts_list)} humans...") + for pid,kpts in enumerate(kpts_list): + file_log(log_file,f"person {pid}:") + x,y=kpts[:,0],kpts[:,1] + for part_idx in range(0,len(parts)): + file_log(log_file,f"part {parts(part_idx)} x:{x[part_idx]} y:{y[part_idx]}") + if(x[part_idx]<0 or y[part_idx]<0): + continue + color=colors[part_idx] + vis_img=cv2.circle(vis_img,(int(x[part_idx]),int(y[part_idx])),radius=radius,color=color,thickness=-1) + fig=plt.figure(figsize=(8,8)) + a=fig.add_subplot(1,2,1) + a.set_title("original image") + plt.imshow(ori_img) + a=fig.add_subplot(1,2,2) + a.set_title("visualized image") + plt.imshow(vis_img) + #print(f"test img_file:{type(img_file)} numpy:{type(img_file.numpy())} str:{type(str(img_file.numpy()))} ") + image_path=bytes.decode(img_file.numpy()) + #print(f"test path:{type(image_path)} {image_path}") + image_path=os.path.basename(image_path) + image_mark=image_path[:image_path.rindex(".")] + plt.savefig(f"{vis_dir}/{image_mark}_vis_{dataset_name}.png") + plt.close('all') + print() + file_log(log_file,f"visualization finished! 
total {vis_num} image visualized!") + +def basic_map_func(image_path): + """TF Dataset pipeline.""" + # load data + image = tf.io.read_file(image_path) + image = tf.image.decode_jpeg(image, channels=3) # get RGB with 0~1 + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # data augmentaion using tf + image = tf.image.random_brightness(image, max_delta=35. / 255.) # 64./255. 32./255.) caffe -30~50 + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) # lower=0.2, upper=1.8) caffe 0.3~1.5 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0) + return image + +def get_domainadapt_targets(domainadapt_img_paths): + domainadapt_targets=[] + if(domainadapt_img_paths!=None): + for _ in range(0,len(domainadapt_img_paths)): + domainadapt_targets.append({ + "kpt":np.zeros(shape=(1,17,2)), + "mask":None, + "bbx":np.zeros(shape=(1,4)), + "labeled":0 + }) + return domainadapt_targets + +def get_num_parallel_calls(): + return max(multiprocessing.cpu_count()//2,1) + +def log_data(msg): + logger=logging.getLogger("DATA") + logger.info(msg) \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/dmadapt_dataset.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/dmadapt_dataset.py new file mode 100644 index 000000000..051e1b7b4 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/dmadapt_dataset.py @@ -0,0 +1,50 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
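# Illustrative sketch (not part of the patch): log_data() above routes messages through
# the standard logging module under the "DATA" logger, so they only show up once a
# handler is configured, e.g. at program start. This configuration is an assumption.
import logging

logging.basicConfig(level=logging.INFO, format="%(name)s: %(message)s")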
+ + +import npu_device as npu +npu.open().as_default() + +import tensorflow as tf +from .common import basic_map_func, get_num_parallel_calls +from .common import log_data as log + +class Domainadapt_dataset: + def __init__(self,image_paths): + self.image_paths=image_paths + log(f"Domainadapt dataset constructed, total {len(self.image_paths)} adapt images.") + + def get_train_dataset(self): + # tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + for _input in self.image_paths: + yield _input.encode('utf-8') + train_dataset = tf.data.Dataset.from_generator(generator,output_types=tf.string) + train_dataset = train_dataset.map(map_func=basic_map_func, num_parallel_calls=get_num_parallel_calls()) + return train_dataset diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__init__.py new file mode 100644 index 000000000..0e5bcbf41 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__init__.py @@ -0,0 +1,33 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +from .dataset import Imagenet_dataset \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/dataset.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/dataset.py new file mode 100644 index 000000000..e02597b6c --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/dataset.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
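# Illustrative sketch (not part of the patch): Domainadapt_dataset above only needs a
# list of unlabeled image paths; get_train_dataset() then lazily yields color-augmented
# float images via basic_map_func. The file paths are hypothetical.
from hyperpose.Dataset.dmadapt_dataset import Domainadapt_dataset

adapt = Domainadapt_dataset(["./data/target_domain/0001.jpg",
                             "./data/target_domain/0002.jpg"])
adapt_ds = adapt.get_train_dataset()   # tf.data.Dataset of augmented RGB tensors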
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import glob +import tensorflow as tf + +class Imagenet_dataset: + def __init__(self,config): + self.dataset_path=config.pretrain.pretrain_dataset_path + self.train_dataset_path=f"{self.dataset_path}/imagenet/train" + self.val_dataset_path=f"{self.dataset_path}/imagenet/val" + + def prepare_dataset(self): + # decoding train files + train_file_names=glob.glob(f"./{self.train_dataset_path}/*.tar") + print(f"total {len(train_file_names)} tar file for train found!") + for file_num,file_name in enumerate(train_file_names): + print(f"decompresssing {file_num+1}/{len(train_file_names)} train tar file...") + label=file_name[:file_name.rindex(".")] + os.makedirs(f"{label}",exist_ok=True) + os.system(f"tar -xf {file_name} -C {label}") + #decoding val files + val_file_names=glob.glob(f"./{self.val_dataset_path}/*.tar") + print(f"total {len(val_file_names)} tar file for evaluation found!") + for file_num,file_name in enumerate(val_file_names): + print(f"decompresssing {file_num+1}/{len(val_file_names)} evaluate tar file...") + label=file_name[:file_name.rindex(".")] + os.makedirs(f"{label}",exist_ok=True) + os.system(f"tar -xf {file_name} -C {label}") + + def get_train_dataset(self): + img_paths=glob.glob(f"{self.train_dataset_path}/*/*") + if(len(img_paths)==0): + print(f"error: no training image files founded!") + print(f"please download the .tar files for training in directory:{self.train_dataset_path} and use Imagenet_dataset.prepare_dataset() to decompress") + return None + img_labels={} + train_img_paths=[] + train_img_labels=[] + for img_path in img_paths: + img_label=os.path.basename(os.path.dirname(img_path)) + if(img_label not in img_labels): + img_labels[img_label]=len(img_labels) + train_img_paths.append(img_path) + train_img_labels.append(img_labels[img_label]) + print(f"total train scenery class num:{len(img_labels)}") + + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(train_img_paths)==len(train_img_labels) + for img_path,img_label in zip(train_img_paths,train_img_labels): + yield img_path.encode("utf-8"),int(img_label) + + train_dataset=tf.data.Dataset.from_generator(generator,output_types=(tf.string,tf.int64)) + return train_dataset + + def get_eval_dataset(self): + img_paths=glob.glob(f"{self.val_dataset_path}/*/*") + if(len(img_paths)==0): + print(f"error: no evaluate image files founded!") + print(f"please download the .tar files for evaluate in directory:{self.val_dataset_path} and use Imagenet_dataset.prepare_dataset() to decompress") + return None + img_labels={} + val_img_paths=[] + val_img_labels=[] + for img_path in img_paths: + img_label=os.path.basename(os.path.dirname(img_path)) + if(img_label not in img_labels): + img_labels[img_label]=len(img_labels) + val_img_paths.append(img_path) + 
val_img_labels.append(img_labels[img_label]) + print(f"total eval scenery class num:{len(img_labels)}") + + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(val_img_paths)==len(val_img_labels) + for img_path,img_label in zip(val_img_paths,val_img_labels): + yield img_path.encode("utf-8"),img_label + + val_dataset=tf.data.Dataset.from_generator(generator,output_types=(tf.string,tf.int64)) + return val_dataset \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__init__.py new file mode 100644 index 000000000..b5f09f066 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__init__.py @@ -0,0 +1,34 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +from .dataset import init_dataset +from .dataset import MPII_dataset \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/dataset.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/dataset.py new file mode 100644 index 000000000..c041b3199 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/dataset.py @@ -0,0 +1,272 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
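# Illustrative sketch (not part of the patch): driving Imagenet_dataset above for
# backbone pretraining. The edict mirrors the pretrain block in Config/config_pretrain.py;
# everything else here is an assumption.
from easydict import EasyDict as edict
from hyperpose.Dataset.imagenet_dataset import Imagenet_dataset

config = edict({"pretrain": {"pretrain_dataset_path": "./data/imagenet"}})
imagenet = Imagenet_dataset(config)
# imagenet.prepare_dataset()               # one-off: extracts the per-class .tar archives
# train_ds = imagenet.get_train_dataset()  # yields (utf-8 image path, int64 class label)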
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import json +import numpy as np +import _pickle as cPickle + +from ..base_dataset import Base_dataset +from ..common import visualize +from .define import MpiiPart,MpiiColor +from .format import PoseInfo +from .prepare import prepare_dataset +from .generate import generate_train_data,generate_eval_data + +def init_dataset(config): + dataset=MPII_dataset(config) + return dataset + +class MPII_dataset(Base_dataset): + '''a dataset class specified for mpii dataset, provides uniform APIs''' + def __init__(self,config,input_kpt_cvter=None,output_kpt_cvter=None,dataset_filter=None): + super().__init__(config,input_kpt_cvter,output_kpt_cvter) + #basic data configure + self.official_flag=config.data.official_flag + self.dataset_type=config.data.dataset_type + self.dataset_path=config.data.dataset_path + self.vis_dir=config.data.vis_dir + self.annos_path=None + self.images_path=None + self.parts=MpiiPart + self.colors=MpiiColor + if(input_kpt_cvter==None): + input_kpt_cvter=lambda x:x + if(output_kpt_cvter==None): + output_kpt_cvter=lambda x:x + self.input_kpt_cvter=input_kpt_cvter + self.output_kpt_cvter=output_kpt_cvter + self.dataset_filter=dataset_filter + + def visualize(self,vis_num=10): + '''visualize annotations of the train dataset + + visualize the annotation points in the image to help understand and check annotation + the visualized image will be saved in the "data_vis_dir" of the corresponding model directory(specified by model name). + the visualized annotations are from the train dataset. + + Parameters + ---------- + arg1 : Int + An integer indicates how many images with their annotations are going to be visualized. + + Returns + ------- + None + ''' + train_dataset=self.get_train_dataset() + visualize(self.vis_dir,vis_num,train_dataset,self.parts,self.colors,dataset_name="mpii") + + def get_parts(self): + return self.parts + + def get_colors(self): + return self.colors + + def get_dataset_type(self): + return self.dataset_type + + def prepare_dataset(self): + '''download,extract, and reformat the dataset + the official dataset is in .mat format, format it into json format automaticly. 
+ + Parameters + ---------- + None + + Returns + ------- + None + ''' + self.train_annos_path,self.val_annos_path,self.images_path=prepare_dataset(self.dataset_path) + + def generate_train_data(self): + return generate_train_data(self.images_path,self.train_annos_path,self.dataset_filter,self.input_kpt_cvter) + + def generate_eval_data(self): + return generate_eval_data(self.images_path,self.val_annos_path,self.dataset_filter) + + def generate_test_data(self): + raise NotImplementedError("MPII test dataset generation has not implemented!") + + def set_input_kpt_cvter(self,input_kpt_cvter): + self.input_kpt_cvter=input_kpt_cvter + + def set_output_kpt_cvter(self,output_kpt_cvter): + self.output_kpt_cvter=output_kpt_cvter + + def get_input_kpt_cvter(self): + return self.input_kpt_cvter + + def get_output_kpt_cvter(self): + return self.output_kpt_cvter + + def official_eval(self,pd_anns,eval_dir=f"./eval_dir"): + '''providing official evaluation of MPII dataset + + output model metrics of PCHs on mpii evaluation dataset(split automaticly) + + Parameters + ---------- + arg1 : String + A string path of the json file in the same format of cocoeval annotation file(person_keypoints_val2017.json) + which contains predicted results. one can refer the evaluation pipeline of models for generation procedure of this json file. + arg2 : String + A string path indicates where the result json file which contains MPII PCH metrics of various keypoint saves. + + Returns + ------- + None + ''' + #format predict result in dict + pd_dict={} + for pd_ann in pd_anns: + image_id=pd_ann["image_id"] + kpt_list=np.array(pd_ann["keypoints"]) + x=kpt_list[0::3][np.newaxis,...] + y=kpt_list[1::3][np.newaxis,...] + pd_ann["keypoints"]=np.concatenate([x,y],axis=0) + if(image_id not in pd_dict): + pd_dict[image_id]=[] + pd_dict[image_id].append(pd_ann) + #format ground truth + metas=PoseInfo(self.images_path,self.val_annos_path,dataset_filter=self.dataset_filter).metas + gt_dict={} + for meta in metas: + gt_ann_list=meta.to_anns_list() + for gt_ann in gt_ann_list: + kpt_list=np.array(gt_ann["keypoints"]) + x=kpt_list[0::3][np.newaxis,...] + y=kpt_list[1::3][np.newaxis,...] 
+ vis_list=np.array(gt_ann["vis"]) + vis_list=np.where(vis_list>0,1,0) + gt_ann["keypoints"]=np.concatenate([x,y],axis=0) + gt_ann["vis"]=vis_list + gt_dict[meta.image_id]=gt_ann_list + + all_pd_kpts=[] + all_gt_kpts=[] + all_gt_vis=[] + all_gt_headbbxs=[] + #match kpt into order for PCK calculation + for image_id in pd_dict.keys(): + #sort pd_anns by score + pd_img_anns=np.array(pd_dict[image_id]) + sort_idx=np.argsort([-pd_img_ann["score"] for pd_img_ann in pd_img_anns]) + pd_img_anns=pd_img_anns[sort_idx] + gt_img_anns=gt_dict[image_id] + #start to match pd and gt anns + match_pd_ids=np.full(shape=len(gt_img_anns),fill_value=-1) + for pd_id,pd_img_ann in enumerate(pd_img_anns): + pd_kpts=pd_img_ann["keypoints"] + match_id=-1 + match_dist=np.inf + for gt_id,gt_img_ann in enumerate(gt_img_anns): + #gt person already matched + if(match_pd_ids[gt_id]!=-1): + continue + gt_kpts=gt_img_ann["keypoints"] + gt_vis=gt_img_ann["vis"] + vis_mask=np.ones(shape=gt_vis.shape) + vis_mask[6:8]=0 + vis_num=np.sum(gt_vis) + if(vis_num==0): + continue + dist=np.sum(np.linalg.norm((pd_kpts-gt_kpts)*gt_vis*vis_mask,axis=0))/vis_num + if(dist=0 and y>=0): + kpts+=[x,y,1.0] + flag=True + if(not flag): + kpts+=[0.0,0.0,0.0] + return kpts \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/format.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/format.py new file mode 100644 index 000000000..72f9e308b --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/format.py @@ -0,0 +1,209 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import numpy as np +from pycocotools.coco import COCO +from scipy.spatial.distance import cdist + +## read coco data +class CocoMeta: + """ Be used in PoseInfo. 
""" + def __init__(self, image_id, img_url, img_meta, kpts_infos, masks, bbxs, is_crowd): + self.image_id = image_id + self.img_url = img_url + self.img = None + self.height = int(img_meta['height']) + self.width = int(img_meta['width']) + self.masks = masks + self.bbx_list=bbxs + self.is_crowd=is_crowd + + self.joint_list=[] + for kpts_info in kpts_infos: + if kpts_info.get('num_keypoints', 0) == 0: + continue + kpts = np.array(kpts_info['keypoints']) + self.joint_list.append(kpts) + +class PoseInfo: + """ Use COCO for pose estimation, returns images with people only. """ + + def __init__(self, image_base_dir, anno_path, with_mask=True,dataset_filter=None, eval=False): + self.metas = [] + # self.data_dir = data_dir + # self.data_type = data_type + self.eval=eval + self.image_base_dir = image_base_dir + self.anno_path = anno_path + self.with_mask = with_mask + self.coco = COCO(self.anno_path) + self.get_image_annos() + self.image_list = os.listdir(self.image_base_dir) + if(dataset_filter!=None): + filter_metas=[] + for meta in self.metas: + if(dataset_filter(meta)==True): + filter_metas.append(meta) + self.metas=filter_metas + + @staticmethod + def get_keypoints(annos_info): + annolist = [] + for anno in annos_info: + adjust_anno = {'keypoints': anno['keypoints'], 'num_keypoints': anno['num_keypoints']} + annolist.append(adjust_anno) + return annolist + + @staticmethod + def get_bbxs(annos_info): + bbxlist=[] + for anno in annos_info: + bbxlist.append(anno["bbox"]) + return bbxlist + + def get_image_annos(self): + """Read JSON file, and get and check the image list. + Skip missing images. + """ + images_ids = self.coco.getImgIds() + len_imgs = len(images_ids) + for idx in range(len_imgs): + + image_info = self.coco.loadImgs(images_ids[idx])[0] + image_path = os.path.join(self.image_base_dir, image_info['file_name']) + # filter that some images might not in the list + if not os.path.exists(image_path): + print("[skip] json annotation found, but cannot found image: {}".format(image_path)) + continue + + annos_ids = self.coco.getAnnIds(imgIds=images_ids[idx]) + annos_info = self.coco.loadAnns(annos_ids) + kpts_info = self.get_keypoints(annos_info) + bbxs=self.get_bbxs(annos_info) + + ############################################################################# + anns = annos_info + prev_center = [] + masks = [] + #check for crowd + is_crowd=False + for ann in anns: + if("iscrowd" in ann and ann["iscrowd"]): + is_crowd=True + + # sort from the biggest person to the smallest one + if self.with_mask: + persons_ids = np.argsort([-a['area'] for a in anns], kind='mergesort') + + for p_id in list(persons_ids): + person_meta = anns[p_id] + + if person_meta["iscrowd"]: + is_crowd=True + masks.append(self.coco.annToRLE(person_meta)) + continue + + # skip this person if parts number is too low or if + # segmentation area is too small + if person_meta["num_keypoints"] < 5 or person_meta["area"] < 32 * 32: + masks.append(self.coco.annToRLE(person_meta)) + continue + + person_center = [ + person_meta["bbox"][0] + person_meta["bbox"][2] / 2, + person_meta["bbox"][1] + person_meta["bbox"][3] / 2 + ] + + # skip this person if the distance to existing person is too small + too_close = False + for pc in prev_center: + a = np.expand_dims(pc[:2], axis=0) + b = np.expand_dims(person_center, axis=0) + dist = cdist(a, b)[0] + if dist < pc[2] * 0.3: + too_close = True + break + + if too_close: + # add mask of this person. 
we don't want to show the network + # unlabeled people + masks.append(self.coco.annToRLE(person_meta)) + continue + + ############################################################################ + #eval accept all images + if(self.eval): + meta = CocoMeta(images_ids[idx], image_path, image_info, kpts_info, masks, bbxs, is_crowd) + self.metas.append(meta) + #train filter images + else: + total_keypoints = sum([ann.get('num_keypoints', 0) for ann in annos_info]) + if total_keypoints > 0: + meta = CocoMeta(images_ids[idx], image_path, image_info, kpts_info, masks, bbxs, is_crowd) + self.metas.append(meta) + + print("Overall get {} valid pose images from {} and {}".format( + len(self.metas), self.image_base_dir, self.anno_path)) + + def load_images(self): + pass + + def get_image_id_list(self): + img_id_list=[] + for meta in self.metas: + img_id_list.append(meta.image_id) + return img_id_list + + def get_image_list(self): + img_list = [] + for meta in self.metas: + img_list.append(meta.img_url) + return img_list + + def get_kpt_list(self): + joint_list = [] + for meta in self.metas: + joint_list.append(meta.joint_list) + return joint_list + + def get_mask_list(self): + mask_list = [] + for meta in self.metas: + mask_list.append(meta.masks) + return mask_list + + def get_bbx_list(self): + bbx_list=[] + for meta in self.metas: + bbx_list.append(meta.bbx_list) + return bbx_list diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/generate.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/generate.py new file mode 100644 index 000000000..8c77c8ad2 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/generate.py @@ -0,0 +1,73 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
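# Illustrative sketch (not part of the patch): reading COCO keypoint annotations with
# the PoseInfo helper above. The directory layout follows the defaults in
# mscoco_dataset/prepare.py; the concrete paths are otherwise assumptions.
from hyperpose.Dataset.mscoco_dataset.format import PoseInfo

info = PoseInfo("./data/mscoco2017/train2017",
                "./data/mscoco2017/annotations/person_keypoints_train2017.json",
                with_mask=True)
img_paths = info.get_image_list()   # one path per image that passed the filters
kpt_lists = info.get_kpt_list()     # per image: list of per-person keypoint arrays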
+ + +import npu_device as npu +npu.open().as_default() + +import os +import tensorflow as tf +import _pickle as cPickle +from pycocotools.coco import COCO +from .format import CocoMeta,PoseInfo + +def generate_train_data(train_imgs_path,train_anns_path,dataset_filter=None,input_kpt_cvter=lambda x: x): + # read coco training images contains valid people + data = PoseInfo(train_imgs_path, train_anns_path, with_mask=True, dataset_filter=dataset_filter) + img_paths_list = data.get_image_list() + kpts_list = data.get_kpt_list() + mask_list = data.get_mask_list() + bbx_list=data.get_bbx_list() + target_list=[] + for kpts,mask,bbx in zip(kpts_list,mask_list,bbx_list): + target_list.append({ + "kpt":kpts, + "mask":mask, + "bbxs":bbx, + "labeled":1 + }) + return img_paths_list,target_list + +def generate_eval_data(val_imgs_path,val_anns_path,dataset_filter=None): + # read coco evaluation images contains valid people + coco_data=PoseInfo(val_imgs_path,val_anns_path,with_mask=False, dataset_filter=dataset_filter, eval=True) + img_file_list,img_id_list=coco_data.get_image_list(),coco_data.get_image_id_list() + return img_file_list,img_id_list + +def generate_test_data(test_imgs_path,test_anns_path): + # read coco test-dev images used for test + print("currently using the test-dev dataset for test! if you want to test over the whole test2017 dataset, change the annotation path please!") + dev_coco=COCO(test_anns_path) + img_id_list=dev_coco.getImgIds() + img_file_list=[] + for img_id in img_id_list: + img_info=dev_coco.loadImgs(img_id)[0] + img_file=img_info["file_name"] + img_path=os.path.join(test_imgs_path,img_file) + img_file_list.append(img_path) + return img_file_list,img_id_list diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/prepare.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/prepare.py new file mode 100644 index 000000000..743d4bbfb --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/prepare.py @@ -0,0 +1,138 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
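# Illustrative sketch (not part of the patch): the generate_* helpers above return
# parallel lists; for evaluation these are the image files and their COCO image ids.
# Paths follow the prepare.py defaults and are otherwise assumptions.
from hyperpose.Dataset.mscoco_dataset.generate import generate_eval_data

img_files, img_ids = generate_eval_data(
    "./data/mscoco2017/val2017",
    "./data/mscoco2017/annotations/person_keypoints_val2017.json")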
+ + +import npu_device as npu +npu.open().as_default() + + +import os +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.files.utils import (del_file, folder_exists, maybe_download_and_extract) + +from ..common import unzip + +def prepare_dataset(data_path="./data",version="2017",task="person"): + """Download MSCOCO Dataset. + Both 2014 and 2017 dataset have train, validate and test sets, but 2017 version put less data into the validation set (115k train, 5k validate) i.e. has more training data. + + Parameters + ----------- + path : str + The path that the data is downloaded to, defaults is ``data/mscoco...``. + dataset : str + The MSCOCO dataset version, `2014` or `2017`. + task : str + person for pose estimation, caption for image captioning, instance for segmentation. + + Returns + --------- + train_image_path : str + Folder path of all training images. + train_ann_path : str + File path of training annotations. + val_image_path : str + Folder path of all validating images. + val_ann_path : str + File path of validating annotations. + test_image_path : str + Folder path of all testing images. + test_ann_path : None + File path of testing annotations, but as the test sets of MSCOCO 2014 and 2017 do not have annotation, returns None. + + Examples + ---------- + >>> train_im_path, train_ann_path, val_im_path, val_ann_path, _, _ = \ + ... tl.files.load_mscoco_dataset('data', '2017') + + References + ------------- + - `MSCOCO `__. + + """ + + if version == "2014": + logging.info(" [============= MSCOCO 2014 =============]") + path = os.path.join(data_path, 'mscoco2014') + + + elif version == "2017": + # 11.5w train, 0.5w valid, test (no annotation) + path = os.path.join(data_path, 'mscoco2017') + + + else: + raise Exception("dataset can only be 2014 and 2017, see MSCOCO website for more details.") + + if version == "2014": + train_images_path = os.path.join(path, "train2014") + if task == "person": + train_annotations_file_path = os.path.join(path, "annotations", "person_keypoints_train2014.json") + elif task == "caption": + train_annotations_file_path = os.path.join(path, "annotations", "captions_train2014.json") + elif task == "instance": + train_annotations_file_path = os.path.join(path, "annotations", "instances_train2014.json") + else: + raise Exception("unknown task") + val_images_path = os.path.join(path, "val2014") + if task == "person": + val_annotations_file_path = os.path.join(path, "annotations", "person_keypoints_val2014.json") + elif task == "caption": + val_annotations_file_path = os.path.join(path, "annotations", "captions_val2014.json") + elif task == "instance": + val_annotations_file_path = os.path.join(path, "annotations", "instances_val2014.json") + test_images_path = os.path.join(path, "test2014") + test_annotations_file_path = None #os.path.join(path, "annotations", "person_keypoints_test2014.json") + + elif version == "2017": + train_images_path = os.path.join(path, "train2017") + if task == "person": + train_annotations_file_path = os.path.join(path, "annotations", "person_keypoints_train2017.json") + elif task == "caption": + train_annotations_file_path = os.path.join(path, "annotations", "captions_train2017.json") + elif task == "instance": + train_annotations_file_path = os.path.join(path, "annotations", "instances_train2017.json") + else: + raise Exception("unknown task") + + val_images_path = os.path.join(path, "val2017") + if task == "person": + val_annotations_file_path = os.path.join(path, "annotations", 
"person_keypoints_val2017.json") + elif task == "caption": + val_annotations_file_path = os.path.join(path, "annotations", "captions_val2017.json") + elif task == "instance": + val_annotations_file_path = os.path.join(path, "annotations", "instances_val2017.json") + + test_images_path = os.path.join(path, "test2017") + test_annotations_file_path = os.path.join(path,"annotations","image_info_test-dev2017.json") + + return train_images_path,train_annotations_file_path,\ + val_images_path,val_annotations_file_path,\ + test_images_path,test_annotations_file_path + \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/multi_dataset.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/multi_dataset.py new file mode 100644 index 000000000..2772e0782 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/multi_dataset.py @@ -0,0 +1,117 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import npu_device as npu +npu.open().as_default() + +import random +import _pickle as cPickle +import tensorflow as tf +from .base_dataset import Base_dataset +from .common import visualize + +class Multi_dataset(Base_dataset): + def __init__(self,config,combined_dataset_list): + self.vis_dir=config.data.vis_dir + self.dataset_type=config.data.dataset_type + self.combined_dataset_list=combined_dataset_list + self.parts=combined_dataset_list[0].get_parts() + self.colors=combined_dataset_list[0].get_colors() + + def visualize(self,vis_num=10): + train_dataset=self.get_train_dataset() + visualize(vis_dir=self.vis_dir,vis_num=vis_num,dataset=train_dataset,parts=self.parts,colors=self.colors,\ + dataset_name="multiple_dataset") + + def set_parts(self,userdef_parts): + self.parts=userdef_parts + + def set_colors(self,userdef_colors): + self.colors=userdef_colors + + def get_parts(self): + return self.parts + + def get_colors(self): + return self.colors + + def get_dataset_type(self): + return self.dataset_type + + def generate_train_data(self): + print("generating training data:") + train_img_paths_list,train_targets_list=[],[] + #generate training data individually + for dataset_idx,dataset in enumerate(self.combined_dataset_list): + print(f"generating training data from dataset:{dataset_idx} {dataset.dataset_type.name}") + part_img_paths_list,part_targets_list=dataset.get_train_dataset(in_list=True) + train_img_paths_list+=part_img_paths_list + train_targets_list+=part_targets_list + #shuffle training data + print("shuffling all combined training data...") + shuffle_list=[{"image_path":img_path,"target":target} for img_path,target in zip(train_img_paths_list,train_targets_list)] + random.shuffle(shuffle_list) + train_img_paths_list=[shuffle_dict["image_path"] for shuffle_dict in shuffle_list] + train_targets_list=[shuffle_dict["target"] for shuffle_dict in shuffle_list] + print("shuffling training data finished!") + print(f"total {len(train_img_paths_list)} combined training data generated!") + return train_img_paths_list,train_targets_list + + def generate_eval_data(self): + print("temporarily using the evaluation data from the first combined dataset!") + eval_img_file_list,eval_img_id_list=self.combined_dataset_list[0].generate_eval_data() + print(f"total {len(eval_img_file_list)} evaluation data generated!") + return eval_img_file_list,eval_img_id_list + + def get_train_dataset(self): + train_img_paths_list,train_targets_list=self.generate_train_data() + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(train_img_paths_list) == len(train_targets_list) + for _input, _target in zip(train_img_paths_list, train_targets_list): + yield _input.encode('utf-8'), cPickle.dumps(_target) + + train_dataset = tf.data.Dataset.from_generator(generator, output_types=(tf.string, tf.string)) + return train_dataset + + def get_eval_dataset(self): + eval_img_file_list,eval_img_id_list=self.generate_eval_data() + #tensorflow data pipeline + def generator(): + """TF Dataset generator.""" + assert len(eval_img_file_list)==len(eval_img_id_list) + for img_file,img_id in zip(eval_img_file_list,eval_img_id_list): + yield img_file.encode("utf-8"),img_id + + eval_dataset = tf.data.Dataset.from_generator(generator,output_types=(tf.string,tf.int32)) + return eval_dataset + + def official_eval(self,pd_json,eval_dir="./eval_dir"): + print("temporarily using the official_eval from the first combined dataset!") + return self.combined_dataset_list[0].official_eval(pd_json,eval_dir) \ No newline at end of
file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__init__.py new file mode 100644 index 000000000..fac584811 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__init__.py @@ -0,0 +1,514 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import logging +from functools import partial +from .common import TRAIN,MODEL,DATA,KUNGFU,BACKBONE +from .backbones import MobilenetV1_backbone +from .backbones import MobilenetV2_backbone +from .backbones import MobilenetDilated_backbone +from .backbones import MobilenetThin_backbone +from .backbones import MobilenetSmall_backbone +from .backbones import vggtiny_backbone +from .backbones import vgg16_backbone +from .backbones import vgg19_backbone +from .backbones import Resnet18_backbone +from .backbones import Resnet50_backbone +from .pretrain import single_pretrain +from .common import log_model as log +from .augmentor import BasicAugmentor +from .examine import exam_model_weights, exam_npz_dict_weights, exam_npz_weights +from .processor import ImageProcessor + +#claim: +#all the model preprocessor,postprocessor,and visualizer processing logic are written in 'channels_first' data_format +#input data in "channels_last" data_format will be converted to "channels_first" format first and then handled + +def get_model(config): + '''get model based on config object + + construct and return a model based on the configured model_type and model_backbone. + each preset model architecture has a default backbone, replace it with chosen common model_backbones allow user to + change model computation complexity to adapt to application scene. + + Parameters + ---------- + arg1 : config object + the config object return by Config.get_config() function, which includes all the configuration information. + + Returns + ------- + tensorlayer.models.MODEL + a model object inherited from tensorlayer.models.MODEL class, has configured model architecture and chosen + model backbone. can be user defined architecture by using Config.set_model_architecture() function. 
+ ''' + model=config.model + #user configure model arch themselves + if("model_arch" in model): + log("Using user self-defined model arch!") + ret_model=model.model_arch(config) + #using defualt arch + else: + backbone=None + if("model_backbone" in model): + model_backbone=model.model_backbone + if(model_backbone==BACKBONE.Default): + log(f"Using default model backbone!") + elif(model_backbone==BACKBONE.Mobilenetv1): + backbone=MobilenetV1_backbone + log(f"Setting MobilnetV1_backbone!") + elif(model_backbone==BACKBONE.Mobilenetv2): + backbone=MobilenetV2_backbone + log(f"Setting MobilenetV2_backbone!") + elif(model_backbone==BACKBONE.MobilenetDilated): + backbone=MobilenetDilated_backbone + log(f"Setting MobilenetDilated_backbone!") + elif(model_backbone==BACKBONE.MobilenetThin): + backbone=MobilenetThin_backbone + log(f"Setting MobilenetThin_backbone!") + elif(model_backbone==BACKBONE.MobilenetSmall): + backbone=MobilenetSmall_backbone + log("Setting MobilenetSmall_backbone!") + elif(model_backbone==BACKBONE.Vggtiny): + backbone=vggtiny_backbone + log(f"Setting Vggtiny_backbone!") + elif(model_backbone==BACKBONE.Vgg16): + backbone=vgg16_backbone + log(f"Setting Vgg16_backbone!") + elif(model_backbone==BACKBONE.Vgg19): + backbone=vgg19_backbone + log(f"Setting Vgg19_backbone!") + elif(model_backbone==BACKBONE.Resnet18): + backbone=Resnet18_backbone + log(f"Setting Resnet18_backbone!") + elif(model_backbone==BACKBONE.Resnet50): + backbone=Resnet50_backbone + log(f"Setting Resnet50_backbone!") + else: + raise NotImplementedError(f"Unknown model backbone {model_backbone}") + + model_type=model.model_type + dataset_type=config.data.dataset_type + pretraining=config.pretrain.enable + log(f"Enable model backbone pretraining:{pretraining}") + if(model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose): + from .openpose.utils import get_parts + from .openpose.utils import get_limbs + model.parts=get_parts(dataset_type) + model.limbs=get_limbs(dataset_type) + elif(model_type == MODEL.PoseProposal): + from .pose_proposal.utils import get_parts + from .pose_proposal.utils import get_limbs + model.parts=get_parts(dataset_type) + model.limbs=get_limbs(dataset_type) + elif(model_type == MODEL.Pifpaf): + from .pifpaf.utils import get_parts + from .pifpaf.utils import get_limbs + model.parts=get_parts(dataset_type) + model.limbs=get_limbs(dataset_type) + + custom_parts=config.model.custom_parts + custom_limbs=config.model.custom_limbs + if(custom_parts!=None): + log("Using user-defined model parts") + model.parts=custom_parts + if(custom_limbs!=None): + log("Using user-defined model limbs") + model.limbs=custom_limbs + + #set model + if model_type == MODEL.Openpose: + from .openpose import OpenPose as model_arch + ret_model=model_arch(parts=model.parts,n_pos=len(model.parts),limbs=model.limbs,n_limbs=len(model.limbs),num_channels=model.num_channels,\ + hin=model.hin,win=model.win,hout=model.hout,wout=model.wout,backbone=backbone,pretraining=pretraining,data_format=model.data_format) + elif model_type == MODEL.LightweightOpenpose: + from .openpose import LightWeightOpenPose as model_arch + ret_model=model_arch(parts=model.parts,n_pos=len(model.parts),limbs=model.limbs,n_limbs=len(model.limbs),num_channels=model.num_channels,\ + hin=model.hin,win=model.win,hout=model.hout,wout=model.wout,backbone=backbone,pretraining=pretraining,data_format=model.data_format) + elif model_type == MODEL.MobilenetThinOpenpose: + from .openpose import 
MobilenetThinOpenpose as model_arch + ret_model=model_arch(parts=model.parts,n_pos=len(model.parts),limbs=model.limbs,n_limbs=len(model.limbs),num_channels=model.num_channels,\ + hin=model.hin,win=model.win,hout=model.hout,wout=model.wout,backbone=backbone,pretraining=pretraining,data_format=model.data_format) + elif model_type == MODEL.PoseProposal: + from .pose_proposal import PoseProposal as model_arch + ret_model=model_arch(parts=model.parts,K_size=len(model.parts),limbs=model.limbs,L_size=len(model.limbs),hnei=model.hnei,wnei=model.wnei,lmd_rsp=model.lmd_rsp,\ + lmd_iou=model.lmd_iou,lmd_coor=model.lmd_coor,lmd_size=model.lmd_size,lmd_limb=model.lmd_limb,backbone=backbone,\ + pretraining=pretraining,data_format=model.data_format) + elif model_type == MODEL.Pifpaf: + from .pifpaf import Pifpaf as model_arch + ret_model=model_arch(parts=model.parts,n_pos=len(model.parts),limbs=model.limbs,n_limbs=len(model.limbs),hin=model.hin,win=model.win,\ + scale_size=32,pretraining=pretraining,data_format=model.data_format) + else: + raise RuntimeError(f'unknown model type {model_type}') + log(f"Using {model_type.name} model arch!") + info_propt() + return ret_model + +def get_pretrain(config): + return partial(single_pretrain,config=config) + +def get_train(config): + '''get train pipeline based on config object + + construct the train pipeline based on the chosen model_type and dataset_type, + the default is a single train pipeline performed on a single GPU, + and it can be switched to a parallel train pipeline via the Config.set_train_type() function + + the returned train pipeline can be easily used by train(model,dataset), + where model is obtained by Model.get_model(), dataset is obtained by Dataset.get_dataset() + + the train pipeline will: + 1.store and restore ckpt in directory ./save_dir/model_name/model_dir + 2.log loss information in directory ./save_dir/model_name/log.txt + 3.visualize model output periodically during training in directory ./save_dir/model_name/train_vis_dir + the newest model is at path ./save_dir/model_name/model_dir/newest_model.npz + + Parameters + ---------- + arg1 : config object + the config object returned by the Config.get_config() function, which includes all the configuration information. + + Returns + ------- + function + a train pipeline function which takes model and dataset as input, and can be either a single train or a parallel train pipeline.
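Examples
--------
(illustrative sketch only; `model` and `dataset` are obtained via Model.get_model() and Dataset.get_dataset() as described above)
>>> train = Model.get_train(config)
>>> train(model, dataset)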
+ + ''' + # determine train process + model_config = config.model + model_type = config.model.model_type + train_type = config.train.train_type + # determine train type + if(train_type == TRAIN.Single_train): + from .train import single_train as train_pipeline + log("Single train procedure initialized!") + elif(train_type == TRAIN.Parallel_train): + from .train import parallel_train as train_pipeline + log("Parallel train procedure initialized!") + # get augmentor + Augmentor = get_augmentor(config) + augmentor = Augmentor(**model_config) + log("Augmentor initialized!") + # get preprocessor + PreProcessor = get_preprocessor(config) + preprocessor = PreProcessor(**model_config) + log("Preprocessor initialized!") + # get postprocessor + PostProcessor = get_postprocessor(config) + postprocessor = PostProcessor(**model_config) + log("Postprocessor initialized!") + # get visualizer + Visualizer = get_visualizer(config) + visualizer = Visualizer(save_dir=config.train.vis_dir,**model_config) + log("Visualizer initialized!") + + # assemble training pipeline + train = partial( + train_pipeline, + config = config, + augmentor = augmentor, + preprocessor = preprocessor, + postprocessor = postprocessor, + visualizer = visualizer + ) + return train + +def get_evaluate(config): + '''get evaluate pipeline based on config object + + construct evaluate pipeline based on the chosen model_type and dataset_type, + the evaluation metric fellows the official metrics of the chosen dataset. + + the returned evaluate pipeline can be easily used by evaluate(model,dataset), + where model is obtained by Model.get_model(), dataset is obtained by Dataset.get_dataset() + + the evaluate pipeline will: + 1.loading newest model at path ./save_dir/model_name/model_dir/newest_model.npz + 2.perform inference and parsing over the chosen evaluate dataset + 3.visualize model output in evaluation in directory ./save_dir/model_name/eval_vis_dir + 4.output model metrics by calling dataset.official_eval() + + Parameters + ---------- + arg1 : config object + the config object return by Config.get_config() function, which includes all the configuration information. + + Returns + ------- + function + a evaluate pipeline function which takes model and dataset as input, and output model metrics + + ''' + model_type=config.model.model_type + if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose: + from .openpose import evaluate + elif model_type == MODEL.PoseProposal: + from .pose_proposal import evaluate + elif model_type == MODEL.Pifpaf: + from .pifpaf import evaluate + else: + raise RuntimeError(f'unknown model type {model_type}') + evaluate=partial(evaluate,config=config) + log(f"evaluating {model_type.name} model...") + return evaluate + +def get_test(config): + '''get test pipeline based on config object + + construct test pipeline based on the chosen model_type and dataset_type, + the test metric fellows the official metrics of the chosen dataset. 
+ + the returned test pipeline can be easily used by test(model,dataset), + where model is obtained by Model.get_model(), dataset is obtained by Dataset.get_dataset() + + the test pipeline will: + 1.loading newest model at path ./save_dir/model_name/model_dir/newest_model.npz + 2.perform inference and parsing over the chosen test dataset + 3.visualize model output in test in directory ./save_dir/model_name/test_vis_dir + 4.output model test result file at path ./save_dir/model_name/test_vis_dir/pd_ann.json + 5.the test dataset ground truth is often preserved by the dataset creator, you may need to upload the test result file to the official server to get model test metrics + + Parameters + ---------- + arg1 : config object + the config object return by Config.get_config() function, which includes all the configuration information. + + Returns + ------- + function + a test pipeline function which takes model and dataset as input, and output model metrics + + ''' + model_type=config.model.model_type + if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose: + from .openpose import test + elif model_type == MODEL.PoseProposal: + from .pose_proposal import test + elif model_type == MODEL.Pifpaf: + from .pifpaf import test + else: + raise RuntimeError(f'unknown model type {model_type}') + test=partial(test,config=config) + log(f"testing {model_type.name} model...") + return test + +def get_augmentor(config): + if(config.model.custom_augmentor is not None): + return config.model.custom_augmentor + else: + return BasicAugmentor + +def get_preprocessor(config): + '''get a preprocessor class based on the specified model_type + + get the preprocessor class of the specified kind of model to help user directly construct their own + train pipeline(rather than using the integrated train pipeline) when in need. + + the preprocessor class is able to construct a preprocessor object that could convert the image and annotation to + the model output format for training. + + Parameters + ---------- + arg1 : config + config object return by Config.get_config function + + Returns + ------- + class + a preprocessor class of the specified kind of model + ''' + model_type = config.model.model_type + if(config.model.custom_preprocessor is not None): + return config.model.custom_preprocessor + else: + if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose: + from .openpose import PreProcessor + elif model_type == MODEL.PoseProposal: + from .pose_proposal import PreProcessor + elif model_type == MODEL.Pifpaf: + from .pifpaf import PreProcessor + return PreProcessor + +def get_postprocessor(config): + '''get a postprocessor class based on the specified model_type + + get the postprocessor class of the specified kind of model to help user directly construct their own + evaluate pipeline(rather than using the integrated evaluate pipeline) or infer pipeline(to check the model utility) + when in need. + + the postprocessor is able to parse the model output feature map and output parsed human objects of Human class, + which contains all dectected keypoints. 
+ + Parameters + ---------- + arg1 : config + config object return by Config.get_config function + + Returns + ------- + function + a postprocessor class of the specified kind of model + ''' + model_type = config.model.model_type + if(config.model.custom_postprocessor is not None): + return config.model.custom_postprocessor + else: + if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose: + from .openpose import PostProcessor + elif model_type == MODEL.PoseProposal: + from .pose_proposal import PostProcessor + elif model_type == MODEL.Pifpaf: + from .pifpaf import PostProcessor + return PostProcessor + +def get_visualizer(config): + '''get visualize function based model_type + + get the visualize function of the specified kind of model to help user construct thier own + evaluate pipeline rather than using the integrated train or evaluate pipeline directly when in need + + the visualize function is able to visualize model's output feature map, which is helpful for + training and evaluation analysis. + + Parameters + ---------- + arg1 : config + config object return by Config.get_config function + + Returns + ------- + function + a visualize function of the specified kind of model + ''' + model_type = config.model.model_type + if(config.model.custom_visualizer is not None): + return config.model.custom_visualizer + else: + if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type==MODEL.MobilenetThinOpenpose: + from .openpose import Visualizer + elif model_type == MODEL.PoseProposal: + from .pose_proposal import Visualizer + elif model_type == MODEL.Pifpaf: + from .pifpaf import Visualizer + return Visualizer + +def get_imageprocessor(): + return ImageProcessor + +def info(msg): + info_logger = logging.getLogger("INFO") + info_logger.info(msg) + +def info_propt(): + # information propt + print("\n") + info("Welcome to Hyperpose Development Platform!") + print("\n"+"="*100) + + # variable definition + info("Variable Definition:") + + info("parts: \t the joints of human body, Enum class") + info("limbs: \t the limbs of human body, List of tuple.\t example: [(joint index 1, joint index 2),...]") + info("colors:\tthe visualize color for each parts, List.\t example: [(0,0,255),...] (optional)") + + info("n_parts:\tnumber of human body joints, int.\t example: n_parts=len(parts)") + info("n_limbs:\tnumber of human body limbs, int.\t example: n_limbs=len(limbs)") + + info("hin: \t height of the model input image, int.\t example: 368") + info("win: \t width of the model input image, int.\t example: 368") + info("hout: \t height of model output heatmap, int.\t example: 46") + info("wout: \t wout of model output heatmap, int.\t example: 46") + print("\n"+"="*100) + + # object definition + info("Object Definition:") + info("config: a object contains all the configurations used to assemble the model, dataset, and pipeline. easydict object.\n"+\ + "\t return by the `Config.get_config` function.\n") + + info("model: a neural network takes in the image and output the calculated activation map. BasicModel object.\n"\ + +"\t have `forward`, `cal_loss`, `infer`(optional) functions.\n" + +"\t custom: users could inherit the Model.BasicModel class for customization.\n" + +"\t example: please refer to Model.LightWeightOpenPose class for details. \n") + + info("dataset: a dataset generator provides train and evaluate dataset. 
Base_dataset object.\n"\ + +"\t have `get_train_dataset` and `get_eval_dataset` functions.\n" \ + +"\t custom: users could inherit the Dataset.BasicDataset class for customizationn\n" + +"\t example: please refer to Datset.CocoDataset class for details.\n") + + info("augmentor: a data augmentor that takes in the image, key point annotation, mask and perform affine transformation "\ + +"for data augmentation. BasicAumentor object.\n"\ + +"\t have `process` and `process_only_image` functions.\n" + +"\t custom: users could inherit the Model.BasicAugmentor class for customization.\n" + +"\t example: please refer to Model.BasicAugmentor class for details.\n") + + info("preprocessor: a data preprocessor that takes in the image, key point annotation and mask to produce the target heatmap\n"\ + +"\tfor model to calculate loss and learn. BasicPreProcessor object.\n"\ + +"\t have `process` function.\n" + +"\t custom: users could inherit the Model.BasicPreProcessor class for customizationn\n" + +"\t example: please refer to Model.openpose.PreProcessor class for details.\n") + + info("postprocessor: a data postprocessor that takes in the predicted heatmaps and infer the human body joints and limbs.\n"\ + +"\t have `process` function. BasicPostProcessor object.\n" + +"\t custom: users could inherit the Model.BasicPostProcessor class for customization\n" + +"\t example: please refer to the Model.openpose.PostProcessor class for details.\n") + + info("visualizer: a visualizer that takes in the predicted heatmaps and output visualization images for train and evaluation.\n"\ + +"\t have `visualize` and `visualize_comapre` functions. BasicVisualizer object.\n"\ + +"\t custom: users could inherit the Model.BasicVisualizer class for customization.\n" + +"\t example: please refer to the Model.openpose.Visualizer class for details.\n" + ) + print("\n"+"="*100) + + info("Development platform basic usage:\n"\ + +"\t1.Use the `sets` APIs of Config module to configure the pipeline, choose the algorithm type, the neural network\n"\ + +"\tbackbone, the dataset etc. that best fit your application scenario.\n"\ + +"\t2.Use the `get_model` API of Model module to get the configured model, use `get_dataset` API of dataset module to\n"\ + +"\tget the configured dataset, use the `get_train` API of Model module to get the configured train procedure. Then start\n"\ + +"\ttraining! Check the loss values and sample training result images during training.\n" + +"\t3.Use the `get_eval` API of Model module to get the configured evaluation procedure. evaluate the model you trained. \n"\ + +"\t4.Eport the model to .pb, .onnx, .tflite formats for deployment." 
+ ) + + info("Development platform custom usage:\n"\ + +"\t Hyperpose enables users to customize the model, dataset, augmentor, preprocessor, postprocessor and visualizer.\n"\ + +"\t Users could inherit the corresponding basic class (mentioned above), implement the corresponding member functions\n" + +"\trequired according to the function annotations, then use the Config.set_custom_xxx APIs to set the custom component.") + + info("Additional features:\n"\ + +"\t 1.Parallel distributed training with Kungfu.\n" + +"\t 2.Domain adaptation to leverage unlabeled data.\n" + +"\t 3.Neural network backbone pretraining.") + + info("Currently all the procedures are unified to the `channels_first` data format.") + info("Currently all model weights are saved in `npz_dict` format.") + print("\n"+"="*100) \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/augmentor.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/augmentor.py new file mode 100644 index 000000000..4ae244590 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/augmentor.py @@ -0,0 +1,102 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
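(The basic-usage steps printed by info_propt() above correspond roughly to the following driver script. This is a hedged sketch, not part of the patch: it assumes the `hyperpose` package added in this commit is importable and that the Config module's `set` APIs have already been used to choose the model, backbone and dataset; only the `get_*` calls documented in this file are shown.)
from hyperpose import Config, Model, Dataset
config = Config.get_config()            # step 1 result: assembled config object
model = Model.get_model(config)         # step 2: configured model
dataset = Dataset.get_dataset(config)   # step 2: configured dataset
train = Model.get_train(config)         # step 2: configured train procedure
train(model, dataset)                   # watch losses and sample visualizations during training
evaluate = Model.get_evaluate(config)   # step 3: evaluation procedure (get_evaluate in this module)
evaluate(model, dataset)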
+ + +import npu_device as npu +npu.open().as_default() + +import cv2 +import numpy as np +import tensorflow as tf +import tensorlayer as tl + +class BasicAugmentor: + def __init__(self,hin,win,angle_min=-30,angle_max=30,zoom_min=0.5,zoom_max=0.8,flip_list=None, *args, **kargs): + self.hin=hin + self.win=win + self.angle_min=angle_min + self.angle_max=angle_max + self.zoom_min=zoom_min + self.zoom_max=zoom_max + self.flip_list=flip_list + + def process(self,image,annos,mask,bbxs=None): + # get transform matrix (use the configured rotation/zoom ranges) + image_h,image_w,_=image.shape + M_rotate = tl.prepro.affine_rotation_matrix(angle=(self.angle_min, self.angle_max)) # original paper: -40~40 + M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=(self.zoom_min, self.zoom_max)) # original paper: 0.5~1.1 + M_combined = M_rotate.dot(M_zoom) + transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=image_w, y=image_h) + # apply data augmentation + image = tl.prepro.affine_transform_cv2(image, transform_matrix) + annos = tl.prepro.affine_transform_keypoints(annos, transform_matrix) + mask = tl.prepro.affine_transform_cv2(mask, transform_matrix, border_mode='replicate') + if(self.flip_list!=None): + image, annos, mask = tl.prepro.keypoint_random_flip(image, annos, mask, prob=0.5, flip_list=self.flip_list) + image, annos, mask = tl.prepro.keypoint_resize_random_crop(image, annos, mask, size=(self.hin, self.win)) + if(type(bbxs)==np.ndarray): + # prepare transform bbx + transform_bbx=np.zeros(shape=(bbxs.shape[0],4,2)) + bbxs_x,bbxs_y,bbxs_w,bbxs_h=bbxs[:,0],bbxs[:,1],bbxs[:,2],bbxs[:,3] + transform_bbx[:,0,0],transform_bbx[:,0,1]=bbxs_x,bbxs_y #left_top + transform_bbx[:,1,0],transform_bbx[:,1,1]=bbxs_x+bbxs_w,bbxs_y #right_top + transform_bbx[:,2,0],transform_bbx[:,2,1]=bbxs_x,bbxs_y+bbxs_h #left_bottom + transform_bbx[:,3,0],transform_bbx[:,3,1]=bbxs_x+bbxs_w,bbxs_y+bbxs_h #right_bottom + transform_bbx=tl.prepro.affine_transform_keypoints(transform_bbx,transform_matrix) + transform_bbx=np.array(transform_bbx) + final_bbxs=np.zeros(shape=bbxs.shape) + for bbx_id in range(0,transform_bbx.shape[0]): + bbx=transform_bbx[bbx_id,:,:] + bbx_min_x=np.amin(bbx[:,0]) + bbx_max_x=np.amax(bbx[:,0]) + bbx_min_y=np.amin(bbx[:,1]) + bbx_max_y=np.amax(bbx[:,1]) + final_bbxs[bbx_id,0]=bbx_min_x + final_bbxs[bbx_id,1]=bbx_min_y + final_bbxs[bbx_id,2]=bbx_max_x-bbx_min_x + final_bbxs[bbx_id,3]=bbx_max_y-bbx_min_y + resize_ratio=max(self.hin/image_h,self.win/image_w) + # scale both the transformed widths and heights by the resize ratio + final_bbxs[:,2]=final_bbxs[:,2]*resize_ratio + final_bbxs[:,3]=final_bbxs[:,3]*resize_ratio + bbxs=final_bbxs + return image,annos,mask,bbxs + return image,annos,mask,bbxs + + def process_only_image(self,image): + # print(f"process_only_image dtype:{image.dtype} shape:{image.shape}") + # get transform matrix (use the configured rotation/zoom ranges) + image_h,image_w,_=image.shape + M_rotate = tl.prepro.affine_rotation_matrix(angle=(self.angle_min, self.angle_max)) # original paper: -40~40 + M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=(self.zoom_min, self.zoom_max)) # original paper: 0.5~1.1 + M_combined = M_rotate.dot(M_zoom) + transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=image_w, y=image_h) + # apply data augmentation + image = tl.prepro.affine_transform_cv2(image, transform_matrix) + image, _, _ = tl.prepro.keypoint_resize_random_crop(image, [], None, size=(self.hin, self.win)) + return image + \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/backbones.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/backbones.py new file mode 100644 index 000000000..1e5162265 --- /dev/null +++
b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/backbones.py @@ -0,0 +1,730 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from tensorlayer import layers +from tensorlayer.models import Model +from tensorlayer.layers import BatchNorm2d, Conv2d, DepthwiseConv2d, LayerList, MaxPool2d, SeparableConv2d,\ + MeanPool2d, Dense, Flatten, UpSampling2d + +class MobilenetV1_backbone(Model): + def __init__(self,scale_size=8,data_format="channels_last",pretraining=False): + super().__init__() + self.name="MobilenetV1_backbone" + self.data_format=data_format + self.scale_size=scale_size + self.pretraining=pretraining + self.main_layer_list=[] + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + self.main_layer_list+=self.conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),name="block_1") + self.main_layer_list+=self.separable_conv_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),name="block_2") + self.main_layer_list+=self.separable_conv_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(2,2),name="block_3") + self.main_layer_list+=self.separable_conv_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),name="block_4") + self.main_layer_list+=self.separable_conv_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(2,2),name="block_5") + self.main_layer_list+=self.separable_conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),name="block_6") + self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=(1,1),name="block_7") + self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),name="block_8") + self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),name="block_9") + self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),name="block_10") + self.out_channels=512 + if(self.scale_size==32 or self.pretraining): + 
self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(2,2),name="block_11") + self.main_layer_list+=self.separable_conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),name="block_12") + self.main_layer_list+=self.separable_conv_block(n_filter=1024,in_channels=512,filter_size=(3,3),strides=(2,2),name="block_13") + self.main_layer_list+=self.separable_conv_block(n_filter=1024,in_channels=1024,filter_size=(3,3),strides=(1,1),name="block_14") + self.out_channels=1024 + if(self.pretraining): + self.main_layer_list+=[MeanPool2d(filter_size=(7,7),strides=(1,1),data_format=self.data_format,name="meanpool_1")] + self.main_layer_list+=[Flatten(name="Flatten")] + self.main_layer_list+=[Dense(n_units=1000,in_channels=1024,act=None,name="dense_1")] + self.main_block=LayerList(self.main_layer_list) + + def forward(self,x): + return self.main_block.forward(x) + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + + def conv_block(self,n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),padding="SAME",name="conv_block"): + layer_list=[] + layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,\ + data_format=self.data_format,padding=padding,name=f"{name}_conv1")) + layer_list.append(BatchNorm2d(num_features=n_filter,is_train=True,act=tf.nn.relu,data_format=self.data_format,name=f"{name}_bn1")) + return layer_list + + def separable_conv_block(self,n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),name="spconv_block"): + layer_list=[] + layer_list.append(DepthwiseConv2d(in_channels=in_channels,filter_size=filter_size,strides=strides,\ + data_format=self.data_format,name=f"{name}_dw1")) + layer_list.append(BatchNorm2d(num_features=in_channels,is_train=True,act=tf.nn.relu,data_format=self.data_format,name=f"{name}_bn1")) + layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=(1,1),strides=(1,1),data_format=self.data_format,name=f"{name}_dw2")) + layer_list.append(BatchNorm2d(num_features=n_filter,is_train=True,act=tf.nn.relu,data_format=self.data_format,name=f"{name}_bn2")) + return layer_list + +class MobilenetV2_backbone(Model): + def __init__(self,scale_size=8,data_format="channels_last",pretraining=False): + super().__init__() + self.name="MobilenetV2_backbone" + self.data_format=data_format + self.scale_size=scale_size + self.pretraining=pretraining + self.main_layer_list=[] + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + #block_1 n=1 + self.block_1_1=Conv2d(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),data_format=self.data_format,name="block1_conv1") + self.block_1_2=BatchNorm2d(num_features=32,is_train=True,act=tf.nn.relu6,data_format=self.data_format,name="blcok1_bn1") + #block_2 n=1 + self.block_2_1=self.InvertedResidual(n_filter=16,in_channels=32,strides=(1,1),exp_ratio=1,data_format=self.data_format,name="block2") + #block_3 n=2 + self.block_3_1=self.InvertedResidual(n_filter=24,in_channels=16,strides=(2,2),exp_ratio=6,data_format=self.data_format,name="block3_1") + self.block_3_2=self.InvertedResidual(n_filter=24,in_channels=24,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block3_2") + #block_4 n=3 + self.block_4_1=self.InvertedResidual(n_filter=32,in_channels=24,strides=(2,2),exp_ratio=6,data_format=self.data_format,name="block4_1") + 
self.block_4_2=self.InvertedResidual(n_filter=32,in_channels=32,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block4_2") + self.block_4_3=self.InvertedResidual(n_filter=32,in_channels=32,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block4_3") + #block_5 n=4 + self.block_5_1=self.InvertedResidual(n_filter=64,in_channels=32,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block5_1") + self.block_5_2=self.InvertedResidual(n_filter=64,in_channels=64,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block5_2") + self.block_5_3=self.InvertedResidual(n_filter=64,in_channels=64,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block5_3") + self.block_5_4=self.InvertedResidual(n_filter=64,in_channels=64,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block5_4") + self.out_channels=64 + if(self.scale_size==32 or self.pretraining): + #block_6 n=3 + self.block_6_1=self.InvertedResidual(n_filter=96,in_channels=64,strides=(2,2),exp_ratio=6,data_format=self.data_format,name="block6_1") + self.block_6_2=self.InvertedResidual(n_filter=96,in_channels=96,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block6_2") + self.block_6_3=self.InvertedResidual(n_filter=96,in_channels=96,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block6_3") + #block_7 n=3 + self.block_7_1=self.InvertedResidual(n_filter=160,in_channels=96,strides=(2,2),exp_ratio=6,data_format=self.data_format,name="block7_1") + self.block_7_2=self.InvertedResidual(n_filter=160,in_channels=160,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block7_2") + self.block_7_3=self.InvertedResidual(n_filter=160,in_channels=160,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block7_3") + #block_8 n=1 + self.block_8=self.InvertedResidual(n_filter=320,in_channels=160,strides=(1,1),exp_ratio=6,data_format=self.data_format,name="block8") + self.out_channels=320 + if(self.pretraining): + self.block_9_1=Conv2d(n_filter=1280,in_channels=320,filter_size=(1,1),strides=(1,1),data_format=self.data_format,name="block9_conv1") + self.block_9_2=MeanPool2d(filter_size=(7,7),strides=(1,1),data_format=self.data_format,name="block9_pool1") + self.block_9_3=Conv2d(n_filter=1000,in_channels=1280,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format,name="block9_conv2") + self.block_9_4=Flatten(name="Flatten") + + def forward(self,x): + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + x=self.block_1_1.forward(x) + x=self.block_1_2.forward(x) + x=self.block_2_1.forward(x) + x=self.block_3_1.forward(x) + x=self.block_3_2.forward(x) + x=self.block_4_1.forward(x) + x=self.block_4_2.forward(x) + x=self.block_4_3.forward(x) + x=self.block_5_1.forward(x) + x=self.block_5_2.forward(x) + x=self.block_5_3.forward(x) + x=self.block_5_4.forward(x) + if(self.scale_size==32 or self.pretraining): + x=self.block_6_1.forward(x) + x=self.block_6_2.forward(x) + x=self.block_6_3.forward(x) + x=self.block_7_1.forward(x) + x=self.block_7_2.forward(x) + x=self.block_7_3.forward(x) + x=self.block_8.forward(x) + if(self.pretraining): + x=self.block_9_1.forward(x) + x=self.block_9_2.forward(x) + x=self.block_9_3.forward(x) + return x + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + + class InvertedResidual(Model): + def __init__(self,n_filter=128,in_channels=128,strides=(1,1),exp_ratio=6,data_format="channels_first",name="block"): + super().__init__() 
+ self.n_filter=n_filter + self.in_channels=in_channels + self.strides=strides + self.exp_ratio=exp_ratio + self.data_format=data_format + self.name=name + self.hidden_dim=self.exp_ratio*self.in_channels + self.identity=False + if(self.strides==(1,1) and self.in_channels==self.n_filter): + self.identity=True + if(self.exp_ratio==1): + self.main_block=LayerList([ + DepthwiseConv2d(in_channels=self.hidden_dim,filter_size=(3,3),strides=self.strides,\ + b_init=None,data_format=self.data_format,name=f"{self.name}_conv1"), + BatchNorm2d(num_features=self.hidden_dim,is_train=True,act=tf.nn.relu6,data_format=self.data_format,name=f"{self.name}_bn1"), + Conv2d(n_filter=self.n_filter,in_channels=self.hidden_dim,filter_size=(1,1),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{self.name}_dw1"), + BatchNorm2d(num_features=self.n_filter,is_train=True,act=None,data_format=self.data_format,name=f"{self.name}_bn2") + ]) + else: + self.main_block=LayerList([ + Conv2d(n_filter=self.hidden_dim,in_channels=self.in_channels,filter_size=(1,1),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{self.name}_conv1"), + BatchNorm2d(num_features=self.hidden_dim,is_train=True,act=tf.nn.relu6,data_format=self.data_format,name=f"{self.name}_bn1"), + DepthwiseConv2d(in_channels=self.hidden_dim,filter_size=(3,3),strides=self.strides,\ + b_init=None,data_format=self.data_format,name=f"{self.name}_dw1"), + BatchNorm2d(num_features=self.hidden_dim,is_train=True,act=tf.nn.relu6,data_format=self.data_format,name=f"{self.name}_bn2"), + Conv2d(n_filter=self.n_filter,in_channels=self.hidden_dim,filter_size=(1,1),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{self.name}_conv2") + ]) + + def forward(self,x): + if(self.identity): + return x+self.main_block.forward(x) + else: + return self.main_block.forward(x) + +initializer=tl.initializers.truncated_normal(stddev=0.005) +def conv_block(n_filter,in_channels,filter_size=(3,3),strides=(1,1),dilation_rate=(1,1),W_init=initializer,b_init=initializer,padding="SAME",data_format="channels_first"): + layer_list=[] + layer_list.append(Conv2d(n_filter=n_filter,filter_size=filter_size,strides=strides,in_channels=in_channels,\ + dilation_rate=dilation_rate,padding=padding,W_init=initializer,b_init=initializer,data_format=data_format)) + layer_list.append(BatchNorm2d(decay=0.99, act=tf.nn.relu,num_features=n_filter,data_format=data_format,is_train=True)) + return layers.LayerList(layer_list) + +def dw_conv_block(n_filter,in_channels,filter_size=(3,3),strides=(1,1),dilation_rate=(1,1),W_init=initializer,b_init=initializer,data_format="channels_first"): + layer_list=[] + layer_list.append(DepthwiseConv2d(filter_size=filter_size,strides=strides,in_channels=in_channels, + dilation_rate=dilation_rate,W_init=initializer,b_init=None,data_format=data_format)) + layer_list.append(BatchNorm2d(decay=0.99,act=tf.nn.relu,num_features=in_channels,data_format=data_format,is_train=True)) + layer_list.append(Conv2d(n_filter=n_filter,filter_size=(1,1),strides=(1,1),in_channels=in_channels,W_init=initializer,b_init=None,data_format=data_format)) + layer_list.append(BatchNorm2d(decay=0.99,act=tf.nn.relu,num_features=n_filter,data_format=data_format,is_train=True)) + return layers.LayerList(layer_list) + +def nobn_dw_conv_block(n_filter,in_channels,filter_size=(3,3),strides=(1,1),W_init=initializer,b_init=initializer,data_format="channels_first"): + layer_list=[] + layer_list.append(DepthwiseConv2d(filter_size=filter_size,strides=strides,in_channels=in_channels, + 
act=tf.nn.relu,W_init=initializer,b_init=None,data_format=data_format)) + layer_list.append(Conv2d(n_filter=n_filter,filter_size=(1, 1),strides=(1, 1),in_channels=in_channels, + act=tf.nn.relu,W_init=initializer,b_init=None,data_format=data_format)) + return layers.LayerList(layer_list) + +class MobilenetDilated_backbone(Model): + def __init__(self,scale_size=8, data_format="channels_first", pretraining=False): + super().__init__() + self.scale_size = scale_size + self.data_format = data_format + self.pretraining = pretraining + self.out_channels=512 + self.scale_size=8 + if(self.scale_size==8): + strides=(1,1) + elif(self.scale_size==32 or self.pretraining): + strides=(2,2) + self.main_block=layers.LayerList([ + conv_block(n_filter=32,in_channels=3,data_format=self.data_format,strides=(2,2)), + dw_conv_block(n_filter=64,in_channels=32,data_format=self.data_format), + dw_conv_block(n_filter=128,in_channels=64,data_format=self.data_format,strides=(2,2)), + dw_conv_block(n_filter=128,in_channels=128,data_format=self.data_format), + dw_conv_block(n_filter=256,in_channels=128,data_format=self.data_format,strides=(2,2)), + dw_conv_block(n_filter=256,in_channels=256,data_format=self.data_format), + dw_conv_block(n_filter=512,in_channels=256,data_format=self.data_format), + dw_conv_block(n_filter=512,in_channels=512,data_format=self.data_format,dilation_rate=(2,2), strides=strides), + dw_conv_block(n_filter=512,in_channels=512,data_format=self.data_format), + dw_conv_block(n_filter=512,in_channels=512,data_format=self.data_format, strides=strides), + dw_conv_block(n_filter=512,in_channels=512,data_format=self.data_format), + dw_conv_block(n_filter=512,in_channels=512,data_format=self.data_format) + ]) + + def forward(self,x): + return self.main_block.forward(x) + +initial_w=tl.initializers.random_normal(stddev=0.01) +initial_b=tl.initializers.constant(value=0.0) + +def conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,padding="SAME",data_format="channels_first"): + layer_list=[] + layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,act=act,\ + W_init=initial_w,b_init=initial_b,data_format=data_format,padding=padding)) + layer_list.append(BatchNorm2d(num_features=n_filter,decay=0.999,is_train=True,act=act,data_format=data_format)) + return LayerList(layer_list) + +def separable_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),dilation_rate=(1,1),act=tf.nn.relu,data_format="channels_first"): + layer_list=[] + layer_list.append(DepthwiseConv2d(filter_size=filter_size,strides=strides,in_channels=in_channels, + dilation_rate=dilation_rate,W_init=initial_w,b_init=None,data_format=data_format)) + layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=in_channels,data_format=data_format,is_train=True)) + layer_list.append(Conv2d(n_filter=n_filter,filter_size=(1,1),strides=(1,1),in_channels=in_channels,W_init=initial_w,b_init=None,data_format=data_format)) + layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=n_filter,data_format=data_format,is_train=True)) + return layers.LayerList(layer_list) + +class MobilenetThin_backbone(Model): + def __init__(self,scale_size=8,data_format="channels_first", pretraining=False): + super().__init__() + self.scale_size = scale_size + self.data_format = data_format + self.pretraining = pretraining + self.out_channels=1152 + if(self.data_format=="channels_first"): + self.concat_dim=1 + else: + self.concat_dim=-1 + if(self.scale_size==8): + strides=(1,1) 
+ elif(self.scale_size==32 or self.pretraining): + strides=(2,2) + self.convblock_0=conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_1=separable_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_2=separable_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_3=separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_4=separable_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_5=separable_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_6=separable_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=strides,act=tf.nn.relu,data_format=self.data_format) + self.convblock_7=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_8=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_9=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=strides,act=tf.nn.relu,data_format=self.data_format) + self.convblock_10=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_11=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.maxpool=MaxPool2d(filter_size=(2,2),strides=(2,2),padding="SAME",data_format=self.data_format) + + def forward(self,x): + concat_list=[] + x=self.convblock_0.forward(x) + x=self.convblock_1.forward(x) + x=self.convblock_2.forward(x) + x=self.convblock_3.forward(x) + concat_list.append(self.maxpool.forward(x)) + x=self.convblock_4.forward(x) + x=self.convblock_5.forward(x) + x=self.convblock_6.forward(x) + x=self.convblock_7.forward(x) + concat_list.append(x) + x=self.convblock_8.forward(x) + x=self.convblock_9.forward(x) + x=self.convblock_10.forward(x) + x=self.convblock_11.forward(x) + concat_list.append(x) + x=tf.concat(concat_list,self.concat_dim) + return x + +class MobilenetSmall_backbone: + def __init__(self,scale_size=8,data_format="channels_first", pretraining=False): + super().__init__() + self.scale_size = scale_size + self.data_format = data_format + self.pretraining = pretraining + if(self.data_format=="channels_first"): + self.concat_dim=1 + else: + self.concat_dim=-1 + self.out_channels=704 + + if(self.scale_size == 8): + strides=(1,1) + elif(self.scale_size == 32 or self.pretraining): + strides=(2,2) + + self.convblock_0=conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_1=separable_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_2=separable_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_3=separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + 
self.convblock_4=separable_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format) + self.convblock_5=separable_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format) + self.convblock_6=separable_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=strides,act=tf.nn.relu,data_format=self.data_format) + self.convblock_7=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=strides,act=tf.nn.relu,data_format=self.data_format) + self.maxpool=MaxPool2d(filter_size=(2,2),strides=(2,2),padding="SAME",data_format=self.data_format) + self.upsample=UpSampling2d(scale=2,data_format=self.data_format) + + def forward(self,x): + concat_list=[] + x=self.convblock_0.forward(x) + x=self.convblock_1.forward(x) + concat_list.append(self.maxpool.forward(x)) + x=self.convblock_2.forward(x) + x=self.convblock_3.forward(x) + concat_list.append(x) + x=self.convblock_4.forward(x) + x=self.convblock_5.forward(x) + x=self.convblock_6.forward(x) + x=self.convblock_7.forward(x) + concat_list.append(self.upsample.forward(x)) + x=tf.concat(concat_list,self.concat_dim) + return x + +class vggtiny_backbone(Model): + def __init__(self,in_channels=3,scale_size=8,data_format="channels_first",pretraining=False): + super().__init__() + self.name="vggtiny_backbone" + self.in_channels=in_channels + self.data_format=data_format + self.scale_size=scale_size + self.pretraining=pretraining + self.main_layer_list=[] + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + self.main_layer_list+=self.conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),name="block_1_1") + self.main_layer_list+=self.conv_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),name="block_1_2") + self.main_layer_list+=[MaxPool2d(filter_size=(2,2),strides=(2,2),padding="SAME",data_format=self.data_format,name="maxpool_1")] + self.main_layer_list+=self.conv_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(1,1),name="block_2_1") + self.main_layer_list+=self.conv_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),name="block_2_2") + self.main_layer_list+=[MaxPool2d(filter_size=(2,2),strides=(2,2),padding="SAME",data_format=self.data_format,name="maxpool_2")] + self.main_layer_list+=self.conv_block(n_filter=200,in_channels=128,filter_size=(3,3),strides=(1,1),name="block_3_1") + self.main_layer_list+=self.conv_block(n_filter=200,in_channels=200,filter_size=(3,3),strides=(1,1),name="block_3_2") + self.main_layer_list+=self.conv_block(n_filter=200,in_channels=200,filter_size=(3,3),strides=(1,1),name="block_3_3") + self.main_layer_list+=[MaxPool2d(filter_size=(2,2),strides=(2,2),padding="SAME",data_format=self.data_format,name="maxpool_3")] + self.main_layer_list+=self.conv_block(n_filter=384,in_channels=200,filter_size=(3,3),strides=(1,1),name="block_4_1") + self.main_layer_list+=self.conv_block(n_filter=384,in_channels=384,filter_size=(3,3),strides=(1,1),name="block_4_2") + self.out_channels=384 + if(self.scale_size==32 or self.pretraining): + self.main_layer_list+=self.conv_block(n_filter=384,in_channels=384,filter_size=(3,3),strides=(2,2),name="block_4_3") + self.main_layer_list+=self.conv_block(n_filter=384,in_channels=384,filter_size=(3,3),strides=(1,1),name="block_4_4") + self.main_layer_list+=self.conv_block(n_filter=384,in_channels=384,filter_size=(3,3),strides=(2,2),name="block_4_5") + self.out_channels=384 + if(self.pretraining): + 
self.main_layer_list+=[ + Flatten(name="Flatten"), + Dense(n_units=4096,in_channels=384*7*7,act=tf.nn.relu,name="fc1"), + Dense(n_units=4096,in_channels=4096,act=tf.nn.relu,name="fc2"), + Dense(n_units=1000,in_channels=4096,act=None,name="fc3") + ] + self.main_block=LayerList(self.main_layer_list) + + def conv_block(self,n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,padding="SAME",name="block"): + layer_list=[] + layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,\ + act=None,data_format=self.data_format,padding=padding,name=f"{name}_conv1")) + layer_list.append(BatchNorm2d(num_features=n_filter,act=act,is_train=True,data_format=self.data_format,name=f"{name}_bn1")) + return layer_list + + def forward(self,x): + return self.main_block.forward(x) + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + +class vgg16_backbone(Model): + def __init__(self,in_channels=3,scale_size=8,data_format="channels_first",pretraining=False): + super().__init__() + self.name="vgg16_backbone" + self.in_channels=in_channels + self.data_format=data_format + self.scale_size=scale_size + self.pretraining=pretraining + self.main_layer_list=[] + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + self.main_layer_list+=[ + self.conv_block(n_filter=64,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_1_1"), + self.conv_block(n_filter=64,in_channels=64,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_1_2"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_1"), + self.conv_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_2_1"), + self.conv_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_2_2"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_2"), + self.conv_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_3_1"), + self.conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_3_2"), + self.conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_3_3"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_3"), + self.conv_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_4_1"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_4_2"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_4_3") + ] + self.out_channels=512 + if(self.scale_size==32 or self.pretraining): + self.main_layer_list+=[ + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_4"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_5_1"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_5_2"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="block_5_3"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_5") + ] + self.out_channels=512 + if(self.pretraining): + self.main_layer_list+=[ + Flatten(name="Flatten"), + 
Dense(n_units=4096,in_channels=512*7*7,act=tf.nn.relu,name="fc1"), + Dense(n_units=4096,in_channels=4096,act=tf.nn.relu,name="fc2"), + Dense(n_units=1000,in_channels=4096,act=None,name="fc3") + ] + self.main_block=LayerList(self.main_layer_list) + + def forward(self,x): + return self.main_block.forward(x) + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + + def conv_block(self,n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=None,padding="SAME",name="block"): + return Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,\ + act=act,data_format=self.data_format,padding=padding,name=f"{name}_conv1") + +class vgg19_backbone(Model): + def __init__(self,in_channels=3,scale_size=8,data_format="channels_first",pretraining=False): + super().__init__() + self.name="vgg19_backbone" + self.in_channels=in_channels + self.data_format=data_format + self.scale_size=scale_size + self.pretraining=pretraining + self.vgg_mean=tf.constant([103.939, 116.779, 123.68])/255 + if(self.data_format=="channels_first"): + self.vgg_mean=tf.reshape(self.vgg_mean,[1,3,1,1]) + elif(self.data_format=="channels_last"): + self.vgg_mean=tf.reshape(self.vgg_mean,[1,1,1,3]) + self.initializer=tl.initializers.truncated_normal(stddev=0.005) + self.main_layer_list=[] + if(self.scale_size==8 or self.scale_size==32 or self.pretraining): + self.main_layer_list+=[ + self.conv_block(n_filter=64,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv1_1"), + self.conv_block(n_filter=64,in_channels=64,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv1_2"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_1"), + self.conv_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv2_1"), + self.conv_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv2_2"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_2"), + self.conv_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv3_1"), + self.conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv3_2"), + self.conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv3_3"), + self.conv_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv3_4"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_3"), + self.conv_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv4_1"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv4_2") + ] + self.out_channels=512 + if(self.scale_size==32 or self.pretraining): + self.main_layer_list+=[ + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv4_3"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv4_4"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_4"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv5_1"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv5_2"), + 
self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv5_3"), + self.conv_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,name="conv5_4"), + MaxPool2d(filter_size=(2,2),strides=(2,2),data_format=self.data_format,name="maxpool_5") + ] + self.out_channels=512 + if(self.pretraining): + self.main_layer_list+=[ + Flatten(name="Flatten"), + Dense(n_units=4096,in_channels=512*7*7,act=tf.nn.relu,W_init=self.initializer,b_init=self.initializer,name="fc6"), + Dense(n_units=4096,in_channels=4096,act=tf.nn.relu,W_init=self.initializer,b_init=self.initializer,name="fc7"), + Dense(n_units=1000,in_channels=4096,act=None,W_init=self.initializer,b_init=self.initializer,name="fc8") + ] + self.main_block=LayerList(self.main_layer_list) + + def forward(self,x): + x=x-self.vgg_mean + return self.main_block.forward(x) + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + + def conv_block(self,n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=None,padding="SAME",name="conv_default"): + return Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,\ + act=act,data_format=self.data_format,padding=padding,W_init=self.initializer,b_init=self.initializer,name=name) + + +class Resnet18_backbone(Model): + def __init__(self,n_filter=512,in_channels=3,scale_size=8,data_format="channels_first",pretraining=False): + super().__init__() + self.name="resnet18_backbone" + self.data_format=data_format + self.out_channels=n_filter + self.scale_size=scale_size + self.pretraining=pretraining + self.out_channels=512 + if(self.scale_size==8): + strides=(1,1) + elif(self.scale_size==32 or self.pretraining): + strides=(2,2) + self.conv1=Conv2d(n_filter=64,in_channels=in_channels,filter_size=(7,7),strides=(2,2),b_init=None,data_format=self.data_format,name="conv_1_1") + self.bn1=BatchNorm2d(decay=0.9,act=tf.nn.relu,is_train=True,num_features=64,data_format=self.data_format,name="bn_1_1") + self.maxpool=MaxPool2d(filter_size=(3,3),strides=(2,2),data_format=self.data_format,name="maxpool_1") + self.res_block_2_1=self.Res_block(n_filter=64,in_channels=64,strides=(1,1),is_down_sample=False,data_format=self.data_format,name="block_2_1") + self.res_block_2_2=self.Res_block(n_filter=64,in_channels=64,strides=(1,1),is_down_sample=False,data_format=self.data_format,name="block_2_2") + self.res_block_3_1=self.Res_block(n_filter=128,in_channels=64,strides=(2,2),is_down_sample=True,data_format=self.data_format,name="block_3_1") + self.res_block_3_2=self.Res_block(n_filter=128,in_channels=128,strides=(1,1),is_down_sample=False,data_format=self.data_format,name="block_3_2") + self.res_block_4_1=self.Res_block(n_filter=256,in_channels=128,strides=strides,is_down_sample=True,data_format=self.data_format,name="block_4_1") + self.res_block_4_2=self.Res_block(n_filter=256,in_channels=256,strides=(1,1),is_down_sample=False,data_format=self.data_format,name="block_4_2") + self.res_block_5_1=self.Res_block(n_filter=512,in_channels=256,strides=strides,is_down_sample=True,data_format=self.data_format,name="block_5_1") + if(self.pretraining): + self.res_block_5_2=self.Res_block(n_filter=512,in_channels=512,strides=(1,1),is_down_sample=False,data_format=self.data_format,name="block_5_2") + self.avg_pool=MeanPool2d(filter_size=(7,7),strides=(1,1),data_format=self.data_format,name="avgpool_2") + self.flatten=Flatten(name="Flatten") + 
self.fc=Dense(n_units=1000,in_channels=512,name="FC") + + def forward(self,x): + x=self.conv1.forward(x) + x=self.bn1.forward(x) + x=self.maxpool.forward(x) + x=self.res_block_2_1.forward(x) + x=self.res_block_2_2.forward(x) + x=self.res_block_3_1.forward(x) + x=self.res_block_3_2.forward(x) + x=self.res_block_4_1.forward(x) + x=self.res_block_4_2.forward(x) + x=self.res_block_5_1.forward(x) + if(self.pretraining): + x=self.res_block_5_2.forward(x) + x=self.avg_pool.forward(x) + x=self.flatten.forward(x) + x=self.fc.forward(x) + return x + + class Res_block(Model): + def __init__(self,n_filter,in_channels,strides=(1,1),is_down_sample=False,data_format="channels_first",name="res_block"): + super().__init__() + self.data_format=data_format + self.is_down_sample=is_down_sample + if(is_down_sample): + init_filter_size=(1,1) + else: + init_filter_size=(3,3) + self.main_block=LayerList([ + Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=(3,3),strides=strides,b_init=None,data_format=self.data_format,name=f"{name}_conv_1"), + BatchNorm2d(decay=0.9,act=tf.nn.relu,is_train=True,num_features=n_filter,data_format=self.data_format,name=f"{name}_bn_1"), + Conv2d(n_filter=n_filter,in_channels=n_filter,filter_size=(3,3),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{name}_conv_2"), + BatchNorm2d(decay=0.9,is_train=True,num_features=n_filter,data_format=self.data_format,name=f"{name}_bn_2"), + ]) + if(self.is_down_sample): + self.down_sample=LayerList([ + Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=init_filter_size,strides=strides,b_init=None,data_format=self.data_format,name=f"{name}_downsample_conv"), + BatchNorm2d(decay=0.9,is_train=True,num_features=n_filter,data_format=self.data_format,name=f"{name}_downsample_bn") + ]) + + def forward(self,x): + res=x + x=self.main_block.forward(x) + if(self.is_down_sample): + res=self.down_sample.forward(res) + return tf.nn.relu(x+res) + +class Resnet50_backbone(Model): + def __init__(self,in_channels=3,n_filter=64,scale_size=8,decay=0.9,eps=1e-5,data_format="channels_first",pretraining=False,use_pool=True): + super().__init__() + self.name="resnet50_backbone" + self.in_channels=in_channels + self.n_filter=n_filter + self.scale_size=scale_size + self.data_format=data_format + self.pretraining=pretraining + self.out_channels=2048 + self.use_pool=use_pool + if(self.scale_size==8): + strides=(1,1) + elif(self.scale_size==32 or self.pretraining): + strides=(2,2) + self.eps=eps + self.decay=decay + #first layers + self.conv1=Conv2d(n_filter=64,in_channels=self.in_channels,filter_size=(7,7),strides=(2,2),padding="SAME",b_init=None,data_format=self.data_format,name="conv1") + self.bn1=BatchNorm2d(decay=self.decay,epsilon=self.eps,is_train=True,num_features=64,data_format=self.data_format,act=tf.nn.relu,name="bn1") + self.maxpool1=MaxPool2d(filter_size=(3,3),strides=(2,2),data_format=self.data_format,name="maxpool_1") + #block_1 + self.block_1_1=self.Basic_block(in_channels=64,n_filter=64,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_1_1") + self.block_1_2=self.Basic_block(in_channels=256,n_filter=64,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_1_2") + self.block_1_3=self.Basic_block(in_channels=256,n_filter=64,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_1_3") + #block_2 + 
self.block_2_1=self.Basic_block(in_channels=256,n_filter=128,strides=(2,2),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_2_1") + self.block_2_2=self.Basic_block(in_channels=512,n_filter=128,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_2_2") + self.block_2_3=self.Basic_block(in_channels=512,n_filter=128,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_2_3") + self.block_2_4=self.Basic_block(in_channels=512,n_filter=128,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_2_4") + #block_3 + self.block_3_1=self.Basic_block(in_channels=512,n_filter=256,strides=strides,data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_1") + self.block_3_2=self.Basic_block(in_channels=1024,n_filter=256,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_2") + self.block_3_3=self.Basic_block(in_channels=1024,n_filter=256,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_3") + self.block_3_4=self.Basic_block(in_channels=1024,n_filter=256,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_4") + self.block_3_5=self.Basic_block(in_channels=1024,n_filter=256,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_5") + self.block_3_6=self.Basic_block(in_channels=1024,n_filter=256,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_3_6") + #block_4 + self.block_4_1=self.Basic_block(in_channels=1024,n_filter=512,strides=strides,data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_4_1") + self.block_4_2=self.Basic_block(in_channels=2048,n_filter=512,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_4_2") + self.block_4_3=self.Basic_block(in_channels=2048,n_filter=512,strides=(1,1),data_format=self.data_format,eps=self.eps,decay=self.decay,name="block_4_3") + if(self.pretraining): + self.block_5=LayerList([ + MeanPool2d(filter_size=(7,7),strides=(1,1),data_format=self.data_format,name="avgpool_1"), + Flatten(name="Flatten"), + Dense(n_units=1000,in_channels=2048,act=None,name="fc") + ]) + + def forward(self,x): + x=self.conv1.forward(x) + x=self.bn1.forward(x) + if(self.use_pool): + x=self.maxpool1.forward(x) + #block_1 + x=self.block_1_1.forward(x) + x=self.block_1_2.forward(x) + x=self.block_1_3.forward(x) + #block_2 + x=self.block_2_1.forward(x) + x=self.block_2_2.forward(x) + x=self.block_2_3.forward(x) + x=self.block_2_4.forward(x) + #block_3 + x=self.block_3_1.forward(x) + x=self.block_3_2.forward(x) + x=self.block_3_3.forward(x) + x=self.block_3_4.forward(x) + x=self.block_3_5.forward(x) + x=self.block_3_6.forward(x) + #block_4 + x=self.block_4_1.forward(x) + x=self.block_4_2.forward(x) + x=self.block_4_3.forward(x) + if(self.pretraining): + x=self.block_5.forward(x) + return x + + def cal_loss(self,label,predict): + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,logits=predict)) + + class Basic_block(Model): + def __init__(self,in_channels=64,n_filter=64,strides=(1,1),data_format="channels_first",decay=0.9,eps=1e-5,name="basic_block"): + super().__init__() + self.in_channels=in_channels + self.n_filter=n_filter + self.strides=strides + self.data_format=data_format + self.downsample=None + self.name=name + self.decay=decay + self.eps=eps + if(self.strides!=(1,1) or self.in_channels!=4*self.n_filter): + 
self.downsample=LayerList([ + Conv2d(n_filter=4*self.n_filter,in_channels=self.in_channels,filter_size=(1,1),strides=self.strides,b_init=None,\ + data_format=self.data_format,name=f"{name}_ds_conv1"), + BatchNorm2d(decay=self.decay,epsilon=self.eps,is_train=True,num_features=4*self.n_filter,act=None,data_format=self.data_format,name=f"{name}_ds_bn1") + ]) + self.main_block=LayerList([ + Conv2d(n_filter=self.n_filter,in_channels=self.in_channels,filter_size=(1,1),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{name}_conv1"), + BatchNorm2d(decay=self.decay,epsilon=self.eps,is_train=True,num_features=self.n_filter,act=tf.nn.relu,data_format=self.data_format,name=f"{name}_bn1"), + Conv2d(n_filter=self.n_filter,in_channels=self.n_filter,filter_size=(3,3),strides=self.strides,b_init=None,data_format=self.data_format,name=f"{name}_conv2"), + BatchNorm2d(decay=self.decay,epsilon=self.eps,is_train=True,num_features=self.n_filter,act=tf.nn.relu,data_format=self.data_format,name=f"{name}_bn2"), + Conv2d(n_filter=4*self.n_filter,in_channels=self.n_filter,filter_size=(1,1),strides=(1,1),b_init=None,data_format=self.data_format,name=f"{name}_conv3"), + BatchNorm2d(decay=self.decay,epsilon=self.eps,is_train=True,num_features=4*self.n_filter,act=None,data_format=self.data_format,name=f"{name}_bn3") + ]) + + def forward(self,x): + res=x + x=self.main_block.forward(x) + if(self.downsample!=None): + res=self.downsample.forward(res) + return tf.nn.relu(x+res) \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/base_model.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/base_model.py new file mode 100644 index 000000000..5f2752dc5 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/base_model.py @@ -0,0 +1,78 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
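# --- Illustrative note (not part of the original patch) ---
# A minimal sketch of the channel/stride bookkeeping in the Resnet50_backbone
# bottleneck (`Basic_block`) from backbones.py above. The helper name below is
# hypothetical; only the 1x1 -> 3x3 -> 1x1 expansion by 4 and the downsample
# condition mirror the patched code.
def bottleneck_shapes(in_channels, n_filter, strides):
    # main path: 1x1 conv to n_filter, 3x3 conv (carries the stride), 1x1 conv to 4*n_filter
    out_channels = 4 * n_filter
    # the shortcut needs a 1x1 projection whenever the stride or the channel count changes
    needs_downsample = (strides != (1, 1)) or (in_channels != out_channels)
    return out_channels, needs_downsample

assert bottleneck_shapes(64, 64, (1, 1)) == (256, True)    # block_1_1: channel mismatch only
assert bottleneck_shapes(256, 64, (1, 1)) == (256, False)  # block_1_2 / block_1_3: identity shortcut
assert bottleneck_shapes(256, 128, (2, 2)) == (512, True)  # block_2_1: strided, projected shortcut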
+ + +import npu_device as npu +npu.open().as_default() + +import tensorflow as tf +import tensorlayer as tl +from tensorlayer.models import Model +from .metrics import MetricManager + +class BasicModel(Model): + def __init__(self,config, *args, **kargs): + super().__init__() + self.config=config + + @tf.function(experimental_relax_shapes=True) + def forward(self, x, is_train=True, ret_backbone=False): + '''custom model forwarding + The `forward` function is expected to take in the images and return the activation maps calculated by the neural network. Implement your custom forwarding computation logic here. + + Parameters + ---------- + x : tf.Tensor + The input batch of images + is_train : bool + a bool value indicate that whether the forward function is invoked at training time or inference time. In the training procedure, the forward function will be called with `is_train=True`; while in the inference procedure and model exportation procedure, the forward function will be called with `is_train=False`. This is because in training, it's better to arrange the model output results in dict by their names, while in inference and model exportation procedure, the model forwarding output results are required to be tensors, especially in model exportation. + ret_backbone : bool + a bool value indicate that whether the forward function will output the backbone extracted feature maps when invokded. If `ret_backbone` is set to be False, the model output should be the original final forwarding result(noted as `output`), else if `ret_backbone` is set to be False, the model output should include the backbone extracted feature maps besides the final forwarding result(noted as `output,backbone_features`). This is use for domain adaptation, which need to use the discriminator to align the feature maps extracted by the model backbone facing the labeled training data and unlabeled training data, thus extend the model ability towards the extra unlabled datasets. + + ''' + raise NotImplementedError("virtual class BaseModel function: `forward` not implemented!") + + @tf.function(experimental_relax_shapes=True) + def infer(self, x): + '''custom model inference + The `infer` function is expected to take in the images and return the activation maps calculated by the neural network. Implement your custom inference computation logic here. The difference between the `forward` function and the `infer` function is that, the `infer` function is invoked especially in model exportation procedure, thus it's outputs are required to be the decoded `tf.Tensor` variables. In this way, The `infer` function is usually a warp function of the `forward` function, and the `infer` function output is usually constracted by parsing and formatting the `forward` function result into the `tf.Tensor` format. + ''' + raise NotImplementedError("virtual class BaseModel function: `infer` not implemented!") + + def cal_loss(self, predict_x, target_x, metric_manager:MetricManager ,mask=None): + '''custom loss calculation function + Teh `cal_loss` function is expected to take the output predict activation map and the ground truth target activation map and return the calculated loss for gradient descent. + + Parameters: + ---------- + predict_x : Dictionary + a dictionary contains the model predict activation map, the keys are the activation map name and the values are the corresponding activation map value. `predict_x` should be the output of the `forward` function. 
+ target_x : Dictionary + a dictionary containing the ground truth activation map, the keys are the activation map name and the values are the corresponding activation map value. `target_x` should have the same keys as `predict_x`, but the corresponding values of `target_x` should be the ground truth while the corresponding values of `predict_x` should be the model predictions. + ''' + raise NotImplementedError("virtual class BaseModel function: `cal_loss` not implemented!") \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/common.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/common.py new file mode 100644 index 000000000..2ca41662e --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/common.py @@ -0,0 +1,306 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
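# --- Illustrative note (not part of the original patch) ---
# A hypothetical minimal subclass showing how the BasicModel contract above is
# meant to be used: `forward` returns a dict of named maps during training and
# plain tensors at inference/export time, `infer` wraps `forward`, and `cal_loss`
# consumes matching predict/target dicts. This is a sketch, not the project's
# actual model code; all names here are made up.
import tensorflow as tf

class ToyModel(BasicModel):
    def forward(self, x, is_train=True, ret_backbone=False):
        feature = tf.reduce_mean(x, axis=[2, 3], keepdims=True)   # stand-in "backbone" feature
        heatmap = tf.nn.relu(feature)
        if not is_train:
            return heatmap                                        # raw tensor for inference/export
        output = {"heatmap": heatmap}                              # named maps for training
        return (output, feature) if ret_backbone else output      # features kept for domain adaptation
    def infer(self, x):
        return self.forward(x, is_train=False)
    def cal_loss(self, predict_x, target_x, metric_manager, mask=None):
        loss = tf.reduce_mean(tf.square(predict_x["heatmap"] - target_x["heatmap"]))
        metric_manager.update("toy_mse", loss)                     # logged via MetricManager (metrics.py)
        return loss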
+ + +import npu_device as npu +npu.open().as_default() + +import logging +from enum import Enum +import time +import functools +import multiprocessing +from distutils.dir_util import mkpath + +import numpy as np +import tensorflow as tf +import cv2 + +from pycocotools.coco import maskUtils +from ..Config.define import MODEL,TRAIN,DATA,BACKBONE,KUNGFU,OPTIM + +regularizer_conv = 0.004 +regularizer_dsconv = 0.0004 +batchnorm_fused = True +activation_fn = tf.nn.relu + +def read_imgfile(path, width, height, data_format='channels_last'): + """Read image file and resize to network input size.""" + val_image = cv2.imread(path, cv2.IMREAD_COLOR) + val_image = val_image[:,:,::-1] + if width is not None and height is not None: + val_image = cv2.resize(val_image, (width, height)) + if data_format == 'channels_first': + val_image = val_image.transpose([2, 0, 1]) + return val_image / 255.0 + + +def get_sample_images(w, h): + val_image = [ + read_imgfile('./images/p1.jpg', w, h), + read_imgfile('./images/p2.jpg', w, h), + read_imgfile('./images/p3.jpg', w, h), + read_imgfile('./images/golf.jpg', w, h), + read_imgfile('./images/hand1.jpg', w, h), + read_imgfile('./images/hand2.jpg', w, h), + read_imgfile('./images/apink1_crop.jpg', w, h), + read_imgfile('./images/ski.jpg', w, h), + read_imgfile('./images/apink2.jpg', w, h), + read_imgfile('./images/apink3.jpg', w, h), + read_imgfile('./images/handsup1.jpg', w, h), + read_imgfile('./images/p3_dance.png', w, h), + ] + return val_image + +def load_graph(model_file): + """Load a freezed graph from file.""" + graph_def = tf.GraphDef() + with open(model_file, "rb") as f: + graph_def.ParseFromString(f.read()) + + graph = tf.Graph() + with graph.as_default(): + tf.import_graph_def(graph_def) + return graph + +def get_op(graph, name): + return graph.get_operation_by_name('import/%s' % name).outputs[0] + + +class Profiler(object): + + def __init__(self): + self.count = dict() + self.total = dict() + + def __del__(self): + if self.count: + self.report() + + def report(self): + sorted_costs = sorted([(t, name) for name, t in self.total.items()]) + sorted_costs.reverse() + names = [name for _, name in sorted_costs] + hr = '-' * 80 + print(hr) + print('%-12s %-12s %-12s %s' % ('tot (s)', 'count', 'mean (ms)', 'name')) + print(hr) + for name in names: + tot, cnt = self.total[name], self.count[name] + mean = tot / cnt + print('%-12f %-12d %-12f %s' % (tot, cnt, mean * 1000, name)) + + def __call__(self, name, duration): + if name in self.count: + self.count[name] += 1 + self.total[name] += duration + else: + self.count[name] = 1 + self.total[name] = duration + + +_default_profiler = Profiler() + + +def measure(f, name=None): + if not name: + name = f.__name__ + t0 = time.time() + result = f() + duration = time.time() - t0 + _default_profiler(name, duration) + return result + +def plot_humans(image, heatMat, pafMat, humans, name): + import matplotlib.pyplot as plt + fig = plt.figure() + a = fig.add_subplot(2, 3, 1) + + plt.imshow(draw_humans(image, humans)) + + a = fig.add_subplot(2, 3, 2) + tmp = np.amax(heatMat[:, :, :-1], axis=2) + plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5) + plt.colorbar() + + tmp2 = pafMat.transpose((2, 0, 1)) + tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0) + tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0) + + a = fig.add_subplot(2, 3, 4) + a.set_title('Vectormap-x') + plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5) + plt.colorbar() + + a = fig.add_subplot(2, 3, 5) + a.set_title('Vectormap-y') + plt.imshow(tmp2_even, 
cmap=plt.cm.gray, alpha=0.5) + plt.colorbar() + mkpath('vis') + plt.savefig('vis/result-%s.png' % name) + +def tf_repeat(tensor, repeats): + """ + Args: + + input: A Tensor. 1-D or higher. + repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input + + Returns: + + A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats + """ + expanded_tensor = tf.expand_dims(tensor, -1) + multiples = [1] + repeats + tiled_tensor = tf.tile(expanded_tensor, multiples=multiples) + repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats) + return repeated_tesnor + +def decode_mask(meta_mask_list): + if(type(meta_mask_list)!=list): + return None + if(meta_mask_list==[]): + return None + inv_mask_list=[] + for meta_mask in meta_mask_list: + mask=maskUtils.decode(meta_mask) + inv_mask=np.logical_not(mask) + inv_mask_list.append(inv_mask) + mask=np.ones_like(inv_mask_list[0]) + for inv_mask in inv_mask_list: + mask=np.logical_and(mask,inv_mask) + mask = mask.astype(np.uint8) + return mask + +def regulize_loss(target_model,weight_decay_factor): + re_loss=0 + regularizer=tf.keras.regularizers.l2(l=weight_decay_factor) + for trainable_weight in target_model.trainable_weights: + re_loss+=regularizer(trainable_weight) + return re_loss + +def pad_image(img,stride,pad_value=0.0): + img_h,img_w,img_c=img.shape + pad_h= 0 if (img_h%stride==0) else int(stride-(img_h%stride)) + pad_w= 0 if (img_w%stride==0) else int(stride-(img_w%stride)) + pad=[pad_h//2,pad_h-pad_h//2,pad_w//2,pad_w-pad_w//2] + padded_image=np.zeros(shape=(img_h+pad_h,img_w+pad_w,img_c))+pad_value + padded_image[pad[0]:img_h+pad[0],pad[2]:img_w+pad[2],:]=img + return padded_image,pad + +def pad_image_shape(img,shape,pad_value=0.0): + img_h,img_w,img_c=img.shape + dst_h,dst_w=shape + pad_h=dst_h-img_h + pad_w=dst_w-img_w + pad=[pad_h//2,pad_h-pad_h//2,pad_w//2,pad_w-pad_w//2] + padded_image=np.zeros(shape=(img_h+pad_h,img_w+pad_w,img_c))+pad_value + padded_image[pad[0]:img_h+pad[0],pad[2]:img_w+pad[2],:]=img + return padded_image,pad + +def scale_image(image,hin,win,scale_rate=0.95): + #scale a image into the size of scale_rate*hin and scale_rate*win + #used for model inferecne + image_h,image_w,_=image.shape + scale_h,scale_w=int(scale_rate*image_h),int(scale_rate*image_w) + scale_image=cv2.resize(image,(scale_w,scale_h),interpolation=cv2.INTER_CUBIC) + padded_image,pad=pad_image_shape(scale_image,shape=(hin,win),pad_value=0.0) + return padded_image,pad + +def get_optim(optim_type): + if(optim_type==OPTIM.Adam): + print("using optimizer Adam!") + return tf.keras.optimizers.Adam + elif(optim_type==OPTIM.RMSprop): + print("using optimizer RMSProp!") + return tf.keras.optimizers.RMSprop + elif(optim_type==OPTIM.SGD): + print("using optimizer SGD!") + return tf.keras.optimizers.SGD + else: + raise NotImplementedError("invalid optim type") + +def regulize_loss(target_model,weight_decay_factor): + re_loss=0 + regularizer=tf.keras.regularizers.l2(l=weight_decay_factor) + for weight in target_model.trainable_weights: + re_loss+=regularizer(weight) + return re_loss + +def resize_CHW(x, dst_shape): + x = x[np.newaxis,:,:,:] + x = resize_NCHW(x, dst_shape) + x = x[0] + return x + +def resize_NCHW(x, dst_shape): + x = tf.transpose(x,[0,2,3,1]) + x = tf.image.resize(x, dst_shape) + x = tf.transpose(x,[0,3,1,2]) + return x + +def NCHW_to_NHWC(x): + return tf.transpose(x,[0,2,3,1]) + +def NHWC_to_NCHW(x): + return tf.transpose(x,[0,3,1,2]) + +def to_tensor_dict(dict_x): + for 
key in dict_x.keys(): + dict_x[key]=tf.convert_to_tensor(dict_x[key]) + return dict_x + +def to_numpy_dict(dict_x): + for key in dict_x.keys(): + value=dict_x[key] + if(type(value) is not np.ndarray): + value=value.numpy() + dict_x[key]=value + return dict_x + +def get_num_parallel_calls(): + return max(multiprocessing.cpu_count()//2,1) + +@functools.lru_cache(maxsize=16) +def get_meshgrid(mesh_h,mesh_w): + x_range=np.linspace(start=0,stop=mesh_w-1,num=mesh_w) + y_range=np.linspace(start=0,stop=mesh_h-1,num=mesh_h) + mesh_x,mesh_y=np.meshgrid(x_range,y_range) + mesh_grid=np.stack([mesh_x,mesh_y]) + return mesh_grid + +def log_model(msg): + logger=logging.getLogger("MODEL") + logger.info(msg) + +def log_train(msg): + logger=logging.getLogger("TRAIN") + logger.info(msg) + +def image_float_to_uint8(image): + return np.clip(image*255,0,255).astype(np.uint8) \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/domainadapt.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/domainadapt.py new file mode 100644 index 000000000..25a3c222f --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/domainadapt.py @@ -0,0 +1,86 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
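# --- Illustrative note (not part of the original patch) ---
# Worked example of the padding arithmetic used by pad_image()/pad_image_shape()
# in common.py above (the concrete sizes are chosen arbitrarily): an image whose
# height or width is not a multiple of the network stride is symmetrically padded
# up to the next multiple, and the returned pad = [top, bottom, left, right] is
# later undone on the decoded keypoints (see Human.unpad()).
import numpy as np

img = np.zeros((367, 640, 3), dtype=np.float32)             # h=367 is not a multiple of 8
stride = 8
pad_h = 0 if 367 % stride == 0 else stride - 367 % stride    # -> 1
pad_w = 0 if 640 % stride == 0 else stride - 640 % stride    # -> 0
pad = [pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2]   # [0, 1, 0, 0]
padded = np.zeros((367 + pad_h, 640 + pad_w, 3), dtype=np.float32)
padded[pad[0]:367 + pad[0], pad[2]:640 + pad[2], :] = img    # padded.shape == (368, 640, 3)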
+ + +import npu_device as npu +npu.open().as_default() + +import tensorflow as tf +import tensorlayer as tl +from tensorlayer.models import Model +from tensorlayer.layers import Conv2d,BatchNorm2d,Dense,Flatten,LayerList + +def get_discriminator(feature_hin,feature_win,in_channnels,n_filter=512,layer_num=5,data_format="channels_first"): + last_channels=in_channnels + layer_list=[] + return layer_list + + +class Discriminator(Model): + def __init__(self,feature_hin,feature_win,in_channels,data_format="channels_first"): + super().__init__() + self.data_format=data_format + self.feature_hin=feature_hin + self.feature_win=feature_win + self.in_channels=in_channels + self.layer_num=5 + self.n_filter=256 + # construct Model + layer_list=[] + last_channels=self.in_channels + dis_hin,dis_win=self.feature_hin,self.feature_win + for layer_idx in range(0,self.layer_num): + strides=(1,1) + if(dis_hin>=4 or dis_win>=4): + strides=(2,2) + dis_hin,dis_win=(dis_hin+1)//2,(dis_win+1)//2 + layer_list+=[ + Conv2d(n_filter=self.n_filter,in_channels=last_channels,strides=strides,act=tf.nn.relu,\ + data_format=data_format,name=f"dis_conv_{layer_idx}") + ] + last_channels=self.n_filter + layer_list.append(Flatten(name="Flatten")) + layer_list.append(Dense(n_units=4096,in_channels=dis_hin*dis_win*self.n_filter,act=tf.nn.relu,name="fc1")) + layer_list.append(Dense(n_units=1000,in_channels=4096,act=tf.nn.relu,name="fc2")) + layer_list.append(Dense(n_units=1,in_channels=1000,act=None,name="fc3")) + self.main_block=LayerList(layer_list) + + def forward(self,x): + return self.main_block.forward(x) + + def cal_loss(self,x,label): + label_shape = [x.shape[0],1] + if(label == True): + gt_label = tf.ones(shape=label_shape) + elif(label == False): + gt_label = tf.zeros(shape=label_shape) + loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_label,logits=x)) + return loss + + + \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/examine.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/examine.py new file mode 100644 index 000000000..2100285f2 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/examine.py @@ -0,0 +1,64 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
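# --- Illustrative note (not part of the original patch) ---
# Sketch of the spatial bookkeeping inside Discriminator.__init__ above: each of
# the 5 conv layers halves the feature map (with ceiling division) as long as
# either side is still >= 4, and the flattened size fed to fc1 is
# dis_hin * dis_win * n_filter. The helper name is hypothetical.
def discriminator_fc_in(feature_hin, feature_win, n_filter=256, layer_num=5):
    dis_hin, dis_win = feature_hin, feature_win
    for _ in range(layer_num):
        if dis_hin >= 4 or dis_win >= 4:
            dis_hin, dis_win = (dis_hin + 1) // 2, (dis_win + 1) // 2
    return dis_hin * dis_win * n_filter

# e.g. a 46x46 backbone feature map shrinks to 3x3 after four halvings -> 3*3*256 inputs to fc1
assert discriminator_fc_in(46, 46) == 2304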
+ + +import npu_device as npu +npu.open().as_default() + +import numpy as np +from .base_model import BasicModel + +def exam_model_weights(model:BasicModel): + weight_list = model.all_weights + # construct weight_dict + weight_dict = {} + for weight in weight_list: + name = weight.name + weight_dict[name]=weight + # exam by name_list + name_list = sorted(list(weight_dict.keys())) + for name in name_list: + shape = weight_dict[name].shape + print(f"model weight name: {name}\t shape:{shape}") + +def exam_npz_dict_weights(npz_dict_path): + npz_dict = np.load(npz_dict_path, allow_pickle=True) + # consturct weight_dict + weight_dict = npz_dict + # exam by name_list + name_list = sorted(list(weight_dict.keys())) + for name in name_list: + shape = weight_dict[name].shape + print(f"npz_dict weight name: {name}\t shape:{shape}") + +def exam_npz_weights(npz_path): + print("weights in npz file don't have names") + npz = np.load(npz_path, allow_pickle=True) + npz = npz["params"] + for widx,weight in enumerate(npz): + print(f"npz weight idx: {widx}\t shape:{weight.shape}") \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/human.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/human.py new file mode 100644 index 000000000..bcb9b8aca --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/human.py @@ -0,0 +1,183 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
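# --- Illustrative note (not part of the original patch) ---
# Hypothetical usage of the inspection helpers above, e.g. to compare a model's
# weight names against a checkpoint saved with format="npz_dict" before loading:
#   exam_model_weights(model)                    # prints each weight's name and shape
#   exam_npz_dict_weights("newest_model.npz")    # prints the checkpoint's keys and shapes
# Matching these two name lists is what a name-keyed npz_dict load relies on.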
+ + +import npu_device as npu +npu.open().as_default() + +import cv2 + +class Human: + """ + body_parts: list of BodyPart + """ + + def __init__(self,parts,limbs,colors): + self.local_id=-1 + self.global_id=-1 + self.parts=parts + self.limbs=limbs + self.colors=colors + self.body_parts = {} + self.score = 0.0 + self.bbx=None + self.area=None + + def get_global_id(self): + return int(self.global_id) + + def get_score(self): + for part_idx in self.body_parts.keys(): + body_part=self.body_parts[part_idx] + self.score+=body_part.score + self.score=self.score/len(self.body_parts.keys()) + return float(self.score) + + def get_partnum(self): + return len(self.body_parts.keys()) + + def get_bbx(self): + min_x,min_y=10000,10000 + max_x,max_y=-1,-1 + for body_part_idx in self.body_parts.keys(): + body_part=self.body_parts[body_part_idx] + x=body_part.x + y=body_part.y + min_x=min(x,min_x) + min_y=min(y,min_y) + max_x=max(x,max_x) + max_y=max(y,max_y) + center_x=(min_x+max_x)/2 + center_y=(min_y+max_y)/2 + h=max_y-min_y + w=max_x-min_x + self.bbx=[center_x,center_y,w,h] + return [center_x,center_y,w,h] + + def get_area(self): + bbx=self.get_bbx() + self.area=float(bbx[2]*bbx[3]) + return self.area + + def bias(self,bias_w,bias_h): + for part_idx in self.body_parts.keys(): + body_part=self.body_parts[part_idx] + body_part.x=body_part.x+bias_w + body_part.y=body_part.y+bias_h + if(body_part.x<0): + body_part.x=-1000.0 + if(body_part.y<0): + body_part.y=-1000.0 + + def scale(self,scale_w,scale_h): + for part_idx in self.body_parts.keys(): + body_part=self.body_parts[part_idx] + body_part.x=body_part.x*scale_w + body_part.y=body_part.y*scale_h + body_part.w=body_part.w*scale_w + body_part.h=body_part.h*scale_h + if(body_part.x<0): + body_part.x=-1000.0 + if(body_part.y<0): + body_part.y=-1000.0 + + def unpad(self, pad): + pad_start_h, pad_start_w = pad[0], pad[2] + self.bias(bias_w=-pad_start_w, bias_h=-pad_start_h) + + def unscale(self, scale): + self.scale(1/scale, 1/scale) + + def draw_human(self,img): + img_h,img_w,img_c=img.shape + radius=int(min(img_h,img_w)/80) + thickness=int(min(img_h,img_w)/100) + for part_idx in self.body_parts.keys(): + body_part=self.body_parts[part_idx] + x=body_part.x + y=body_part.y + if(x<0 or x>=img_w or y<0 or y>=img_h): + continue + color=self.colors[part_idx] + img=cv2.circle(img,(int(x),int(y)),radius=radius,color=color,thickness=-1) + line_color=(255,0,0) + for limb in self.limbs: + src_part_idx,dst_part_idx=limb + if((src_part_idx in self.body_parts) and (dst_part_idx in self.body_parts)): + src_body_part=self.body_parts[src_part_idx] + src_x,src_y=int(src_body_part.x),int(src_body_part.y) + dst_body_part=self.body_parts[dst_part_idx] + dst_x,dst_y=int(dst_body_part.x),int(dst_body_part.y) + if(src_x<0 or src_x>=img_w or src_y<0 or src_y>=img_h): + continue + if(dst_x<0 or dst_x>=img_w or dst_y<0 or dst_y>=img_h): + continue + img=cv2.line(img,(src_x,src_y),(dst_x,dst_y),color=line_color,thickness=thickness) + return img + + def print(self): + for part_idx in self.body_parts.keys(): + body_part=self.body_parts[part_idx] + print(f"body-part:{self.parts(part_idx):20} x:{body_part.x:< 8.2f} y:{body_part.y:< 8.2f} score:{body_part.score:< .8f}") + print() + + def __str__(self): + return ' '.join([str(x) for x in self.body_parts.values()]) + + def __repr__(self): + return self.__str__() + +class BodyPart: + """ + part_idx : part index(eg. 
0 for nose) + x, y: coordinate of body part + score : confidence score + """ + + def __init__(self, parts, u_idx, part_idx, x, y, score, w=-1, h=-1 ): + self.parts=parts + self.u_idx=u_idx + self.part_idx = part_idx + self.x, self.y = x, y + self.w, self.h = w, h + self.score = score + + def get_part_name(self): + return self.parts(self.part_idx) + + def get_x(self): + return float(self.x) + + def get_y(self): + return float(self.y) + + def __str__(self): + return 'BodyPart:%d-(%.2f, %.2f) score=%.2f' % (self.part_idx, self.x, self.y, self.score) + + def __repr__(self): + return self.__str__() \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/metrics.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/metrics.py new file mode 100644 index 000000000..a1dd13c5b --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/metrics.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
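# --- Illustrative note (not part of the original patch) ---
# How the Human/BodyPart coordinate helpers above are combined after inference
# (this mirrors infer_one_img in pifpaf/eval.py later in this patch): keypoints
# are decoded in the padded, resized network-input frame, so the padding offset
# is subtracted first and the resize is undone second. Numbers are made up.
scale_rate = 0.5                   # the original image was resized by 0.5 before padding
pad = [4, 4, 0, 0]                 # [top, bottom, left, right] padding added afterwards
x_in_net, y_in_net = 100.0, 54.0   # a decoded keypoint in network-input coordinates
x = (x_in_net - pad[2]) / scale_rate   # -> 200.0 in the original image
y = (y_in_net - pad[0]) / scale_rate   # -> 100.0 in the original image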
+ + +import npu_device as npu +npu.open().as_default() + +import time +import numpy as np + +class AvgMetric: + def __init__(self,name="default_name",init_value=0): + self.name=name + self.step=0 + self.value=init_value + + def update(self,value): + self.step+=1 + self.value+=value + return self.value + + def reset(self): + self.step=0 + self.value=0 + + def gen_report_value(self): + if(self.step==0): + return 0 + else: + return self.value/self.step + + def report_train(self): + report_value=self.gen_report_value() + msg=f"{self.name}: {report_value:.8f}" + self.reset() + return msg + +class TimeMetric: + def __init__(self): + self.init_time=time.time() + self.start_time=time.time() + + def start_timing(self): + self.start_time=time.time() + + def report_timing(self): + last_time=self.start_time + cur_time=time.time() + self.start_time=cur_time + return cur_time-last_time + +class MetricManager: + def __init__(self,debug=False): + self.debug=debug + self.metric_group={} + self.metric_name_list=[] + self.timer=TimeMetric() + + def debug_print(self,msg): + if(self.debug): + print(msg) + + def update(self,metric_name,metric_value): + if(type(metric_value)!=np.ndarray and type(metric_value)!=float): + metric_value = metric_value.numpy() + if(metric_name not in self.metric_group): + self.metric_group[metric_name]=AvgMetric(name=metric_name,init_value=0) + self.metric_name_list.append(metric_name) + self.debug_print(f"test metric_name:{metric_name} type(metric_value):{type(metric_value)} value:{metric_value}") + self.metric_group[metric_name].update(value=metric_value) + + def report_train(self): + msg="" + for midx,metric_name in enumerate(self.metric_name_list,start=1): + metric=self.metric_group[metric_name] + msg+=metric.report_train()+" " + if(midx%3==0 and midx!=0): + msg+="\n" + msg.replace("\n\n","\n") + return msg + + def start_timing(self): + self.timer.start_timing() + + def report_timing(self): + msg="" + msg+=f"time:{self.timer.report_timing():.8f}" + return msg diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__init__.py new file mode 100644 index 000000000..4a0a48f9b --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +from .model import * +from .eval import * +from .processor import PreProcessor +from .processor import PostProcessor +from .processor import Visualizer \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/define.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/define.py new file mode 100644 index 000000000..9ae1d9f7b --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/define.py @@ -0,0 +1,136 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
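# --- Illustrative note (not part of the original patch) ---
# With the re-exports in pifpaf/__init__.py above, downstream code can presumably
# import the PifPaf pieces from the subpackage directly, for example:
#   from hyperpose.Model.pifpaf import Pifpaf, PreProcessor, PostProcessor, Visualizer
# rather than reaching into the individual model/eval/processor modules.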
+ + +import npu_device as npu +npu.open().as_default() + +import numpy as np +from enum import Enum + +#specialize for coco +class CocoPart(Enum): + Nose=0 + LEye=1 + REye=2 + LEar=3 + REar=4 + LShoulder=5 + RShoulder=6 + LElbow=7 + RElbow=8 + LWrist=9 + RWrist=10 + LHip=11 + RHip=12 + LKnee=13 + RKnee=14 + LAnkle=15 + RAnkle=16 + +CocoLimb=[[15, 13],[13, 11],[16, 14],[14, 12],[11, 12],[ 5, 11],[ 6, 12],[ 5, 6],[ 5, 7],\ + [ 6, 8],[ 7, 9],[ 8, 10],[ 1, 2],[ 0, 1],[ 0, 2],[ 1, 3],[ 2, 4],[ 3, 5],[ 4, 6]] + +CocoColor = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] +COCO_SIGMA=[ + 0.026, # nose + 0.025, # eyes + 0.025, # eyes + 0.035, # ears + 0.035, # ears + 0.079, # shoulders + 0.079, # shoulders + 0.072, # elbows + 0.072, # elbows + 0.062, # wrists + 0.062, # wrists + 0.107, # hips + 0.107, # hips + 0.087, # knees + 0.087, # knees + 0.089, # ankles + 0.089, # ankles +] + +COCO_UPRIGHT_POSE = np.array([ + [0.0, 9.3, 2.0], # 'nose', # 1 + [-0.35, 9.7, 2.0], # 'left_eye', # 2 + [0.35, 9.7, 2.0], # 'right_eye', # 3 + [-0.7, 9.5, 2.0], # 'left_ear', # 4 + [0.7, 9.5, 2.0], # 'right_ear', # 5 + [-1.4, 8.0, 2.0], # 'left_shoulder', # 6 + [1.4, 8.0, 2.0], # 'right_shoulder', # 7 + [-1.75, 6.0, 2.0], # 'left_elbow', # 8 + [1.75, 6.2, 2.0], # 'right_elbow', # 9 + [-1.75, 4.0, 2.0], # 'left_wrist', # 10 + [1.75, 4.2, 2.0], # 'right_wrist', # 11 + [-1.26, 4.0, 2.0], # 'left_hip', # 12 + [1.26, 4.0, 2.0], # 'right_hip', # 13 + [-1.4, 2.0, 2.0], # 'left_knee', # 14 + [1.4, 2.1, 2.0], # 'right_knee', # 15 + [-1.4, 0.0, 2.0], # 'left_ankle', # 16 + [1.4, 0.1, 2.0], # 'right_ankle', # 17 +]) +area_ref=((np.max(COCO_UPRIGHT_POSE[:, 0]) - np.min(COCO_UPRIGHT_POSE[:, 0])) * + (np.max(COCO_UPRIGHT_POSE[:, 1]) - np.min(COCO_UPRIGHT_POSE[:, 1]))) + +c, s = np.cos(np.deg2rad(45)), np.sin(np.deg2rad(45)) +rotate = np.array(((c, -s), (s, c))) +COCO_UPRIGHT_POSE_45=np.einsum('ij,kj->ki', rotate, np.copy(COCO_UPRIGHT_POSE[:,:2])) +area_ref_45=((np.max(COCO_UPRIGHT_POSE_45[:, 0]) - np.min(COCO_UPRIGHT_POSE_45[:, 0])) * + (np.max(COCO_UPRIGHT_POSE_45[:, 1]) - np.min(COCO_UPRIGHT_POSE_45[:, 1]))) + +#specialize for MPII +#TODO: modified to be specialized for MPII +class MpiiPart(Enum): + Headtop=0 + Neck=1 + RShoulder=2 + RElbow=3 + RWrist=4 + LShoulder=5 + LElbow=6 + LWrist=7 + RHip=8 + RKnee=9 + RAnkle=10 + LHip=11 + LKnee=12 + LAnkle=13 + Center=14 + Background=15 + +MpiiLimb=list(zip([0, 1, 2, 3, 1, 5, 6, 1, 14, 8, 9, 14, 11, 12], + [1, 2, 3, 4, 5, 6, 7, 14, 8, 9, 10, 11, 12, 13])) + +MpiiColor = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/eval.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/eval.py new file mode 100644 index 000000000..f2608cd0d --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/eval.py @@ -0,0 +1,275 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import numpy as np +import tensorflow as tf +from functools import partial +import multiprocessing +import matplotlib.pyplot as plt +from .processor import PostProcessor, Visualizer +from .utils import get_hr_conf,get_arrow_map,maps_to_numpy +from ..common import pad_image_shape + +def infer_one_img(model,postprocessor:PostProcessor,visualizer:Visualizer,img,img_id=-1,is_visual=False,enable_multiscale_search=False,debug=False): + img=img.numpy().astype(np.float32) + if(debug): + print(f"infer image id:{img_id}") + print(f"infer image shape:{img.shape}") + #TODO: use padded-scale that wouldn't casue deformation + img_h,img_w,_=img.shape + hin,win=model.hin,model.win + scale_rate=min(hin/img_h,win/img_w)*0.95 + scale_h,scale_w=int(scale_rate*img_h),int(scale_rate*img_w) + scale_image=cv2.resize(img,(scale_w,scale_h),interpolation=cv2.INTER_CUBIC) + padded_image,pad=pad_image_shape(scale_image,shape=(hin,win),pad_value=0.0) + #default channels_first + input_image=np.transpose(padded_image[np.newaxis,:,:,:].astype(np.float32),[0,3,1,2]) + predict_x = model.forward(input_image,is_train=False) + humans=postprocessor.process(predict_x)[0] + for human in humans: + human.bias(bias_w=-pad[2],bias_h=-pad[0]) + human.scale(scale_w=1/scale_rate,scale_h=1/scale_rate) + if(is_visual): + visualizer.visualize(image_batch=input_image, predict_x=predict_x, name=f"{img_id}_heatmap") + visualizer.visualize_result(image=img, humans=humans, name=f"{img_id}_result") + return humans + +def visualize(img,img_id,processed_img,pd_pif_maps,pd_paf_maps,humans,stride=8,save_dir="./save_dir"): + print(f"{len(humans)} human found!") + print("visualizing...") + os.makedirs(save_dir,exist_ok=True) + ori_img=np.clip(img*255.0,0.0,255.0).astype(np.uint8) + processed_img=np.clip(processed_img*255.0,0.0,255.0).astype(np.uint8) + #get ouput_result + vis_img=ori_img.copy() + for human in humans: + vis_img=human.draw_human(vis_img) + #decode result_maps + pd_pif_conf,pd_pif_vec,_,pd_pif_scale=pd_pif_maps + pd_paf_conf,pd_paf_src_vec,pd_paf_dst_vec,_,_,_,_,=pd_paf_maps + pd_pif_conf_show=np.amax(pd_pif_conf,axis=0) + pd_pif_hr_conf_show=np.amax(get_hr_conf(pd_pif_conf,pd_pif_vec,pd_pif_scale,stride=stride,thresh=0.1),axis=0) + pd_paf_conf_show=np.amax(pd_paf_conf,axis=0) + 
pd_paf_vec_show=np.zeros(shape=(pd_pif_hr_conf_show.shape[0],pd_pif_hr_conf_show.shape[1],3)).astype(np.int8) + pd_paf_vec_show=get_arrow_map(pd_paf_vec_show,pd_paf_conf,pd_paf_src_vec,pd_paf_dst_vec,thresh=0.1) + #plt draw + fig=plt.figure(figsize=(12,12)) + #show input image + a=fig.add_subplot(3,3,1) + a.set_title("input image") + plt.imshow(ori_img) + #show output result + a=fig.add_subplot(3,3,3) + a.set_title("output result") + plt.imshow(vis_img) + #pif + #show processed image + a=fig.add_subplot(3,3,4) + a.set_title("processed image") + plt.imshow(processed_img) + #show pif_conf_map + a=fig.add_subplot(3,3,5) + a.set_title("pif_conf_map") + plt.imshow(pd_pif_conf_show,alpha=0.8) + plt.colorbar() + #show pif_hr_conf_map + a=fig.add_subplot(3,3,6) + a.set_title("pif_hr_conf_map") + plt.imshow(pd_pif_hr_conf_show,alpha=0.8) + plt.colorbar() + #paf + a=fig.add_subplot(3,3,7) + a.set_title("processed image") + plt.imshow(processed_img) + #show paf_conf_map + a=fig.add_subplot(3,3,8) + a.set_title("paf_conf_map") + plt.imshow(pd_paf_conf_show,alpha=0.8) + plt.colorbar() + #show paf_vec_map + a=fig.add_subplot(3,3,9) + a.set_title("paf_vec_map") + plt.imshow(pd_paf_vec_show,alpha=0.8) + plt.colorbar() + #save fig + plt.savefig(os.path.join(save_dir,f"{img_id}_visualize.png")) + plt.close() + +def _map_fn(image_file,image_id): + #load data + image = tf.io.read_file(image_file) + image = tf.image.decode_jpeg(image, channels=3) # get RGB with 0~1 + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + return image,image_id + +#@tf.function(experimental_relax_shapes=True) +def evaluate(model,dataset,config,vis_num=30,total_eval_num=10000,enable_multiscale_search=False): + '''evaluate pipeline of Openpose class models + + input model and dataset, the evaluate pipeline will start automaticly + the evaluate pipeline will: + 1.loading newest model at path ./save_dir/model_name/model_dir/newest_model.npz + 2.perform inference and parsing over the chosen evaluate dataset + 3.visualize model output in evaluation in directory ./save_dir/model_name/eval_vis_dir + 4.output model metrics by calling dataset.official_eval() + + Parameters + ---------- + arg1 : tensorlayer.models.MODEL + a preset or user defined model object, obtained by Model.get_model() function + + arg2 : dataset + a constructed dataset object, obtained by Dataset.get_dataset() function + + arg3 : Int + an Integer indicates how many model output should be visualized + + arg4 : Int + an Integer indicates how many images should be evaluated + + Returns + ------- + None + ''' + print(f"enable multiscale_search:{enable_multiscale_search}") + model.load_weights(os.path.join(config.model.model_dir,"newest_model.npz"),format="npz_dict") + model.eval() + pd_anns=[] + vis_dir=config.eval.vis_dir + kpt_converter=dataset.get_output_kpt_cvter() + postprocessor=PostProcessor(parts=model.parts,limbs=model.limbs,colors=model.colors,hin=model.hin,win=model.win,\ + hout=model.hout,wout=model.wout,debug=False) + visualizer = Visualizer(save_dir=vis_dir) + + eval_dataset=dataset.get_eval_dataset() + dataset_size=dataset.get_eval_datasize() + paramed_map_fn=_map_fn + eval_dataset=eval_dataset.map(paramed_map_fn,num_parallel_calls=max(multiprocessing.cpu_count()//2,1)) # max(multiprocessing.cpu_count()//2,1)) + for eval_num,(img,img_id) in enumerate(eval_dataset): + img_id=img_id.numpy() + if(eval_num>=total_eval_num): + break + is_visual=(eval_num<=vis_num) + 
humans=infer_one_img(model,postprocessor,visualizer,img,img_id=img_id,is_visual=is_visual,enable_multiscale_search=enable_multiscale_search) + for human in humans: + ann={} + ann["category_id"]=1 + ann["image_id"]=int(img_id) + ann["id"]=human.get_global_id() + ann["area"]=human.get_area() + ann["score"]=human.get_score() + kpt_list=[] + for part_idx in range(0,model.n_pos): + if(part_idx not in human.body_parts): + kpt_list.append([-1000.0,-1000.0]) + else: + body_part=human.body_parts[part_idx] + kpt_list.append([body_part.get_x(),body_part.get_y()]) + ann["keypoints"]=kpt_converter(kpt_list) + pd_anns.append(ann) + if(eval_num%100==0): + print(f"evaluating {eval_num}/{dataset_size} ...") + + dataset.official_eval(pd_anns,vis_dir) + +#@tf.function(experimental_relax_shapes=True) +def test(model,dataset,config,vis_num=30,total_test_num=10000,enable_multiscale_search=False): + '''evaluate pipeline of Openpose class models + + input model and dataset, the evaluate pipeline will start automaticly + the evaluate pipeline will: + 1.loading newest model at path ./save_dir/model_name/model_dir/newest_model.npz + 2.perform inference and parsing over the chosen evaluate dataset + 3.visualize model output in evaluation in directory ./save_dir/model_name/eval_vis_dir + 4.output model metrics by calling dataset.official_eval() + + Parameters + ---------- + arg1 : tensorlayer.models.MODEL + a preset or user defined model object, obtained by Model.get_model() function + + arg2 : dataset + a constructed dataset object, obtained by Dataset.get_dataset() function + + arg3 : Int + an Integer indicates how many model output should be visualized + + arg4 : Int + an Integer indicates how many images should be evaluated + + Returns + ------- + None + ''' + print(f"enable multiscale_search:{enable_multiscale_search}") + model.load_weights(os.path.join(config.model.model_dir,"newest_model.npz"),format="npz_dict") + model.eval() + pd_anns=[] + vis_dir=config.test.vis_dir + kpt_converter=dataset.get_output_kpt_cvter() + postprocessor=PostProcessor(parts=model.parts,limbs=model.limbs,colors=model.colors,hin=model.hin,win=model.win,\ + hout=model.hout,wout=model.wout,debug=False) + visualizer = Visualizer(save_dir=vis_dir) + + test_dataset=dataset.get_test_dataset() + dataset_size=dataset.get_test_datasize() + paramed_map_fn=_map_fn + test_dataset=test_dataset.map(paramed_map_fn,num_parallel_calls=max(multiprocessing.cpu_count()//2,1)) + #test_dataset=test_dataset.map(paramed_map_fn,num_parallel_calls=1) + for test_num,(img,img_id) in enumerate(test_dataset): + img_id=img_id.numpy() + if(test_num>=total_test_num): + break + is_visual=(test_num<=vis_num) + humans=infer_one_img(model,postprocessor,visualizer,img,img_id=img_id,is_visual=is_visual,enable_multiscale_search=enable_multiscale_search) + for human in humans: + ann={} + ann["category_id"]=1 + ann["image_id"]=int(img_id) + ann["id"]=human.get_global_id() + ann["area"]=human.get_area() + ann["score"]=human.get_score() + kpt_list=[] + for part_idx in range(0,model.n_pos): + if(part_idx not in human.body_parts): + kpt_list.append([-1000.0,-1000.0]) + else: + body_part=human.body_parts[part_idx] + kpt_list.append([body_part.get_x(),body_part.get_y()]) + ann["keypoints"]=kpt_converter(kpt_list) + pd_anns.append(ann) + if(test_num%100==0): + print(f"testing {test_num}/{dataset_size} ...") + + dataset.official_test(pd_anns,vis_dir) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/model.py 
b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/model.py new file mode 100644 index 000000000..096d15321 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/model.py @@ -0,0 +1,313 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from tensorlayer import layers +from tensorlayer.models import Model +from tensorlayer.layers import BatchNorm2d, Conv2d, DepthwiseConv2d, LayerList, MaxPool2d +from .define import CocoColor +from .utils import pixel_shuffle,get_meshgrid, regulize_loss +from ..backbones import Resnet50_backbone +from ..metrics import MetricManager + + +class Pifpaf(Model): + def __init__(self,parts,limbs,colors=CocoColor,n_pos=17,n_limbs=19,hin=368,win=368,scale_size=32,backbone=None,pretraining=False,quad_size=2,quad_num=1, + lambda_pif_conf=1.0,lambda_pif_vec=1.0,lambda_pif_scale=1.0,lambda_paf_conf=1.0,lambda_paf_src_vec=1.0,lambda_paf_dst_vec=1.0, + lambda_paf_src_scale=1.0,lambda_paf_dst_scale=1.0,data_format="channels_first"): + super().__init__() + self.parts=parts + self.limbs=limbs + self.n_pos=n_pos + self.n_limbs=n_limbs + self.colors=colors + self.hin=hin + self.win=win + self.quad_size=quad_size + self.quad_num=quad_num + self.scale_size=scale_size + self.stride=int(self.scale_size/(self.quad_size**self.quad_num)) + self.lambda_pif_conf=lambda_pif_conf + self.lambda_pif_vec=lambda_pif_vec + self.lambda_pif_scale=lambda_pif_scale + self.lambda_paf_conf=lambda_paf_conf + self.lambda_paf_src_vec=lambda_paf_src_vec + self.lambda_paf_dst_vec=lambda_paf_dst_vec + self.lambda_paf_src_scale=lambda_paf_src_scale + self.lambda_paf_dst_scale=lambda_paf_dst_scale + self.data_format=data_format + self.mean = np.array([0.485, 0.456, 0.406])[np.newaxis,:,np.newaxis,np.newaxis] + self.std = np.array([0.229, 0.224, 0.225])[np.newaxis,:,np.newaxis,np.newaxis] + if(backbone==None): + self.backbone=Resnet50_backbone(data_format=data_format,use_pool=False,scale_size=self.scale_size,decay=0.99,eps=1e-4) + self.stride=int(self.stride/2) #because of not using max_pool layer of resnet50 + else: + self.backbone=backbone(data_format=data_format,scale_size=self.scale_size) + 
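# Worked example of the stride bookkeeping above, using the constructor defaults:
# scale_size=32, quad_size=2, quad_num=1 gives stride = 32 / 2**1 = 16, and the
# default ResNet-50 backbone is built without its max-pool stage, so the stride
# is halved again to 8. With hin = win = 368 the head output grid computed just
# below is 368 / 8 = 46 x 46 cells.
scale_size, quad_size, quad_num = 32, 2, 1
stride = scale_size // (quad_size ** quad_num)   # 16
stride //= 2                                      # 8: no max-pool in the backbone
hin = win = 368
print(hin // stride, win // stride)               # 46 46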
self.hout=int(hin/self.stride) + self.wout=int(win/self.stride) + #construct head + self.pif_head=self.PifHead(input_features=self.backbone.out_channels,n_pos=self.n_pos,n_limbs=self.n_limbs,\ + quad_size=self.quad_size,hout=self.hout,wout=self.wout,stride=self.stride,data_format=self.data_format) + self.paf_head=self.PafHead(input_features=self.backbone.out_channels,n_pos=self.n_pos,n_limbs=self.n_limbs,\ + quad_size=self.quad_size,hout=self.hout,wout=self.wout,stride=self.stride,data_format=self.data_format) + + @tf.function(experimental_relax_shapes=True) + def forward(self,x,is_train=False,ret_backbone=False): + # normalize + x = (x-self.mean)/self.std + # backbone feature extraction + backbone_x=self.backbone.forward(x) + # pif maps + pif_maps=self.pif_head.forward(backbone_x,is_train=is_train) + pif_conf, pif_vec, pif_bmin, pif_scale = pif_maps + # paf maps + paf_maps=self.paf_head.forward(backbone_x,is_train=is_train) + paf_conf, paf_src_vec, paf_dst_vec, paf_src_bmin, paf_dst_bmin, paf_src_scale, paf_dst_scale = paf_maps + + # construct predict_x + predict_x = { + "pif_conf": pif_conf, + "pif_vec": pif_vec, + "pif_bmin": pif_bmin, + "pif_scale": pif_scale, + "paf_conf": paf_conf, + "paf_src_vec": paf_src_vec, + "paf_dst_vec": paf_dst_vec, + "paf_src_bmin": paf_src_bmin, + "paf_dst_bmin": paf_dst_bmin, + "paf_src_scale": paf_src_scale, + "paf_dst_scale": paf_dst_scale + } + if(ret_backbone): + predict_x["backbone_features"] = backbone_x + return predict_x + + @tf.function(experimental_relax_shapes=True) + def infer(self,x): + predict_x = self.forward(x,is_train=False) + # pif maps + pif_conf, pif_vec, pif_scale = predict_x["pif_conf"], predict_x["pif_vec"], predict_x["pif_scale"] + # paf maps + paf_conf, paf_src_vec, paf_dst_vec = predict_x["paf_conf"], predict_x["paf_src_vec"], predict_x["paf_dst_vec"] + paf_src_scale, paf_dst_scale = predict_x["paf_src_scale"], predict_x["paf_dst_scale"] + return pif_conf,pif_vec,pif_scale, paf_conf, paf_src_vec, paf_dst_vec, paf_src_scale, paf_dst_scale + + def soft_clamp(self,x,max_value=5.0): + above_mask=tf.where(x>=max_value,1.0,0.0) + x_below=x*(1-above_mask) + x_soft_above=tf.where(x>=max_value,x,max_value) + x_above=(max_value+tf.math.log(1+x_soft_above-max_value))*above_mask + return x_below+x_above + + def Bce_loss(self,pd_conf,gt_conf,focal_gamma=1.0): + #shape conf:[batch,field,h,w] + batch_size=pd_conf.shape[0] + valid_mask=tf.logical_not(tf.math.is_nan(gt_conf)) + #select pd_conf + pd_conf=pd_conf[valid_mask] + #select gt_conf + gt_conf=gt_conf[valid_mask] + #calculate loss + bce_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=pd_conf,labels=gt_conf) + bce_loss=self.soft_clamp(bce_loss) + if(focal_gamma!=0.0): + p=tf.nn.sigmoid(pd_conf) + pt=p*gt_conf+(1-p)*(1-gt_conf) + focal=1.0-pt + if(focal_gamma!=1.0): + focal=(focal+1e-4)**focal_gamma + bce_loss=focal*bce_loss*0.5 + bce_loss=tf.reduce_sum(bce_loss)/batch_size + return bce_loss + + def Laplace_loss(self,pd_vec,pd_logb,gt_vec,gt_bmin): + #shape vec: [batch,field,2,h,w] + #shape logb: [batch,field,h,w] + batch_size=pd_vec.shape[0] + valid_mask=tf.logical_not(tf.math.is_nan(gt_vec[:,:,0:1,:,:])) + #select pd_vec + pd_vec_x=pd_vec[:,:,0:1,:,:][valid_mask] + pd_vec_y=pd_vec[:,:,1:2,:,:][valid_mask] + pd_vec=tf.stack([pd_vec_x,pd_vec_y]) + #select pd_logb + pd_logb=pd_logb[:,:,np.newaxis,:,:][valid_mask] + #select gt_vec + gt_vec_x=gt_vec[:,:,0:1,:,:][valid_mask] + gt_vec_y=gt_vec[:,:,1:2,:,:][valid_mask] + gt_vec=tf.stack([gt_vec_x,gt_vec_y]) + #select gt_bmin + 
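# The regression term computed below is a Laplace-style negative log-likelihood:
# with d = ||(pd_x - gt_x, pd_y - gt_y, bmin)||_2 and a bounded log-spread
# b = 3*tanh(logb/3), each element contributes b + soft_clamp(d * exp(-b)).
# A tiny numpy sketch of that per-element computation (numpy stands in for the
# tf ops used below):
import numpy as np

def laplace_elem(pd_xy, gt_xy, logb, bmin=0.1, clamp=5.0):
    d = np.linalg.norm([pd_xy[0] - gt_xy[0], pd_xy[1] - gt_xy[1], bmin])
    b = 3.0 * np.tanh(logb / 3.0)
    scaled = d * np.exp(-b)
    if scaled >= clamp:                          # mirrors soft_clamp above
        scaled = clamp + np.log(1 + scaled - clamp)
    return b + scaled

print(laplace_elem((10.0, 12.0), (10.5, 11.0), logb=0.2))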
gt_bmin=gt_bmin[:,:,np.newaxis,:,:][valid_mask] + #calculate loss + norm=tf.norm(tf.stack([pd_vec_x-gt_vec_x,pd_vec_y-gt_vec_y,gt_bmin]),axis=0) + pd_logb=3.0*tf.tanh(pd_logb/3.0) + scaled_norm=norm*tf.exp(-pd_logb) + scaled_norm=self.soft_clamp(scaled_norm) + laplace_loss=pd_logb+scaled_norm + laplace_loss=tf.reduce_sum(laplace_loss)/batch_size + return laplace_loss + + def Scale_loss(self,pd_scale,gt_scale,b=1.0): + batch_size=pd_scale.shape[0] + valid_mask=tf.logical_not(tf.math.is_nan(gt_scale)) + pd_scale=pd_scale[valid_mask] + pd_scale=tf.nn.softplus(pd_scale) + gt_scale=gt_scale[valid_mask] + scale_loss=tf.abs(pd_scale-gt_scale) + denominator=10.0*(0.1+gt_scale) + scale_loss=scale_loss/denominator + scale_loss=self.soft_clamp(scale_loss) + scale_loss=tf.reduce_sum(scale_loss)/batch_size + return scale_loss + + def cal_loss(self, predict_x, target_x, metric_manager:MetricManager, mask=None): + # calculate pif losses + # predict maps + pd_pif_conf, pd_pif_vec, pd_pif_logb, pd_pif_scale = \ + predict_x["pif_conf"], predict_x["pif_vec"], predict_x["pif_bmin"], predict_x["pif_scale"] + # target maps + gt_pif_conf, gt_pif_vec, gt_pif_bmin, gt_pif_scale = \ + target_x["pif_conf"], target_x["pif_vec"], target_x["pif_bmin"], target_x["pif_scale"] + # loss calculation + loss_pif_conf = self.Bce_loss(pd_pif_conf,gt_pif_conf)*self.lambda_pif_conf + loss_pif_vec = self.Laplace_loss(pd_pif_vec,pd_pif_logb,gt_pif_vec,gt_pif_bmin)*self.lambda_pif_vec + loss_pif_scale = self.Scale_loss(pd_pif_scale,gt_pif_scale)*self.lambda_pif_scale + + # calculate paf losses + # predict maps + pd_paf_conf, pd_paf_src_vec, pd_paf_dst_vec = \ + predict_x["paf_conf"], predict_x["paf_src_vec"], predict_x["paf_dst_vec"] + pd_paf_src_logb, pd_paf_dst_logb, pd_paf_src_scale, pd_paf_dst_scale = \ + predict_x["paf_src_bmin"], predict_x["paf_dst_bmin"], predict_x["paf_src_scale"], predict_x["paf_dst_scale"] + # target maps + gt_paf_conf, gt_paf_src_vec, gt_paf_dst_vec = \ + target_x["paf_conf"], target_x["paf_src_vec"], target_x["paf_dst_vec"] + gt_paf_src_bmin, gt_paf_dst_bmin, gt_paf_src_scale, gt_paf_dst_scale = \ + target_x["paf_src_bmin"], target_x["paf_dst_bmin"], target_x["paf_src_scale"], target_x["paf_dst_scale"] + # loss calculation + loss_paf_conf = self.Bce_loss(pd_paf_conf,gt_paf_conf)*self.lambda_paf_conf + loss_paf_src_scale = self.Scale_loss(pd_paf_src_scale,gt_paf_src_scale)*self.lambda_paf_src_scale + loss_paf_dst_scale = self.Scale_loss(pd_paf_dst_scale,gt_paf_dst_scale)*self.lambda_paf_dst_scale + loss_paf_src_vec = self.Laplace_loss(pd_paf_src_vec,pd_paf_src_logb,gt_paf_src_vec,gt_paf_src_bmin)*self.lambda_paf_src_vec + loss_paf_dst_vec = self.Laplace_loss(pd_paf_dst_vec,pd_paf_dst_logb,gt_paf_dst_vec,gt_paf_dst_bmin)*self.lambda_paf_dst_vec + + # regularize loss + loss_re = regulize_loss(self,2e-4) + + # calculate total loss + total_loss=loss_pif_conf + loss_pif_vec + loss_pif_scale+\ + loss_paf_conf + loss_paf_src_scale + loss_paf_dst_scale + loss_paf_src_vec +loss_paf_dst_vec + loss_re + # metrics + # pif metrics + metric_manager.update("model/loss_pif_conf", loss_pif_conf) + metric_manager.update("model/loss_pif_vec", loss_pif_vec) + metric_manager.update("model/loss_pif_scale", loss_pif_scale) + # paf metrics + metric_manager.update("model/loss_paf_conf", loss_paf_conf) + metric_manager.update("model/loss_paf_src_vec", loss_paf_src_vec) + metric_manager.update("model/loss_paf_dst_vec", loss_paf_dst_vec) + metric_manager.update("model/loss_paf_src_scale", loss_paf_src_scale) + 
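# The objective assembled in cal_loss() above is the plain weighted sum
#   total = sum_k lambda_k * loss_k + weight_decay,
# with every lambda_k defaulting to 1.0 and the decay term coming from
# regulize_loss(model, 2e-4). A sketch with illustrative per-term values:
example_terms = {"pif_conf": 0.31, "pif_vec": 0.84, "pif_scale": 0.12,
                 "paf_conf": 0.44, "paf_src_vec": 0.91, "paf_dst_vec": 0.88,
                 "paf_src_scale": 0.10, "paf_dst_scale": 0.11}
weight_decay_term = 0.05              # regulize_loss(model, 2e-4), example value
total_loss = sum(example_terms.values()) + weight_decay_term
print(total_loss)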
metric_manager.update("model/loss_paf_dst_scale", loss_paf_dst_scale) + # regularize + metric_manager.update("model/loss_re", loss_re) + # total + metric_manager.update("model/total_loss", total_loss) + return total_loss + + class PifHead(Model): + def __init__(self,input_features=2048,n_pos=19,n_limbs=19,quad_size=2,hout=8,wout=8,stride=8,data_format="channels_first"): + super().__init__() + self.input_features=input_features + self.n_pos=n_pos + self.n_limbs=n_limbs + self.hout=hout + self.wout=wout + self.stride=stride + self.quad_size=quad_size + self.out_features=self.n_pos*5*(self.quad_size**2) + self.data_format=data_format + self.tf_data_format="NCHW" if self.data_format=="channels_first" else "NHWC" + self.main_block=Conv2d(n_filter=self.out_features,in_channels=self.input_features,filter_size=(1,1),data_format=self.data_format) + + def forward(self,x,is_train=False): + x=self.main_block.forward(x) + x=pixel_shuffle(x,scale=2) + low_cut=int((self.quad_size-1)//2) + high_cut=int(tf.math.ceil((self.quad_size-1)/2.0)) + hout,wout=x.shape[2],x.shape[3] + x=tf.reshape(x,[-1,self.n_pos,5,hout,wout]) + pif_conf=x[:,:,0,:,:] + pif_vec=x[:,:,1:3,:,:] + pif_logb=x[:,:,3,:,:] + pif_scale=x[:,:,4,:,:] + #restore vec_maps in inference + if(is_train==False): + pif_conf=tf.nn.sigmoid(pif_conf) + pif_scale=tf.math.softplus(pif_scale) + return pif_conf,pif_vec,pif_logb,pif_scale + + class PafHead(Model): + def __init__(self,input_features=2048,n_pos=19,n_limbs=19,quad_size=2,hout=46,wout=46,stride=8,data_format="channels_first"): + super().__init__() + self.input_features=input_features + self.n_pos=n_pos + self.n_limbs=n_limbs + self.quad_size=quad_size + self.hout=hout + self.wout=wout + self.stride=stride + self.out_features=self.n_limbs*9*(self.quad_size**2) + self.data_format=data_format + self.tf_data_format="NCHW" if self.data_format=="channels_first" else "NHWC" + self.main_block=Conv2d(n_filter=self.out_features,in_channels=self.input_features,filter_size=(1,1),data_format=self.data_format) + + def forward(self,x,is_train=False): + x=self.main_block.forward(x) + x=pixel_shuffle(x,scale=2) + low_cut=int((self.quad_size-1)//2) + high_cut=int(tf.math.ceil((self.quad_size-1)/2.0)) + hout,wout=x.shape[2],x.shape[3] + x=tf.reshape(x,[-1,self.n_limbs,9,hout,wout]) + paf_conf=x[:,:,0,:,:] + paf_src_vec=x[:,:,1:3,:,:] + paf_dst_vec=x[:,:,3:5,:,:] + paf_src_logb=x[:,:,5,:,:] + paf_dst_logb=x[:,:,6,:,:] + paf_src_scale=x[:,:,7,:,:] + paf_dst_scale=x[:,:,8,:,:] + #restore vec_maps in inference + if(is_train==False): + paf_conf=tf.nn.sigmoid(paf_conf) + paf_src_scale=tf.math.softplus(paf_src_scale) + paf_dst_scale=tf.math.softplus(paf_dst_scale) + return paf_conf,paf_src_vec,paf_dst_vec,paf_src_logb,paf_dst_logb,paf_src_scale,paf_dst_scale diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/processor.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/processor.py new file mode 100644 index 000000000..93d8b1876 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/processor.py @@ -0,0 +1,620 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import cv2 +import heapq +import numpy as np +import matplotlib.pyplot as plt +from collections import defaultdict + +from .utils import get_hr_conf,get_arrow_map,nan2zero_dict +from .utils import get_pifmap,get_pafmap, restore_pif_maps, restore_paf_maps +from ..human import Human,BodyPart +from ..processor import BasicVisualizer +from ..processor import BasicPreProcessor +from ..processor import BasicPostProcessor +from ..processor import PltDrawer +from ..common import to_numpy_dict, image_float_to_uint8 + +class PreProcessor(BasicPreProcessor): + def __init__(self,parts,limbs,hin,win,hout,wout,colors=None,data_format="channels_first", *args, **kargs): + self.hin=hin + self.win=win + self.hout=hout + self.wout=wout + self.parts=parts + self.limbs=limbs + self.data_format=data_format + self.colors=colors if (colors!=None) else (len(self.parts)*[[0,255,0]]) + + def process(self,annos, mask, bbxs): + mask_out=cv2.resize(mask[0],(self.wout,self.hout)) + pif_conf,pif_vec,pif_bmin,pif_scale = get_pifmap(annos, mask_out, self.hin, self.win, self.hout, self.wout, self.parts, self.limbs, data_format=self.data_format) + paf_conf,paf_src_vec,paf_dst_vec,paf_src_bmin,paf_dst_bmin,paf_src_scale,paf_dst_scale = get_pafmap(annos, mask_out, self.hin, self.win, self.hout, self.wout, self.parts, self.limbs, data_format=self.data_format) + target_x = { + "pif_conf": pif_conf, + "pif_vec": pif_vec, + "pif_bmin": pif_bmin, + "pif_scale": pif_scale, + "paf_conf": paf_conf, + "paf_src_vec": paf_src_vec, + "paf_dst_vec": paf_dst_vec, + "paf_src_bmin": paf_src_bmin, + "paf_dst_bmin": paf_dst_bmin, + "paf_src_scale": paf_src_scale, + "paf_dst_scale": paf_dst_scale + } + return target_x + +class PostProcessor(BasicPostProcessor): + def __init__(self,parts,limbs,hin,win,hout,wout,colors=None,thresh_pif=0.3,thresh_paf=0.1,thresh_ref_pif=0.3,thresh_ref_paf=0.1,\ + thresh_gen_ref_pif=0.1,part_num_thresh=4,score_thresh=0.1,reduction=2,min_scale=4,greedy_match=True,reverse_match=True,\ + data_format="channels_first",debug=False, *args, **kargs): + self.parts=parts + self.limbs=limbs + self.colors=colors if (colors!=None) else (len(self.parts)*[[0,255,0]]) + self.n_pos=len(self.parts) + self.n_limbs=len(self.limbs) + self.hin=hin + self.win=win + self.hout=hout + self.wout=wout + self.stride=int(self.hin/self.hout) + self.thresh_pif=thresh_pif + self.thresh_paf=thresh_paf + 
self.thresh_ref_pif=thresh_ref_pif + self.thresh_ref_paf=thresh_ref_paf + self.thresh_gen_ref_pif=thresh_gen_ref_pif + self.part_num_thresh=part_num_thresh + self.score_thresh=score_thresh + self.reduction=reduction + self.min_scale=min_scale + self.greedy_match=greedy_match + self.reverse_match=reverse_match + self.data_format=data_format + self.debug=debug + #by source generation + self.by_source=defaultdict(dict) + for limb_idx,(src_idx,dst_idx) in enumerate(self.limbs): + self.by_source[src_idx][dst_idx]=(limb_idx,True) + self.by_source[dst_idx][src_idx]=(limb_idx,False) + #TODO:whether add score weight for each parts + + def process(self, predict_x, resize=True): + predict_x = to_numpy_dict(predict_x) + batch_size = list(predict_x.values())[0].shape[0] + humans_list = [] + for batch_idx in range(0,batch_size): + predict_x_one = {key:value[batch_idx] for key,value in predict_x.items()} + humans_list.append(self.process_one(predict_x_one, resize=resize)) + return humans_list + + def process_one(self,predict_x, resize=True): + # shape: + # conf_map:[field_num,hout,wout] + # vec_map:[field_num,2,hout,wout] + # scale_map:[field_num,hout,wout] + # decode pif_maps,paf_maps + pif_conf, pif_vec, pif_scale = predict_x["pif_conf"], predict_x["pif_vec"], predict_x["pif_scale"] + paf_conf, paf_src_vec, paf_dst_vec, paf_src_scale, paf_dst_scale = predict_x["paf_conf"], predict_x["paf_src_vec"],\ + predict_x["paf_dst_vec"], predict_x["paf_src_scale"], predict_x["paf_dst_scale"] + self.debug_print(f"exam pif shapes: pif_conf:{pif_conf.shape} pif_vec:{pif_vec.shape} pif_scale:{pif_scale.shape}") + self.debug_print(f"exam paf shapes: paf_conf:{paf_conf.shape} paf_src_vec:{paf_src_vec.shape} paf_dst_vec:{paf_dst_vec.shape} "\ + +f"paf_src_scale:{paf_src_scale.shape} paf_dst_scale:{paf_dst_scale.shape}") + + # restore maps + pif_vec, pif_scale = restore_pif_maps(pif_vec_map_batch=pif_vec, pif_scale_map_batch=pif_scale, stride=self.stride) + paf_src_vec, paf_dst_vec, paf_src_scale, paf_dst_scale = restore_paf_maps(paf_src_vec_map_batch=paf_src_vec,\ + paf_dst_vec_map_batch=paf_dst_vec, paf_src_scale_map_batch=paf_src_scale,\ + paf_dst_scale_map_batch=paf_dst_scale, stride=self.stride) + + #get pif_hr_conf + pif_hr_conf=get_hr_conf(pif_conf,pif_vec,pif_scale,stride=self.stride,thresh=self.thresh_gen_ref_pif,debug=False) + self.debug_print(f"test hr_conf") + for pos_idx in range(0,self.n_pos): + self.debug_print(f"test hr_conf idx:{pos_idx} max_conf:{np.max(pif_conf[pos_idx])} max_hr_conf:{np.max(pif_hr_conf[pos_idx])}") + #generate pose seeds according to refined pif_conf + seeds=[] + for pos_idx in range(0,self.n_pos): + mask_conf=pif_conf[pos_idx]>self.thresh_pif + cs=pif_conf[pos_idx,mask_conf] + xs=pif_vec[pos_idx,0,mask_conf] + ys=pif_vec[pos_idx,1,mask_conf] + scales=pif_scale[pos_idx,mask_conf] + hr_cs=self.field_to_scalar(xs,ys,pif_hr_conf[pos_idx]) + ref_cs=0.9*hr_cs+0.1*cs + mask_ref_conf=ref_cs>self.thresh_ref_pif + for ref_c,x,y,scale in zip(ref_cs[mask_ref_conf],xs[mask_ref_conf],ys[mask_ref_conf],scales[mask_ref_conf]): + seeds.append((ref_c,pos_idx,x,y,scale)) + #print(f"seed gen pos_idx:{pos_idx} ref_c:{ref_c} x:{x} y:{y} scale:{scale}") + self.debug_print(f"test before sort len_seeds:{len(seeds)}") + seeds=sorted(seeds,reverse=True) + self.debug_print(f"test after sort len_seeds:{len(seeds)}") + #generate connection seeds according to paf_map + cif_floor=0.1 + forward_list=[] + backward_list=[] + for limb_idx in range(0,self.n_limbs): + src_idx,dst_idx=self.limbs[limb_idx] + 
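# Seed scoring above blends the coarse per-cell confidence with the value of the
# accumulated high-resolution map sampled at the regressed (x, y) position:
#   ref_c = 0.9 * hr_conf[at regressed location] + 0.1 * c
# A small numpy sketch of that lookup; the real code goes through
# field_to_scalar, and the exact indexing order here is illustrative.
import numpy as np

hr_conf = np.zeros((46 * 8, 46 * 8), dtype=np.float32)
hr_conf[120, 200] = 0.9                       # pretend an accumulated peak
xs, ys, cs = np.array([199.6]), np.array([120.3]), np.array([0.5])
hr_cs = hr_conf[np.round(ys).astype(int), np.round(xs).astype(int)]
ref_cs = 0.9 * hr_cs + 0.1 * cs
print(ref_cs)                                  # ~0.86, kept if above thresh_ref_pif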
mask_conf=paf_conf[limb_idx]>self.thresh_paf + score=paf_conf[limb_idx,mask_conf] + src_x=paf_src_vec[limb_idx,0,mask_conf] + src_y=paf_src_vec[limb_idx,1,mask_conf] + dst_x=paf_dst_vec[limb_idx,0,mask_conf] + dst_y=paf_dst_vec[limb_idx,1,mask_conf] + src_scale=paf_src_scale[limb_idx,mask_conf] + dst_scale=paf_dst_scale[limb_idx,mask_conf] + #generate backward (merge score with the src pif_score) + cifhr_b=self.field_to_scalar(src_x,src_y,pif_hr_conf[src_idx]) + score_b=score*(cif_floor+(1-cif_floor)*cifhr_b) + mask_b=score_b>self.thresh_ref_paf + backward_list.append([score_b[mask_b],dst_x[mask_b],dst_y[mask_b],dst_scale[mask_b],src_x[mask_b],src_y[mask_b],src_scale[mask_b]]) + #generate forward connections (merge score with the dst pif_score) + cifhr_f=self.field_to_scalar(dst_x,dst_y,pif_hr_conf[dst_idx]) + score_f=score*(cif_floor+(1-cif_floor)*cifhr_f) + mask_f=score_f>self.thresh_ref_paf + forward_list.append([score_f[mask_f],src_x[mask_f],src_y[mask_f],src_scale[mask_f],dst_x[mask_f],dst_y[mask_f],dst_scale[mask_f]]) + #debug + mask_all=np.sum(mask_conf) + self.debug_print(f"test limb_gen limb_idx:{limb_idx} {self.parts(self.limbs[limb_idx][0])}-{self.parts(self.limbs[limb_idx][1])} max_conf:{np.max(paf_conf[limb_idx])} mask_all:{mask_all}") + if(mask_all>0): + self.debug_print(f"test bk_list_gen: limb_idx:{limb_idx} max_score:{np.max(score)} max_cifhr_b:{np.max(cifhr_b)} max_score_b:{np.max(score_b)} mask_num_b:{np.sum(mask_b)}") + self.debug_print(f"test fw_list_gen: limb_idx:{limb_idx} max_score:{np.max(score)} max_cifhr_f:{np.max(cifhr_f)} max_score_f:{np.max(score_f)} mask_num_f:{np.sum(mask_f)}") + self.debug_print("") + #greedy assemble + #TODO: further check! + occupied=np.zeros(shape=(self.n_pos,int(pif_hr_conf.shape[1]/self.reduction),int(pif_hr_conf.shape[2]/self.reduction))) + annotations=[] + self.debug_print(f"test seeds_num:{len(seeds)}") + for c,pos_idx,x,y,scale in seeds: + check_occupy=self.check_occupy(occupied,pos_idx,x,y,reduction=self.reduction) + if(check_occupy): + continue + #ann meaning: ann[0]=conf ann[1]=x ann[2]=y ann[3]=scale + ann=np.zeros(shape=(self.n_pos,4)) + ann[:,0]=-1.0 + ann[pos_idx]=np.array([c,x,y,scale]) + ann=self.grow(ann,forward_list,backward_list,reverse_match=self.reverse_match) + annotations.append(ann) + #put the ann into occupacy + for ann_pos_idx in range(0,self.n_pos): + occupied=self.put_occupy(occupied,ann_pos_idx,ann[ann_pos_idx,1],ann[ann_pos_idx,2],ann[ann_pos_idx,3],\ + reduction=self.reduction,min_scale=self.min_scale) + #point-wise nms + if(len(annotations)!=0): + annotations=self.kpt_nms(annotations) + #convert to humans + ret_humans=[] + for ann_idx,ann in enumerate(annotations): + self.debug_print(f"\nchecking human found:{ann_idx}") + ret_human=Human(parts=self.parts,limbs=self.limbs,colors=self.colors) + for pos_idx in range(0,self.n_pos): + score,x,y,scale=ann[pos_idx] + if(score>0.0): + self.debug_print(f"{self.parts(pos_idx)} x:{x} y:{y} scale:{scale} score:{score}") + ret_human.body_parts[pos_idx]=BodyPart(parts=self.parts,u_idx=f"{ann_idx}-{pos_idx}",part_idx=pos_idx,\ + x=x,y=y,score=score) + #check for num + if(ret_human.get_partnum()=0 and x=0 and y=field_w or y<0 or y>=field_h): + return True + if(occupied[pos_idx,y,x]!=0): + return True + else: + return False + + #mark the postion as occupied + def put_occupy(self,occupied,pos_idx,x,y,scale,reduction=2,min_scale=4,value=1): + _,field_h,field_w=occupied.shape + x,y=np.round(x/reduction),np.round(y/reduction) + 
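# A compact sketch of the duplicate-suppression test used by the greedy decoder
# above: a seed is skipped when its reduced-resolution cell lies outside the
# occupancy field or has already been claimed by an earlier, higher-scoring
# annotation. The exact bounds test is assumed here; the function name is
# illustrative.
import numpy as np

def check_occupy_sketch(occupied, pos_idx, x, y, reduction=2):
    _, field_h, field_w = occupied.shape
    cx = int(np.round(x / reduction))
    cy = int(np.round(y / reduction))
    if cx < 0 or cx >= field_w or cy < 0 or cy >= field_h:
        return True                        # outside the field counts as occupied
    return occupied[pos_idx, cy, cx] != 0

occupied = np.zeros((17, 23, 23))
occupied[0, 15, 10] = 1
print(check_occupy_sketch(occupied, 0, 20.4, 30.2))   # True: cell (15, 10) is taken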
size=np.round(max(min_scale/reduction,scale/reduction)) + min_x=max(0,int(x-size)) + max_x=max(min_x+1,min(field_w,int(x+size)+1)) + min_y=max(0,int(y-size)) + max_y=max(min_y+1,min(field_h,int(y+size)+1)) + occupied[pos_idx,min_y:max_y,min_x:max_x]+=value + return occupied + + #keypoint-wise nms + def kpt_nms(self,annotations): + max_x=int(max([np.max(ann[:,1]) for ann in annotations])+1) + max_y=int(max([np.max(ann[:,2]) for ann in annotations])+1) + occupied=np.zeros(shape=(self.n_pos,max_y,max_x)) + annotations=sorted(annotations,key=lambda ann: -np.sum(ann[:,0])) + for ann in annotations: + for pos_idx in range(0,self.n_pos): + _,x,y,scale=ann[pos_idx] + if(self.check_occupy(occupied,pos_idx,x,y,reduction=2)): + ann[pos_idx,0]=0 + else: + self.put_occupy(occupied,pos_idx,x,y,scale,reduction=2,min_scale=4) + annotations=sorted(annotations,key=lambda ann: -np.sum(ann[:,0])) + return annotations + + #get closest matching connection and blend them + def find_connection(self,connections,x,y,scale,connection_method="blend",thresh_second=0.01): + sigma_filter=2.0*scale + sigma_gaussian=0.25*(scale**2) + first_idx,first_score=-1,0.0 + second_idx,second_score=-1,0.0 + #traverse connections to find the highest score connection weighted by distance + score_f,src_x,src_y,src_scale,dst_x,dst_y,dst_scale=connections + con_num=score_f.shape[0] + for con_idx in range(0,con_num): + con_score=score_f[con_idx] + con_src_x,con_src_y,_=src_x[con_idx],src_y[con_idx],src_scale[con_idx] + #ignore connections with src_kpts too distant + if(xcon_src_x+sigma_filter): + continue + if(ycon_src_y+sigma_filter): + continue + distance=(con_src_x-x)**2+(con_src_y-y)**2 + w_score=np.exp(-0.5*distance/sigma_gaussian)*con_score + #replace to find the first and second match connections + if(w_score>first_score): + second_idx=first_idx + second_score=first_score + first_idx=con_idx + first_score=w_score + elif(w_score>second_score): + second_idx=con_idx + second_score=w_score + #not find match connections + if(first_idx==-1 or first_score==0.0): + return 0.0,0.0,0.0,0.0 + #method max: + if(connection_method=="max"): + return first_score,dst_x[first_idx],dst_y[first_idx],dst_scale[first_idx] + #method blend: + elif(connection_method=="blend"): + #ignore second connection with score too slow + if(second_idx==-1 or second_score(dst_scale[first_idx]**2/4.0)): + return first_score*0.5,dst_x[first_idx],dst_y[first_idx],dst_scale[first_idx] + #otherwise return the blended two connection + blend_score=0.5*(first_score+second_score) + blend_x=(dst_x[first_idx]*first_score+dst_x[second_idx]*second_score)/(first_score+second_score) + blend_y=(dst_y[first_idx]*first_score+dst_y[second_idx]*second_score)/(first_score+second_score) + blend_scale=(dst_scale[first_idx]*first_score+dst_scale[second_idx]*second_score)/(first_score+second_score) + return blend_score,blend_x,blend_y,blend_scale + + #get connection given a part, forwad_list and backward_list generated from paf maps + def get_connection(self,ann,src_idx,dst_idx,forward_list,backward_list,connection_method="blend",reverse_match=True): + limb_idx,forward_flag=self.by_source[src_idx][dst_idx] + if(forward_flag): + forward_cons,backward_cons=forward_list[limb_idx],backward_list[limb_idx] + else: + forward_cons,backward_cons=backward_list[limb_idx],forward_list[limb_idx] + self.debug_print(f"connecting {self.parts(src_idx)}-{self.parts(dst_idx)}") + c,x,y,scale=ann[src_idx] + #forward matching + 
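# get_connection() below performs a forward match and, optionally, a reverse
# check. The candidate search it relies on weights every PAF candidate whose
# source end lies within a 2*scale window of the current joint by a Gaussian of
# the squared distance, keeps the two best, and blends them. A condensed sketch,
# assumed to follow the usual openpifpaf-style "blend" rule; names and the
# candidate tuple layout are illustrative.
import numpy as np

def find_connection_sketch(cands, x, y, scale, thresh_second=0.01):
    # cands: list of (score, src_x, src_y, dst_x, dst_y, dst_scale)
    sigma_filter, sigma_gauss = 2.0 * scale, 0.25 * scale ** 2
    best = []
    for score, sx, sy, dx, dy, dscale in cands:
        if abs(sx - x) > sigma_filter or abs(sy - y) > sigma_filter:
            continue                      # source end too far from the joint
        w = score * np.exp(-0.5 * ((sx - x) ** 2 + (sy - y) ** 2) / sigma_gauss)
        best.append((w, (dx, dy, dscale)))
    best.sort(reverse=True)
    if not best:
        return 0.0, 0.0, 0.0, 0.0
    w1, (dx1, dy1, ds1) = best[0]
    if len(best) < 2 or best[1][0] < thresh_second * w1:
        return 0.5 * w1, dx1, dy1, ds1    # only one usable candidate
    w2, (dx2, dy2, ds2) = best[1]
    wsum = w1 + w2
    return 0.5 * wsum, (dx1*w1 + dx2*w2)/wsum, (dy1*w1 + dy2*w2)/wsum, (ds1*w1 + ds2*w2)/wsum

print(find_connection_sketch([(0.8, 10.0, 10.0, 30.0, 12.0, 4.0)], 9.5, 10.2, scale=3.0))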
fc,fx,fy,fscale=self.find_connection(forward_cons,x,y,scale,connection_method=connection_method) + if(fc==0.0): + return 0.0,0.0,0.0,0.0 + merge_score=np.sqrt(fc*c) + #reverse matching + if(reverse_match): + rc,rx,ry,_=self.find_connection(backward_cons,fx,fy,fscale,connection_method=connection_method) + #couldn't find a reverse one + if(rc==0.0): + return 0.0,0.0,0.0,0.0 + #reverse finding is distant from the orginal founded one + if abs(x-rx)+abs(y-ry)>scale: + return 0.0,0.0,0.0,0.0 + #successfully found connection + return merge_score,fx,fy,fscale + + #greedy matching pif seeds with forward and backward connections generated from paf maps + def grow(self,ann,forward_list,backward_list,reverse_match=True): + frontier = [] + in_frontier = set() + #add the point to assemble frontier by_source + def add_frontier(ann,src_idx): + #traverse all the part that the current part connect to + for dst_idx,(_,_) in self.by_source[src_idx].items(): + #ignore points that already assigned + if(ann[dst_idx,0]>0): + continue + #ignore limbs that already in the frontier + if((src_idx,dst_idx) in in_frontier): + continue + #otherwise put it into frontier + self.debug_print(f"test adding frontier {self.parts(src_idx)}-{self.parts(dst_idx)} src_score:{ann[src_idx,0]} dst_score:{ann[dst_idx,0]}") + max_possible_score=np.sqrt(ann[src_idx,0]) + heapq.heappush(frontier,(-max_possible_score,src_idx,dst_idx)) + in_frontier.add((src_idx,dst_idx)) + + #find matching connections from frontier + def get_frontier(ann): + while frontier: + pop_frontier=heapq.heappop(frontier) + _,src_idx,dst_idx=pop_frontier + #ignore points that assigned by other frontier + if(ann[dst_idx,0]>0.0): + continue + #find conection + fc,fx,fy,fscale=self.get_connection(ann,src_idx,dst_idx,forward_list,backward_list,reverse_match=reverse_match) + self.debug_print(f"get connection fc:{fc} fx:{fx} fy:{fy} fscale:{fscale}") + if(fc==0.0): + continue + return fc,fx,fy,fscale,src_idx,dst_idx + return None + + #initially add joints to frontier + self.debug_print("\nbegin grow!:") + for pos_idx in range(0,self.n_pos): + if(ann[pos_idx,0]>0.0): + add_frontier(ann,pos_idx) + #recurrently find the matched connections + while True: + find_match=get_frontier(ann) + if(find_match==None): + break + score,x,y,scale,src_idx,dst_idx=find_match + if(ann[dst_idx,0]>0.0): + continue + ann[dst_idx,0]=score + ann[dst_idx,1]=x + ann[dst_idx,2]=y + ann[dst_idx,3]=scale + self.debug_print(f"grow part:{self.parts(dst_idx)} score:{score} x:{x} y:{y} scale:{scale}\n") + add_frontier(ann,dst_idx) + #finished matching a person + return ann + +class Visualizer(BasicVisualizer): + def __init__(self, save_dir="./save_dir", debug=False, *args, **kargs): + self.save_dir = save_dir + self.debug=debug + + def debug_print(self,msg, debug=False): + if(self.debug or debug): + print(msg) + + def visualize(self, image_batch, predict_x, mask_batch=None, humans_list=None, name="vis"): + # mask + if(mask_batch is None): + mask_batch = np.ones_like(image_batch) + # transform + image_batch = np.transpose(image_batch,[0,2,3,1]) + mask_batch = np.transpose(mask_batch,[0,2,3,1]) + # defualt values + # TODO: pass config values + stride = 8 + + # predict maps + predict_x = nan2zero_dict(predict_x) + pd_pif_conf_batch, pd_pif_vec_batch, pd_pif_scale_batch = predict_x["pif_conf"], predict_x["pif_vec"], predict_x["pif_scale"] + pd_paf_conf_batch, pd_paf_src_vec_batch, pd_paf_dst_vec_batch, pd_paf_src_scale_batch, pd_paf_dst_scale_batch =\ + predict_x["paf_conf"], predict_x["paf_src_vec"], 
predict_x["paf_dst_vec"], predict_x["paf_src_scale"], predict_x["paf_dst_scale"] + + # restore maps + # pif maps + pd_pif_vec_batch, pd_pif_scale_batch = restore_pif_maps(pd_pif_vec_batch, pd_pif_scale_batch) + self.debug_print(f"test pd_pif_vec_batch.shape:{pd_pif_vec_batch.shape}") + # paf maps + pd_paf_src_vec_batch, pd_paf_dst_vec_batch, pd_paf_src_scale_batch, pd_paf_dst_scale_batch = \ + restore_paf_maps(pd_paf_src_vec_batch, pd_paf_dst_vec_batch, pd_paf_src_scale_batch, pd_paf_dst_scale_batch) + self.debug_print(f"test visualize shape: pd_paf_src_vec_batch:{pd_paf_src_vec_batch.shape}") + self.debug_print(f"test visualize shape: pd_paf_dst_vec_batch:{pd_paf_dst_vec_batch.shape}") + self.debug_print(f"test visualize shape: pd_paf_src_scale_batch:{pd_paf_src_scale_batch.shape}") + self.debug_print(f"test visualize shape: pd_paf_dst_scale_batch:{pd_paf_dst_scale_batch.shape}") + + batch_size = image_batch.shape[0] + for b_idx in range(0,batch_size): + image, mask = image_batch[b_idx], mask_batch[b_idx] + # pd map + pd_pif_conf, pd_pif_vec, pd_pif_scale = pd_pif_conf_batch[b_idx], pd_pif_vec_batch[b_idx], pd_pif_scale_batch[b_idx] + pd_paf_conf, pd_paf_src_vec, pd_paf_dst_vec = pd_paf_conf_batch[b_idx], pd_paf_src_vec_batch[b_idx], pd_paf_dst_vec_batch[b_idx] + + # draw maps + # begin draw + pltdrawer = PltDrawer(draw_row=2, draw_col=3, dpi=400) + + # draw origin image + origin_image = image_float_to_uint8(image.copy()) + pltdrawer.add_subplot(origin_image, "origin_image") + + + # draw pd_pif_conf + pd_pif_conf_show = np.amax(pd_pif_conf, axis=0) + pltdrawer.add_subplot(pd_pif_conf_show, "pd pif_conf", color_bar=True) + + # darw pd_pif_hr_conf + pd_pif_hr_conf = get_hr_conf(pd_pif_conf, pd_pif_vec, pd_pif_scale, stride) + pd_pif_hr_conf_show = np.amax(pd_pif_hr_conf, axis=0) + pltdrawer.add_subplot(pd_pif_hr_conf_show, "pd pif_hr_conf", color_bar=True) + + # draw mask + pltdrawer.add_subplot(mask, "mask") + + # darw pd paf_conf + pd_paf_conf_show = np.amax(pd_paf_conf, axis=0) + pltdrawer.add_subplot(pd_paf_conf_show, "pd paf_conf", color_bar=True) + + # draw pd paf_vec + hout, wout = pd_paf_conf.shape[1], pd_paf_conf.shape[2] + pd_paf_vec_map_show = np.zeros(shape=(hout*stride,wout*stride,3)).astype(np.int8) + pd_paf_vec_map_show = get_arrow_map(pd_paf_vec_map_show, pd_paf_conf, pd_paf_src_vec, pd_paf_dst_vec) + pltdrawer.add_subplot(pd_paf_vec_map_show, "pd paf_vec") + # save fig + pltdrawer.savefig(f"{self.save_dir}/{name}_{b_idx}_paf.png") + + # draw results + if(humans_list is not None): + humans = humans_list[b_idx] + self.visualize_result(image, humans, f"{name}_{b_idx}_result") + + + def visualize_compare(self, image_batch, predict_x, target_x, mask_batch=None, humans_list=None, name="vis"): + # mask + if(mask_batch is None): + mask_batch = np.ones_like(image_batch) + # transform + image_batch = np.transpose(image_batch,[0,2,3,1]) + mask_batch = np.transpose(mask_batch,[0,2,3,1]) + # defualt values + # TODO: pass config values + stride = 8 + + # predict maps + predict_x = nan2zero_dict(predict_x) + pd_pif_conf_batch, pd_pif_vec_batch, pd_pif_scale_batch = predict_x["pif_conf"], predict_x["pif_vec"], predict_x["pif_scale"] + pd_paf_conf_batch, pd_paf_src_vec_batch, pd_paf_dst_vec_batch = predict_x["paf_conf"], predict_x["paf_src_vec"], predict_x["paf_dst_vec"] + pd_paf_src_scale_batch, pd_paf_dst_scale_batch = predict_x["paf_src_scale"], predict_x["paf_dst_scale"] + # target maps + target_x = nan2zero_dict(target_x) + gt_pif_conf_batch, gt_pif_vec_batch, gt_pif_scale_batch = 
target_x["pif_conf"], target_x["pif_vec"], target_x["pif_scale"] + gt_paf_conf_batch, gt_paf_src_vec_batch, gt_paf_dst_vec_batch = target_x["paf_conf"], target_x["paf_src_vec"], predict_x["paf_dst_vec"] + gt_paf_src_scale_batch, gt_paf_dst_scale_batch = target_x["paf_src_scale"], target_x["paf_dst_scale"] + + # restore maps + # pif maps + pd_pif_vec_batch, pd_pif_scale_batch = restore_pif_maps(pd_pif_vec_batch, pd_pif_scale_batch) + gt_pif_vec_batch, gt_pif_scale_batch = restore_pif_maps(gt_pif_vec_batch, gt_pif_scale_batch) + # paf maps + pd_paf_src_vec_batch, pd_paf_dst_vec_batch, pd_paf_src_scale_batch, pd_paf_dst_scale_batch = \ + restore_paf_maps(pd_paf_src_vec_batch, pd_paf_dst_vec_batch, pd_paf_src_scale_batch, pd_paf_dst_scale_batch) + gt_paf_src_vec_batch, gt_paf_dst_vec_batch, gt_paf_src_scale_batch, gt_paf_dst_scale_batch = \ + restore_paf_maps(gt_paf_src_vec_batch, gt_paf_dst_vec_batch, gt_paf_src_scale_batch, gt_paf_dst_scale_batch) + + batch_size = image_batch.shape[0] + for b_idx in range(0,batch_size): + image, mask = image_batch[b_idx], mask_batch[b_idx] + # pd map + pd_pif_conf, pd_pif_vec, pd_pif_scale = pd_pif_conf_batch[b_idx], pd_pif_vec_batch[b_idx], pd_pif_scale_batch[b_idx] + pd_paf_conf, pd_paf_src_vec, pd_paf_dst_vec = pd_paf_conf_batch[b_idx], pd_paf_src_vec_batch[b_idx], pd_paf_dst_vec_batch[b_idx] + # gt map + gt_pif_conf, gt_pif_vec, gt_pif_scale = gt_pif_conf_batch[b_idx], gt_pif_vec_batch[b_idx], gt_pif_scale_batch[b_idx] + gt_paf_conf, gt_paf_src_vec, gt_paf_dst_vec = gt_paf_conf_batch[b_idx], gt_paf_src_vec_batch[b_idx], gt_paf_dst_vec_batch[b_idx] + # draw pif maps + # begin draw + pif_pltdrawer = PltDrawer(draw_row=2, draw_col=3, dpi=400) + + # draw origin image + origin_image = image_float_to_uint8(image.copy()) + pif_pltdrawer.add_subplot(origin_image, "origin_image") + + # draw gt_pif_conf + gt_pif_conf_show = np.amax(gt_pif_conf, axis=0) + pif_pltdrawer.add_subplot(gt_pif_conf_show, "gt pif_conf", color_bar=True) + + # darw gt_pif_hr_conf + gt_pif_hr_conf = get_hr_conf(gt_pif_conf, gt_pif_vec, gt_pif_scale, stride) + gt_pif_hr_conf_show = np.amax(gt_pif_hr_conf, axis=0) + pif_pltdrawer.add_subplot(gt_pif_hr_conf_show, "gt pif_hr_conf", color_bar=True) + + # draw mask + pif_pltdrawer.add_subplot(mask, "mask") + + # draw pd_pif_conf + pd_pif_conf_show = np.amax(pd_pif_conf, axis=0) + pif_pltdrawer.add_subplot(pd_pif_conf_show, "pd pif_conf", color_bar=True) + + # darw pd_pif_hr_conf + pd_pif_hr_conf = get_hr_conf(pd_pif_conf, pd_pif_vec, pd_pif_scale, stride) + pd_pif_hr_conf_show = np.amax(pd_pif_hr_conf, axis=0) + pif_pltdrawer.add_subplot(pd_pif_hr_conf_show, "pd pif_hr_conf", color_bar=True) + + # save fig + pif_pltdrawer.savefig(f"{self.save_dir}/{name}_{b_idx}_pif.png") + + # draw paf maps + # begin draw + paf_pltdrawer = PltDrawer(draw_row=2, draw_col=3, dpi=400) + + # draw origin image + paf_pltdrawer.add_subplot(image, "origin image") + + # draw gt paf_conf + gt_paf_conf_show = np.amax(gt_paf_conf, axis=0) + paf_pltdrawer.add_subplot(gt_paf_conf_show, "gt paf_conf", color_bar=True) + + # draw gt paf_vec_map + hout, wout = gt_paf_src_vec.shape[-2], gt_paf_src_vec.shape[-1] + gt_paf_vec_map_show = np.zeros(shape=(hout*stride,wout*stride,3)).astype(np.int8) + gt_paf_vec_map_show = get_arrow_map(gt_paf_vec_map_show, gt_paf_conf, gt_paf_src_vec, gt_paf_dst_vec, debug=False) + paf_pltdrawer.add_subplot(gt_paf_vec_map_show, "gt paf_vec") + + # draw mask + paf_pltdrawer.add_subplot(mask, "mask") + + # darw pd paf_conf + pd_paf_conf_show = 
np.amax(pd_paf_conf, axis=0) + paf_pltdrawer.add_subplot(pd_paf_conf_show, "pd paf_conf", color_bar=True) + + # draw pd paf_vec + hout, wout = pd_paf_src_vec.shape[-2], pd_paf_src_vec.shape[-1] + pd_paf_vec_map_show = np.zeros(shape=(hout*stride,wout*stride,3)).astype(np.int8) + pd_paf_vec_map_show = get_arrow_map(pd_paf_vec_map_show, pd_paf_conf, pd_paf_src_vec, pd_paf_dst_vec) + paf_pltdrawer.add_subplot(pd_paf_vec_map_show, "pd paf_vec") + + # save fig + paf_pltdrawer.savefig(f"{self.save_dir}/{name}_{b_idx}_paf.png") + + # draw results + if(humans_list is not None): + humans = humans_list[b_idx] + self.visualize_result(image, humans, f"{name}_{b_idx}_result") \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/utils.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/utils.py new file mode 100644 index 000000000..17657a1fa --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/utils.py @@ -0,0 +1,425 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
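# Throughout these target fields, NaN marks "no annotation here": the loss
# functions above build their valid masks with tf.math.is_nan, and the
# nan2zero()/nan2zero_dict() helpers defined just below zero the NaNs out before
# visualization. A minimal illustration of both sides of that convention:
import numpy as np

gt = np.array([np.nan, 0.0, 1.0, np.nan], dtype=np.float32)
valid = ~np.isnan(gt)                       # used as the loss selection mask
vis = np.where(np.isnan(gt), 0.0, gt)       # what nan2zero() produces for plotting
print(valid, vis)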
+ + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import functools +import numpy as np +import tensorflow as tf +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from .define import area_ref,area_ref_45 +from .define import COCO_SIGMA,COCO_UPRIGHT_POSE,COCO_UPRIGHT_POSE_45 +from ..common import regulize_loss, get_meshgrid + +def nan2zero(x): + x=np.where(x!=x,0,x) + return x + +def nan2zero_dict(dict_x): + for key in dict_x.keys(): + dict_x[key]=nan2zero(dict_x[key]) + return dict_x + +def maps_to_numpy(maps): + ret_maps=[] + for m_idx,m in enumerate(maps): + ret_maps.append(m.numpy()) + return ret_maps + +@functools.lru_cache(maxsize=64) +def get_patch_meshgrid(patch_size): + x_range=np.linspace(start=(patch_size-1)/2,stop=-(patch_size-1)/2,num=patch_size) + y_range=np.linspace(start=(patch_size-1)/2,stop=-(patch_size-1)/2,num=patch_size) + mesh_x,mesh_y=np.meshgrid(x_range,y_range) + patch_grid=np.stack([mesh_x,mesh_y]) + return patch_grid + +def get_max_r(kpt,other_kpts): + min_dist=np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float32) + if(len(other_kpts)!=0): + dif=other_kpts-kpt[np.newaxis,:] + mask=np.zeros(shape=(other_kpts.shape[0])) + mask[dif[:,0]<0.0]+=1 + mask[dif[:,1]<0.0]+=2 + for quadrant in range(0,4): + if(not np.any(mask==quadrant)): + continue + min_dist[quadrant]=np.min(np.linalg.norm(dif[mask==quadrant],axis=1)) + return min_dist + +def get_scale(keypoints): + keypoints=np.array(keypoints) + ref_pose=np.copy(COCO_UPRIGHT_POSE) + ref_pose_45=np.copy(COCO_UPRIGHT_POSE_45) + visible=np.logical_not(np.logical_and(keypoints[:,0]<0,keypoints[:,1]<0)) + if(np.sum(visible)<=3): + return None + #calculate visible area + area_vis=(np.max(keypoints[visible, 0]) - np.min(keypoints[visible, 0]))*\ + (np.max(keypoints[visible, 1]) - np.min(keypoints[visible, 1])) + #calculate reference visible area + area_ref_vis=(np.max(ref_pose[visible, 0]) - np.min(ref_pose[visible, 0]))*\ + (np.max(ref_pose[visible, 1]) - np.min(ref_pose[visible, 1])) + factor_ref_vis=area_ref/area_ref_vis if area_ref_vis>0.1 else np.inf + #calculate reference 45 rotated visible area + area_ref_45_vis=(np.max(ref_pose_45[visible, 0]) - np.min(ref_pose_45[visible, 0]))*\ + (np.max(ref_pose_45[visible, 1]) - np.min(ref_pose_45[visible, 1])) + factor_ref_45_vis=area_ref_45/area_ref_45_vis if area_ref_45_vis>0.1 else np.inf + #calculate scale-factor + if(factor_ref_vis==np.inf and factor_ref_45_vis==np.inf): + factor=1.0 + else: + factor=np.sqrt(min(factor_ref_vis,factor_ref_45_vis)) + factor=min(factor,5.0) + scale=np.sqrt(area_vis)*factor + #print(f"test label scale area_vis:{area_vis} area_ref_vis:{area_ref_vis} area_ref_45_vis:{area_ref_45_vis} "+\ + # f"factor_ref_vis:{factor_ref_vis} factor_ref_45_vis:{factor_ref_45_vis} factor:{factor} scale:{scale}\n") + #in original pifpaf, scale<0.1 should be set to nan + scale=max(scale,0.1) + return scale + +def get_pifmap(annos, mask, height, width, hout, wout, parts, limbs,bmin=0.1,dist_thresh=1.0,patch_size=4,padding=10, data_format="channels_first"): + stride=height/hout + strided_bmin=bmin/stride + n_pos,n_limbs=len(parts),len(limbs) + padded_h,padded_w=hout+2*padding,wout+2*padding + #TODO: change mask shape here + #init fields + pif_conf=np.full(shape=(n_pos,padded_h,padded_w),fill_value=0.0,dtype=np.float32) + pif_vec=np.full(shape=(n_pos,2,padded_h,padded_w),fill_value=np.nan,dtype=np.float32) + pif_bmin=np.full(shape=(n_pos,padded_h,padded_w),fill_value=np.nan,dtype=np.float32) + 
pif_scale=np.full(shape=(n_pos,padded_h,padded_w),fill_value=np.nan,dtype=np.float32) + pif_vec_norm=np.full(shape=(n_pos,padded_h,padded_w),fill_value=np.inf,dtype=np.float32) + #print(f"pif_vec_norm:{pif_vec_norm.shape} pif_conf:{pif_conf.shape} mask:{mask.shape}") + pif_vec_norm[:,padding:-padding,padding:-padding][:,mask==0]=dist_thresh + pif_conf[:,padding:-padding,padding:-padding][:,mask==0]=np.nan + #generate fields + for anno_id,anno in enumerate(annos): + anno_scale=get_scale(np.array(anno)/stride) + if(anno_scale==None): + continue + for part_idx,kpt in enumerate(anno): + if(kpt[0]<0 or kpt[0]>width or kpt[1]<0 or kpt[1]>height): + continue + #calculate scale + kpt=np.array(kpt)/stride + kpt_scale=anno_scale*COCO_SIGMA[part_idx] + #generate pif_maps for single point + pif_maps=[pif_conf,pif_vec,pif_bmin,pif_scale,pif_vec_norm] + pif_conf,pif_vec,pif_bmin,pif_scale,pif_vec_norm=put_pifmap(pif_maps,part_idx,kpt,\ + kpt_scale=kpt_scale,strided_bmin=strided_bmin,dist_thresh=dist_thresh,patch_size=patch_size,padding=padding) + #get field without padding (TODO: valid area?) + pif_conf=pif_conf[:,padding:-padding,padding:-padding] + pif_vec=pif_vec[:,:,padding:-padding,padding:-padding] + pif_bmin=pif_bmin[:,padding:-padding,padding:-padding] + pif_scale=pif_scale[:,padding:-padding,padding:-padding] + return pif_conf,pif_vec,pif_bmin,pif_scale + +def put_pifmap(pif_maps,part_idx,kpt,kpt_scale,strided_bmin=0.0125,dist_thresh=1.0,patch_size=4,padding=10): + pif_conf,pif_vec,pif_bmin,pif_scale,pif_vec_norm=pif_maps + padded_h,padded_w=pif_conf.shape[1],pif_conf.shape[2] + #calculate patch grid coordinate range in padded map + patch_offset=(patch_size-1)/2 + left_top=np.round(kpt-patch_offset+padding).astype(np.int) + min_x,min_y=left_top[0],left_top[1] + max_x,max_y=min_x+patch_size,min_y+patch_size + if(min_x<0 or min_x>=padded_w or max_y<0 or max_y>=padded_h): + return pif_conf,pif_vec,pif_bmin,pif_scale,pif_vec_norm + #calculate mesh center to kpt offset + patch_center_offset=kpt-(left_top+patch_offset-padding) + #calculate mesh grid to mesh center offset + patch_meshgrid=get_patch_meshgrid(patch_size) + #calculate mesh grid to kpt offset + patch_grid_offset=patch_meshgrid+patch_center_offset[:,np.newaxis,np.newaxis] + patch_grid_offset_norm=np.linalg.norm(patch_grid_offset,axis=0) + #calculate mash mask acordding to the distance to the keypoints + grid_mask=patch_grid_offset_norm=wout or src_kpt[1]<0 or src_kpt[1]>=hout) + out_of_field_dst=(dst_kpt[0]<0 or dst_kpt[1]>=wout or dst_kpt[1]<0 or dst_kpt[1]>=hout) + if(out_of_field_src or out_of_field_dst): + continue + #calculate src scale + src_scale=anno_scale*COCO_SIGMA[src_idx] + #calculate dst scale + dst_scale=anno_scale*COCO_SIGMA[dst_idx] + #generate paf_maps for single point + paf_maps=[paf_conf,paf_src_vec,paf_dst_vec,paf_src_bmin,paf_dst_bmin,paf_src_scale,paf_dst_scale,paf_vec_norm] + paf_conf,paf_src_vec,paf_dst_vec,paf_src_bmin,paf_dst_bmin,paf_src_scale,paf_dst_scale,paf_vec_norm=put_pafmap(paf_maps,limb_idx,src_kpt,src_scale,dst_kpt,dst_scale,\ + strided_bmin=strided_bmin,padding=padding,patch_size=patch_size,data_format=data_format) + #get field without padding (TODO: valid area?) 
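# The target fields above are allocated with a `padding`-cell border so that the
# small patches stamped around each keypoint never need explicit bounds checks;
# the border is sliced away again right below. A tiny illustration of the same
# pad-then-crop pattern:
import numpy as np

padding, hout, wout = 10, 46, 46
field = np.full((17, hout + 2 * padding, wout + 2 * padding), np.nan, np.float32)
# ... per-keypoint patches are written into the padded interior here ...
field = field[:, padding:-padding, padding:-padding]
print(field.shape)                          # (17, 46, 46)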
+ paf_conf=paf_conf[:,padding:-padding,padding:-padding] + paf_src_vec=paf_src_vec[:,:,padding:-padding,padding:-padding] + paf_dst_vec=paf_dst_vec[:,:,padding:-padding,padding:-padding] + paf_src_bmin=paf_src_bmin[:,padding:-padding,padding:-padding] + paf_dst_bmin=paf_dst_bmin[:,padding:-padding,padding:-padding] + paf_src_scale=paf_src_scale[:,padding:-padding,padding:-padding] + paf_dst_scale=paf_dst_scale[:,padding:-padding,padding:-padding] + return paf_conf,paf_src_vec,paf_dst_vec,paf_src_bmin,paf_dst_bmin,paf_src_scale,paf_dst_scale + +def put_pafmap(paf_maps,limb_idx,src_kpt,src_scale,dst_kpt,dst_scale,patch_size=3,strided_bmin=0.0125,padding=10,data_format="channels_first"): + paf_conf,paf_src_vec,paf_dst_vec,paf_src_bmin,paf_dst_bmin,paf_src_scale,paf_dst_scale,paf_vec_norm=paf_maps + padded_h,padded_w=paf_conf.shape[1],paf_conf.shape[2] + patch_offset=(patch_size-1)/2 + limb_vec=dst_kpt-src_kpt + limb_vec_norm=np.linalg.norm(limb_vec) + meshgrid_offsets=np.stack(np.meshgrid( + np.linspace(-0.5 * (patch_size - 1), 0.5 * (patch_size - 1), patch_size), + np.linspace(-0.5 * (patch_size - 1), 0.5 * (patch_size - 1), patch_size), + ), axis=-1).reshape(-1, 2) + #split the limb vec into line segmentations to fill the field + sample_num=max(2,int(np.ceil(limb_vec_norm))) + fmargin=(patch_size/2)/(limb_vec_norm+np.spacing(1)) + fmargin=np.clip(fmargin,0.25,0.4) + frange=np.linspace(fmargin,1.0-fmargin,num=sample_num) + filled_points=set() + for lmbda in frange: + for meshgrid_offset in meshgrid_offsets: + mesh_x,mesh_y=np.round(src_kpt+lmbda*limb_vec+meshgrid_offset).astype(np.int)+padding + if(mesh_x<0 or mesh_x>=padded_w or mesh_y<0 or mesh_y>=padded_h): + continue + #check for repeatly filling the same point + mesh_coordinate=(int(mesh_x),int(mesh_y)) + if(mesh_coordinate in filled_points): + continue + filled_points.add(mesh_coordinate) + offset=np.array([mesh_x,mesh_y])-padding-src_kpt + distline=np.fabs(limb_vec[1]*offset[0]-limb_vec[0]*offset[1])/(limb_vec_norm+0.01) + if(distline=min_x and center_x=min_y and center_ythresh + confs=conf_map[field_idx][thresh_mask] + vecs=vec_map[field_idx,:,thresh_mask] + scales=scale_map[field_idx][thresh_mask] + if(debug): + print(f"test filed_idx:{field_idx} scale_mean:{np.mean(scales/stride)} scale_var:{np.var(scales/stride)}") + sigmas=np.maximum(1.0,0.5*scales) + hr_conf[field_idx]=add_gaussian(hr_conf[field_idx],confs,vecs,scales,debug=debug) + return hr_conf + +def get_arrow_map(array_map,conf_map,src_vec_map,dst_vec_map,thresh=0.1,src_color=(255,255,0),dst_color=(0,0,255),debug=False): + #make integer indexes + def toidx(x): + return np.round(x).astype(np.int) + #shape conf:[field,h,w] + #shape vec:[field,2,h,w] + #shape array:[h,w,3] + grid_center_color=(165,42,42) + src_center_color=(179,238,58) + dst_center_color=(30,144,255) + image_h,image_w,_=array_map.shape + stride=image_h/conf_map.shape[1] + radius=max(np.round(min(image_h,image_w)/300).astype(np.int),1) + thickness=max(np.round(min(image_h,image_w)/240).astype(np.int),1) + mask=conf_map>thresh + fields,grid_ys,grid_xs=np.where(mask) + for field,grid_y,grid_x in zip(fields,grid_ys,grid_xs): + src_x,src_y=toidx(src_vec_map[field,:,grid_y,grid_x]) + dst_x,dst_y=toidx(dst_vec_map[field,:,grid_y,grid_x]) + grid_y,grid_x=toidx(grid_y*stride),toidx(grid_x*stride) + array_map=cv2.circle(array_map,(grid_x,grid_y),radius=radius,color=grid_center_color,thickness=thickness) + if(debug): + print(f"test get_arrow_map image_h:{image_h} image_w:{image_w} field:{field} gird_x:{grid_x} 
grid_y:{grid_y} src_x:{src_x} src_y:{src_y} dst_x:{dst_x} dst_y:{dst_y}") + if(src_x>=0 and src_x=0 and src_y=0 and dst_x=0 and dst_y=3): + lr=lr/5 + stuck_time=0 + train_model.train() + + if(step==total_step): + break + + +def single_val(val_model,dataset,config): + total_val_num=config.pretrain.val_num + log("starting validate... ") + val_dataset=dataset.get_eval_dataset() + val_dataset = val_dataset.shuffle(buffer_size=4096) + data_aug=partial(_data_aug,hin=224,win=224,data_format=val_model.data_format) + val_dataset = val_dataset.map(partial(val_map_fn,data_aug=data_aug),num_parallel_calls=max(multiprocessing.cpu_count()//2,1)) # max(multiprocessing.cpu_count()//2,1)) + val_dataset = val_dataset.batch(64) + val_dataset = val_dataset.prefetch(64) + + total_top1_acc_num,total_top5_acc_num,total_img_num=0,0,0 + @tf.function + def one_step(image,label,val_model): + predict=val_model.forward(image) + val_top1_acc_num=tf.reduce_sum(tf.where(tf.math.in_top_k(label,predict,1),1,0)) + val_top5_acc_num=tf.reduce_sum(tf.where(tf.math.in_top_k(label,predict,5),1,0)) + return val_top1_acc_num,val_top5_acc_num + + for image,label in val_dataset: + top1_acc_num,top5_acc_num=one_step(image.numpy(),label.numpy(),val_model) + total_top1_acc_num+=top1_acc_num + total_top5_acc_num+=top5_acc_num + total_img_num+=image.shape[0] + if(total_img_num>=total_val_num): + break + print(f"validation accuracy_top1:{total_top1_acc_num/total_img_num} accuracy_top5:{total_top5_acc_num/total_img_num}") + return total_top1_acc_num/total_img_num + + diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/processor.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/processor.py new file mode 100644 index 000000000..c21668363 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/processor.py @@ -0,0 +1,148 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
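# single_val() above counts top-1/top-5 hits with tf.math.in_top_k. A
# self-contained toy example of that accounting (all values illustrative):
import tensorflow as tf

logits = tf.constant([[0.1, 0.5, 0.2, 0.1, 0.1],
                      [0.7, 0.1, 0.1, 0.05, 0.05],
                      [0.2, 0.2, 0.25, 0.15, 0.2]])
labels = tf.constant([1, 0, 4])
top1 = tf.reduce_sum(tf.cast(tf.math.in_top_k(labels, logits, 1), tf.int32))
top5 = tf.reduce_sum(tf.cast(tf.math.in_top_k(labels, logits, 5), tf.int32))
print(int(top1), int(top5))   # 2 3: the third label is not the arg-max but is in the top 5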
+ + +import npu_device as npu +npu.open().as_default() + +import cv2 +import numpy as np +import matplotlib.pyplot as plt +from .common import pad_image_shape +from .common import image_float_to_uint8 + +# Basic class of processors to be inherit +class BasicPreProcessor: + def __init__(self, parts, limbs, hin, win, hout, wout, colors=None, data_format="channels_first", *args, **kargs): + self.parts = parts + self.limbs = limbs + self.hin, self.win = hin, win + self.hout, self.wout = hout, wout + self.colors = colors + self.data_format = data_format + + def process(self, annos, mask, bbxs): + raise NotImplementedError("abstract class BasicPreProcessor function: process not implemented!") + +class BasicPostProcessor: + def __init__(self, parts, limbs, colors=None, data_format="channels_first", *args, **kargs): + self.parts = parts + self.limbs = limbs + self.colors = colors + self.data_format = data_format + + def process(self, predict_x): + raise NotImplementedError("abstract class BasicPostProcessor function: process not implemented!") + +class BasicVisualizer: + def __init__(self, save_dir="./save_dir", *args, **kargs): + self.save_dir = save_dir + + def set_save_dir(self, save_dir): + self.save_dir = save_dir + + def visualize_result(self, image, humans, name): + pltdrawer = PltDrawer(draw_row=1, draw_col=2, figsize=(8,8)) + # origin image + origin_image = image_float_to_uint8(image.copy()) + pltdrawer.add_subplot(origin_image, "origin image") + + # result image + result_image = image_float_to_uint8(image.copy()) + for human in humans: + result_image = human.draw_human(result_image) + pltdrawer.add_subplot(result_image, "result image") + + # save figure + pltdrawer.savefig(f"{self.save_dir}/{name}.png") + + def visualize(self, image_batch, predict_x, mask_batch=None, humans_list=None, name="vis"): + raise NotImplementedError("abstract class BasicVisualizer function: visualize not implemented!") + + def visualize_compare(self, image_batch, predict_x, target_x, mask_batch=None, humans_list=None, name="vis"): + raise NotImplementedError("abstract class BasicVisualizer function: visualize_compare not implemented!") + +class PltDrawer: + def __init__(self, draw_row, draw_col, figsize=(8,8), dpi=300): + self.draw_row = draw_row + self.draw_col = draw_col + self.figsize = figsize + self.dpi = dpi + self.plot_images=[] + self.plot_titles=[] + self.color_bars=[] + + def add_subplot(self,plot_image, plot_title, color_bar=False): + self.plot_images.append(plot_image) + self.plot_titles.append(plot_title) + self.color_bars.append(color_bar) + + def draw_plots(self): + fig = plt.figure(figsize=self.figsize) + for draw_idx,(image, title, color_bar) in enumerate(zip(self.plot_images,self.plot_titles,self.color_bars)): + a = fig.add_subplot(self.draw_row, self.draw_col, draw_idx+1) + a.set_title(title) + plt.imshow(image) + if(color_bar): + plt.colorbar() + + def savefig(self,save_path): + self.draw_plots() + plt.savefig(save_path,dpi=self.dpi) + plt.close() + +class ImageProcessor: + def __init__(self, input_h, input_w): + self.input_h = input_h + self.input_w = input_w + + def read_image_rgb_float(self, image_path): + # return an image with rgb channel order and float value within [0,1] + image = cv2.imread(image_path) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = np.clip(image.astype(np.float32)/255.0,0.0,1.0).astype(np.float32) + return image + + def write_image_rgb_float(self, image, image_path): + # write an image which has rgb channel order and float value within [0,1] + image = 
np.clip(image*255.0, 0, 255).astype(np.uint8) + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + return cv2.imwrite(image_path, image) + + def image_pad_and_scale(self, image): + # pad and scale image to input_h and input_w + # output scaled image with pad + image_h ,image_w, _ =image.shape + scale = min(self.input_h/image_h, self.input_w/image_w) + scale_h, scale_w = int(scale*image_h), int(scale*image_w) + scaled_image = cv2.resize(image, (scale_w,scale_h), interpolation=cv2.INTER_CUBIC) + pad_image, pad = pad_image_shape(scaled_image, shape=[self.input_h, self.input_w]) + pad_image = pad_image.astype(np.float32) + return pad_image, scale, pad + + \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/train.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/train.py new file mode 100644 index 000000000..55b3ae9b2 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/train.py @@ -0,0 +1,626 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
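+# Module overview: this file builds the tf.data input pipeline (_map_fn/_data_aug_fn wrap
+# the augmentor and preprocessor through tf.py_function) and provides two training entries:
+# single_train for one device and parallel_train for KungFu multi-device training, both with
+# optional domain adaptation through a Discriminator applied to backbone features.
+# Minimal usage sketch (assumption: the Config/Model/Dataset API is used as in pretrain.py;
+# the exact name of the training entry exposed by the Model package may differ):
+#   config  = Config.get_config()
+#   model   = Model.get_model(config)
+#   dataset = Dataset.get_dataset(config)
+#   train   = Model.get_train(config)   # assumed helper that dispatches to single_train/parallel_train
+#   train(model, dataset)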
+ + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +from tqdm import tqdm +import numpy as np +import matplotlib +matplotlib.use('Agg') +import tensorflow as tf +import tensorlayer as tl +import _pickle as cPickle +from functools import partial, reduce +from .common import KUNGFU +from .common import log_train as log +from .domainadapt import Discriminator +from .common import decode_mask,get_num_parallel_calls +from .metrics import MetricManager +from .augmentor import BasicAugmentor +from .processor import BasicPreProcessor +from .processor import BasicPostProcessor +from .processor import BasicVisualizer +from .common import to_tensor_dict + + +def _data_aug_fn(image, ground_truth, augmentor:BasicAugmentor, preprocessor:BasicPreProcessor, data_format="channels_first"): + """Data augmentation function.""" + # restore data + image = image.numpy() + ground_truth = cPickle.loads(ground_truth.numpy()) + annos = ground_truth["kpt"] + meta_mask = ground_truth["mask"] + bbxs = ground_truth["bbxs"] + + # decode mask + mask = decode_mask(meta_mask) + if(mask is None): + mask = np.ones_like(image)[:,:,0].astype(np.uint8) + + # general augmentaton process + image, annos, mask, bbxs = augmentor.process(image=image, annos=annos, mask=mask, bbxs=bbxs) + mask = mask[:,:,np.newaxis] + image = image * mask + + # TODO: all process are in channels_first format + image = np.transpose(image, [2, 0, 1]) + mask = np.transpose(mask, [2, 0, 1]) + + # generate result including heatmap and vectormap + target_x = preprocessor.process(annos=annos, mask=mask, bbxs=bbxs) + target_x = cPickle.dumps(target_x) + + return image, mask, target_x + + +def _map_fn(img_list, annos, data_aug_fn): + """TF Dataset pipeline.""" + + # load data + image = tf.io.read_file(img_list) + image = tf.image.decode_jpeg(image, channels=3) # get RGB with 0~1 + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # data augmentation using affine transform and get paf maps + image, mask, target_x= tf.py_function(data_aug_fn, [image, annos], + [tf.float32, tf.float32, tf.string]) + + # data augmentaion using tf + image = tf.image.random_brightness(image, max_delta=35. / 255.) # 64./255. 32./255.) 
caffe -30~50 + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) # lower=0.2, upper=1.8) caffe 0.3~1.5 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0) + + return image, mask, target_x + +def get_paramed_map_fn(augmentor, preprocessor, data_format="channels_first"): + paramed_data_aug_fn = partial(_data_aug_fn, augmentor=augmentor, preprocessor=preprocessor, data_format=data_format) + paramed_map_fn = partial(_map_fn, data_aug_fn=paramed_data_aug_fn) + return paramed_map_fn + +def _dmadapt_data_aug_fn(image, augmentor, data_format="channels_first"): + image = image.numpy() + image = augmentor.process_only_image(image) + if(data_format=="channels_first"): + image = np.transpose(image, [2,0,1]) + return image + +def _dmadapt_map_fn(image, aug_fn): + image = tf.py_function(aug_fn, [image], tf.float32) + return image + +def get_paramed_dmadapt_map_fn(augmentor): + paramed_dmadapt_aug_fn = partial(_dmadapt_data_aug_fn, augmentor=augmentor) + paramed_dmadpat_map_fn = partial(_dmadapt_map_fn, aug_fn=paramed_dmadapt_aug_fn) + return paramed_dmadpat_map_fn + + +def single_train(train_model, dataset, config, augmentor:BasicAugmentor, \ + preprocessor:BasicPreProcessor,postprocessor:BasicPostProcessor,visualizer:BasicVisualizer): + '''Single train pipeline of Openpose class models + + input model and dataset, the train pipeline will start automaticly + the train pipeline will: + 1.store and restore ckpt in directory ./save_dir/model_name/model_dir + 2.log loss information in directory ./save_dir/model_name/log.txt + 3.visualize model output periodly during training in directory ./save_dir/model_name/train_vis_dir + the newest model is at path ./save_dir/model_name/model_dir/newest_model.npz + + Parameters + ---------- + arg1 : tensorlayer.models.MODEL + a preset or user defined model object, obtained by Model.get_model() function + + arg2 : dataset + a constructed dataset object, obtained by Dataset.get_dataset() function + + + Returns + ------- + None + ''' + + # train hyper params + # dataset params + total_step = config.train.n_step + batch_size = config.train.batch_size + # learning rate params + lr_init = config.train.lr_init + lr_decay_factor = config.train.lr_decay_factor + lr_decay_steps = [200000, 300000, 360000, 420000, 480000, 540000, 600000, 700000, 800000, 900000] + weight_decay_factor = config.train.weight_decay_factor + # log and checkpoint params + log_interval = config.log.log_interval + vis_interval = config.train.vis_interval + save_interval = config.train.save_interval + + # model hyper params + data_format = train_model.data_format + model_dir = config.model.model_dir + pretrain_model_dir = config.pretrain.pretrain_model_dir + pretrain_model_path = f"{pretrain_model_dir}/newest_{train_model.backbone.name}.npz" + + # metrics + metric_manager = MetricManager() + + # initializing train dataset + train_dataset = dataset.get_train_dataset() + epoch_size = dataset.get_train_datasize()//batch_size + paramed_map_fn = get_paramed_map_fn(augmentor=augmentor, preprocessor=preprocessor, data_format=data_format) + train_dataset = train_dataset.shuffle(buffer_size=4096).repeat() + train_dataset = train_dataset.map(paramed_map_fn, num_parallel_calls=get_num_parallel_calls()) + train_dataset = train_dataset.batch(config.train.batch_size) + train_dataset = train_dataset.prefetch(3) + train_dataset_iter = iter(train_dataset) + + #train configure + save_step = tf.Variable(1, trainable=False) + save_lr = tf.Variable(lr_init, trainable=False) + opt = 
tf.keras.optimizers.Adam(learning_rate=save_lr) + domainadapt_flag = config.data.domainadapt_flag + total_epoch = total_step//epoch_size + + #domain adaptation params + if (not domainadapt_flag): + ckpt = tf.train.Checkpoint(save_step=save_step, save_lr=save_lr, opt=opt) + else: + log("Domain adaptaion in training enabled!") + # weight param + lambda_adapt=1e-4 + # construct discrminator model + feature_hin = train_model.hin//train_model.backbone.scale_size + feature_win = train_model.win//train_model.backbone.scale_size + in_channels = train_model.backbone.out_channels + adapt_dis = Discriminator(feature_hin, feature_win, in_channels, data_format=data_format) + opt_d = tf.keras.optimizers.Adam(learning_rate=save_lr) + ckpt = tf.train.Checkpoint(save_step=save_step, save_lr=save_lr, opt=opt, opt_d=opt_d) + # construct domain adaptation dataset + dmadapt_train_dataset = dataset.get_dmadapt_train_dataset() + paramed_dmadapt_map_fn = get_paramed_dmadapt_map_fn(augmentor) + dmadapt_train_dataset = dmadapt_train_dataset.map(paramed_dmadapt_map_fn, num_parallel_calls=get_num_parallel_calls()) + dmadapt_train_dataset = dmadapt_train_dataset.shuffle(buffer_size=4096).repeat() + dmadapt_train_dataset = dmadapt_train_dataset.batch(config.train.batch_size) + dmadapt_train_dataset = dmadapt_train_dataset.prefetch(3) + dmadapt_train_dataset_iter = iter(dmadapt_train_dataset) + + + #load from ckpt + ckpt_manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3) + try: + log("loading ckpt...") + ckpt.restore(ckpt_manager.latest_checkpoint) + except: + log("ckpt_path doesn't exist, step and optimizer are initialized") + #load pretrained backbone + try: + log("loading pretrained backbone...") + train_model.backbone.load_weight(pretrain_model_path, format="npz_dict") + except: + log("pretrained backbone doesn't exist, model backbone are initialized") + #load model weights + try: + log("loading saved training model weights...") + train_model.load_weights(os.path.join(model_dir, "newest_model.npz"), format="npz_dict") + except: + log("model_path doesn't exist, model parameters are initialized") + if (domainadapt_flag): + try: + log("loading saved domain adaptation discriminator weight...") + adapt_dis.load_weights(os.path.join(model_dir, "newest_discriminator.npz")) + except: + log("discriminator path doesn't exist, discriminator parameters are initialized") + + + log(f"single training using learning rate:{lr_init} batch_size:{batch_size}") + step = save_step.numpy() + lr = save_lr.numpy() + + for lr_decay_step in lr_decay_steps: + if (step > lr_decay_step): + lr = lr * lr_decay_factor + + # optimize one step + def optimize_step(image, mask, target_x, train_model, metric_manager: MetricManager): + # tape + with tf.GradientTape() as tape: + predict_x = train_model.forward(x=image, is_train=True, ret_backbone=domainadapt_flag) + total_loss = train_model.cal_loss(predict_x=predict_x, target_x=target_x, \ + mask=mask, metric_manager=metric_manager) + # optimize model + gradients = tape.gradient(total_loss, train_model.trainable_weights) + opt.apply_gradients(zip(gradients, train_model.trainable_weights)) + return predict_x + + def optimize_step_dmadapt(image_src, image_dst, train_model, adapt_dis: Discriminator, metric_manager: MetricManager): + # tape + with tf.GradientTape(persistent=True) as tape: + # feature extraction + # src feature + predict_src = train_model.forward(x=image_src, is_train=True, ret_backbone=True) + backbone_feature_src = predict_src["backbone_features"] + adapt_pd_src = 
adapt_dis.forward(backbone_feature_src) + # dst feature + predict_dst = train_model.forward(x=image_dst, is_train=True, ret_backbone=True) + backbone_feature_dst = predict_dst["backbone_features"] + adapt_pd_dst = adapt_dis.forward(backbone_feature_dst) + + # loss calculation + # loss of g + g_adapt_loss = adapt_dis.cal_loss(x=adapt_pd_dst, label=True)*lambda_adapt + # loss of d + d_adapt_loss_src = adapt_dis.cal_loss(x=adapt_pd_src, label=True) + d_adapt_loss_dst = adapt_dis.cal_loss(x=adapt_pd_dst, label=False) + d_adapt_loss = (d_adapt_loss_src+d_adapt_loss_dst)/2 + + # optimize model + g_gradient = tape.gradient(g_adapt_loss, train_model.trainable_weights) + opt.apply_gradients(zip(g_gradient, train_model.trainable_weights)) + metric_manager.update("model/g_adapt_loss",g_adapt_loss) + # optimize dis + d_gradients = tape.gradient(d_adapt_loss, adapt_dis.trainable_weights) + opt_d.apply_gradients(zip(d_gradients, adapt_dis.trainable_weights)) + metric_manager.update("dis/d_adapt_loss_src",d_adapt_loss_src) + metric_manager.update("dis/d_adapt_loss_dst",d_adapt_loss_dst) + # delete persistent tape + del tape + return predict_dst + + # formal training procedure + train_model.train() + cur_epoch = step // epoch_size +1 + log(f"Start Training- total_epoch: {total_epoch} total_step: {total_step} current_epoch:{cur_epoch} "\ + +f"current_step:{step} batch_size:{batch_size} lr_init:{lr_init} lr_decay_steps:{lr_decay_steps} "\ + +f"lr_decay_factor:{lr_decay_factor} weight_decay_factor:{weight_decay_factor}" ) + for epoch_idx in range(cur_epoch,total_epoch): + log(f"Epoch {epoch_idx}/{total_epoch}:") + npu.set_npu_loop_size(epoch_size) + for _ in tqdm(range(0,epoch_size)): + step+=1 + metric_manager.start_timing() + image, mask, target_list = next(train_dataset_iter) + # extract gt_label + target_list = [cPickle.loads(target) for target in target_list.numpy()] + target_x = {key:[] for key,value in target_list[0].items()} + target_x = reduce(lambda x, y: {key:x[key]+[y[key]] for key,value in x.items()},[target_x]+target_list) + target_x = {key:np.stack(value) for key,value in target_x.items()} + target_x = to_tensor_dict(target_x) + + # learning rate decay + if (step in lr_decay_steps): + new_lr_decay = lr_decay_factor**(lr_decay_steps.index(step) + 1) + lr = lr_init * new_lr_decay + + # optimize one step + predict_x = optimize_step(image, mask, target_x, train_model, metric_manager) + + # optimize domain adaptation + if(domainadapt_flag): + src_image = image + dst_image = next(dmadapt_train_dataset_iter) + predict_dst = optimize_step_dmadapt(src_image, dst_image, train_model, adapt_dis, metric_manager) + + # log info periodly + if ((step != 0) and (step % log_interval) == 0): + log(f"Train Epoch={epoch_idx} / {total_epoch}, Step={step} / {total_step}: learning_rate: {lr:.6e} {metric_manager.report_timing()}\n"\ + +f"{metric_manager.report_train()} ") + + # visualize periodly + if ((step != 0) and (step % vis_interval) == 0): + log(f"Visualizing prediction maps and target maps") + predict_x = train_model.forward(x=image, is_train=False) + visualizer.visualize_compare(image_batch=image.numpy(), mask_batch=mask.numpy(), predict_x=predict_x, target_x=target_x,\ + name=f"train_{step}") + + # save result and ckpt periodly + if ((step!= 0) and (step % save_interval) == 0): + # save ckpt + log("saving model ckpt and result...") + save_step.assign(step) + save_lr.assign(lr) + ckpt_save_path = ckpt_manager.save() + log(f"ckpt save_path:{ckpt_save_path} saved!\n") + # save train model + model_save_path = 
os.path.join(model_dir, "newest_model.npz") + train_model.save_weights(model_save_path, format="npz_dict") + # tf.keras.models.save_model(train_model, os.path.join(model_dir, "pb")) + # train_model.save_weights(model_save_path, format="ckpt") + # train_model.save(os.path.join(model_dir, "newest_model.h5")) + log(f"model save_path:{model_save_path} saved!\n") + # save discriminator model + if (domainadapt_flag): + dis_save_path = os.path.join(model_dir, "newest_discriminator.npz") + adapt_dis.save_weights(dis_save_path, format="npz_dict") + log(f"discriminator save_path:{dis_save_path} saved!\n") + +def parallel_train(train_model, dataset, config, augmentor:BasicAugmentor, \ + preprocessor:BasicPreProcessor,postprocessor:BasicPostProcessor,visualizer=BasicVisualizer): + '''Single train pipeline of Openpose class models + + input model and dataset, the train pipeline will start automaticly + the train pipeline will: + 1.store and restore ckpt in directory ./save_dir/model_name/model_dir + 2.log loss information in directory ./save_dir/model_name/log.txt + 3.visualize model output periodly during training in directory ./save_dir/model_name/train_vis_dir + the newest model is at path ./save_dir/model_name/model_dir/newest_model.npz + + Parameters + ---------- + arg1 : tensorlayer.models.MODEL + a preset or user defined model object, obtained by Model.get_model() function + + arg2 : dataset + a constructed dataset object, obtained by Dataset.get_dataset() function + + + Returns + ------- + None + ''' + + # train hyper params + # dataset params + total_step = config.train.n_step + batch_size = config.train.batch_size + # learning rate params + lr_init = config.train.lr_init + lr_decay_factor = config.train.lr_decay_factor + lr_decay_steps = [200000, 300000, 360000, 420000, 480000, 540000, 600000, 700000, 800000, 900000] + weight_decay_factor = config.train.weight_decay_factor + # log and checkpoint params + log_interval = config.log.log_interval + vis_interval = config.train.vis_interval + save_interval = config.train.save_interval + vis_dir = config.train.vis_dir + + # model hyper params + hin = train_model.hin + win = train_model.win + hout = train_model.hout + wout = train_model.wout + parts, limbs, colors = train_model.parts, train_model.limbs, train_model.colors + data_format = train_model.data_format + model_dir = config.model.model_dir + pretrain_model_dir = config.pretrain.pretrain_model_dir + pretrain_model_path = f"{pretrain_model_dir}/newest_{train_model.backbone.name}.npz" + + # metrics + metric_manager = MetricManager() + + # initializing train dataset + train_dataset = dataset.get_train_dataset() + epoch_size = dataset.get_train_datasize()//batch_size + paramed_map_fn = get_paramed_map_fn(augmentor=augmentor, preprocessor=preprocessor, data_format=data_format) + train_dataset = train_dataset.shuffle(buffer_size=4096).repeat() + train_dataset = train_dataset.map(paramed_map_fn, num_parallel_calls=get_num_parallel_calls()) + train_dataset = train_dataset.batch(config.train.batch_size) + train_dataset = train_dataset.prefetch(3) + train_dataset_iter = iter(train_dataset) + + #train configure + save_step = tf.Variable(1, trainable=False) + save_lr = tf.Variable(lr_init, trainable=False) + opt = tf.keras.optimizers.Adam(learning_rate=save_lr) + domainadapt_flag = config.data.domainadapt_flag + total_epoch = total_step//epoch_size + + #domain adaptation params + if (not domainadapt_flag): + ckpt = tf.train.Checkpoint(save_step=save_step, save_lr=save_lr, opt=opt) + else: + log("Domain 
adaptaion in training enabled!") + # weight param + lambda_adapt=1e-4 + # construct discrminator model + feature_hin = train_model.hin//train_model.backbone.scale_size + feature_win = train_model.win//train_model.backbone.scale_size + in_channels = train_model.backbone.out_channels + adapt_dis = Discriminator(feature_hin, feature_win, in_channels, data_format=data_format) + opt_d = tf.keras.optimizers.Adam(learning_rate=save_lr) + ckpt = tf.train.Checkpoint(save_step=save_step, save_lr=save_lr, opt=opt, opt_d=opt_d) + # construct domain adaptation dataset + dmadapt_train_dataset = dataset.get_dmadapt_train_dataset() + paramed_dmadapt_map_fn = get_paramed_dmadapt_map_fn(augmentor) + dmadapt_train_dataset = dmadapt_train_dataset.map(paramed_dmadapt_map_fn, num_parallel_calls=get_num_parallel_calls()) + dmadapt_train_dataset = dmadapt_train_dataset.shuffle(buffer_size=4096).repeat() + dmadapt_train_dataset = dmadapt_train_dataset.batch(config.train.batch_size) + dmadapt_train_dataset = dmadapt_train_dataset.prefetch(3) + dmadapt_train_dataset_iter = iter(dmadapt_train_dataset) + + + #load from ckpt + ckpt_manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3) + try: + log("loading ckpt...") + ckpt.restore(ckpt_manager.latest_checkpoint) + except: + log("ckpt_path doesn't exist, step and optimizer are initialized") + #load pretrained backbone + try: + log("loading pretrained backbone...") + tl.files.load_and_assign_npz_dict(name=pretrain_model_path, network=train_model.backbone, skip=True) + except: + log("pretrained backbone doesn't exist, model backbone are initialized") + #load model weights + try: + log("loading saved training model weights...") + train_model.load_weights(os.path.join(model_dir, "newest_model.npz")) + except: + log("model_path doesn't exist, model parameters are initialized") + if (domainadapt_flag): + try: + log("loading saved domain adaptation discriminator weight...") + adapt_dis.load_weights(os.path.join(model_dir, "newest_discriminator.npz")) + except: + log("discriminator path doesn't exist, discriminator parameters are initialized") + + + log(f"Parallel training using learning rate:{lr_init} batch_size:{batch_size}") + step = save_step.numpy() + lr = save_lr.numpy() + + #import kungfu + from kungfu.python import current_cluster_size, current_rank + from kungfu.tensorflow.initializer import broadcast_variables + from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer, SynchronousAveragingOptimizer, PairAveragingOptimizer + + total_step = total_step // current_cluster_size() + 1 # KungFu + total_epoch = total_epoch // current_cluster_size() +1 # KungFu + for step_idx, decay_step in enumerate(lr_decay_steps): + lr_decay_steps[step_idx] = decay_step // current_cluster_size() + 1 # KungFu + + # optimize one step + def optimize_step(image, mask, target_x, train_model, metric_manager: MetricManager): + # tape + with tf.GradientTape() as tape: + predict_x = train_model.forward(x=image, is_train=True, ret_backbone=domainadapt_flag) + total_loss = train_model.cal_loss(predict_x=predict_x, target_x=target_x, \ + mask=mask, metric_manager=metric_manager) + # optimize model + gradients = tape.gradient(total_loss, train_model.trainable_weights) + opt.apply_gradients(zip(gradients, train_model.trainable_weights)) + return predict_x + + def optimize_step_dmadapt(image_src, image_dst, train_model, adapt_dis: Discriminator, metric_manager: MetricManager): + # tape + with tf.GradientTape(persistent=True) as tape: + # feature extraction + # src feature + 
predict_src = train_model.forward(x=image_src, is_train=True, ret_backbone=True) + backbone_feature_src = predict_src["backbone_features"] + adapt_pd_src = adapt_dis.forward(backbone_feature_src) + # dst feature + predict_dst = train_model.forward(x=image_dst, is_train=True, ret_backbone=True) + backbone_feature_dst = predict_dst["backbone_features"] + adapt_pd_dst = adapt_dis.forward(backbone_feature_dst) + + # loss calculation + # loss of g + g_adapt_loss = adapt_dis.cal_loss(x=adapt_pd_dst, label=True)*lambda_adapt + # loss of d + d_adapt_loss_src = adapt_dis.cal_loss(x=adapt_pd_src, label=True) + d_adapt_loss_dst = adapt_dis.cal_loss(x=adapt_pd_dst, label=False) + d_adapt_loss = (d_adapt_loss_src+d_adapt_loss_dst)/2 + + # optimize model + g_gradient = tape.gradient(g_adapt_loss, train_model.trainable_weights) + opt.apply_gradients(zip(g_gradient, train_model.trainable_weights)) + metric_manager.update("model/g_adapt_loss",g_adapt_loss) + # optimize dis + d_gradients = tape.gradient(d_adapt_loss, adapt_dis.trainable_weights) + opt_d.apply_gradients(zip(d_gradients, adapt_dis.trainable_weights)) + metric_manager.update("dis/d_adapt_loss_src",d_adapt_loss_src) + metric_manager.update("dis/d_adapt_loss_dst",d_adapt_loss_dst) + # delete persistent tape + del tape + return predict_dst + + # formal training procedure + + + # KungFu configure + kungfu_option = config.train.kungfu_option + if kungfu_option == KUNGFU.Sync_sgd: + print("using Kungfu.SynchronousSGDOptimizer!") + opt = SynchronousSGDOptimizer(opt) + elif kungfu_option == KUNGFU.Sync_avg: + print("using Kungfu.SynchronousAveragingOptimize!") + opt = SynchronousAveragingOptimizer(opt) + elif kungfu_option == KUNGFU.Pair_avg: + print("using Kungfu.PairAveragingOptimizer!") + opt = PairAveragingOptimizer(opt) + + train_model.train() + cur_epoch = step // epoch_size +1 + log(f"Start Training- total_epoch: {total_epoch} total_step: {total_step} current_epoch:{cur_epoch} "\ + +f"current_step:{step} batch_size:{batch_size} lr_init:{lr_init} lr_decay_steps:{lr_decay_steps} "\ + +f"lr_decay_factor:{lr_decay_factor} weight_decay_factor:{weight_decay_factor}" ) + for epoch_idx in range(cur_epoch,total_epoch): + log(f"Epoch {epoch_idx}/{total_epoch}:") + npu.set_npu_loop_size(epoch_size) + for _ in tqdm(range(0,epoch_size)): + step+=1 + metric_manager.start_timing() + image, mask, target_list = next(train_dataset_iter) + # extract gt_label + target_list = [cPickle.loads(target) for target in target_list.numpy()] + target_x = {key:[] for key,value in target_list[0].items()} + target_x = reduce(lambda x, y: {key:x[key]+[y[key]] for key,value in x.items()},[target_x]+target_list) + target_x = {key:np.stack(value) for key,value in target_x.items()} + target_x = to_tensor_dict(target_x) + + + # learning rate decay + if (step in lr_decay_steps): + new_lr_decay = lr_decay_factor**(lr_decay_steps.index(step) + 1) + lr = lr_init * new_lr_decay + + # optimize one step + predict_x = optimize_step(image, mask, target_x, train_model, metric_manager) + + # optimize domain adaptation + if(domainadapt_flag): + src_image = image + dst_image = next(dmadapt_train_dataset_iter) + predict_dst = optimize_step_dmadapt(src_image, dst_image, train_model, adapt_dis, metric_manager) + + if(step==1): + broadcast_variables(train_model.all_weights) + broadcast_variables(opt.variables()) + + # log info periodly + if ((step != 0) and (step % log_interval) == 0): + log(f"Train Epoch={epoch_idx} / {total_epoch}, Step={step} / {total_step}: learning_rate: {lr:.6e} 
{metric_manager.report_timing()}\n"\ + +f"{metric_manager.report_train()} ") + + # visualize periodically + if ((step != 0) and (step % vis_interval) == 0 and current_rank() == 0): + log(f"Visualizing prediction maps and target maps") + visualizer.visualize_compare(image_batch=image.numpy(), mask_batch=mask.numpy(), predict_x=predict_x, target_x=target_x,\ + name=f"train_{step}") + + # save result and ckpt periodically + if ((step!= 0) and (step % save_interval) == 0 and current_rank() == 0): + # save ckpt + log("saving model ckpt and result...") + save_step.assign(step) + save_lr.assign(lr) + ckpt_save_path = ckpt_manager.save() + log(f"ckpt save_path:{ckpt_save_path} saved!\n") + # save train model + model_save_path = os.path.join(model_dir, "newest_model.npz") + train_model.save_weights(model_save_path) + log(f"model save_path:{model_save_path} saved!\n") + # save discriminator model + if (domainadapt_flag): + dis_save_path = os.path.join(model_dir, "newest_discriminator.npz") + adapt_dis.save_weights(dis_save_path) + log(f"discriminator save_path:{dis_save_path} saved!\n") diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__init__.py new file mode 100644 index 000000000..9c00681bf --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/measure_flops.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/measure_flops.py new file mode 100644 index 000000000..78f046052 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/measure_flops.py @@ -0,0 +1,55 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +import argparse +from argparse import ArgumentParser +import tensorflow as tf +# notes: +# now, 'savemodel' format in tensorflow2, 'pb' format in tensorflow1, 'onnx' format in pytorch are +# official inference formats for each platform, current this script only suppory 'pb' format calculation +print(f"This script currently only support calculation flops of pb format model!") +argparser=ArgumentParser() +argparser.add_argument("--model_path",type=str,default=f"./save_dir/default_model/newest_model.pb",\ + help="the path to the model file") +args=argparser.parse_args() + +#load graph_def from pb format graph_file +model_path=args.model_path +graph_file=tf.io.gfile.GFile(model_path,"rb") +graph_def=tf.compat.v1.GraphDef() +graph_def.ParseFromString(graph_file.read()) +with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def) + run_meta=tf.compat.v1.RunMetadata() + options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation() + flops=tf.compat.v1.profiler.profile(graph,run_meta=run_meta,options=options) + print(f"model: {model_path} GFLOPS: {flops.total_float_ops/1e9}") \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/modelzoo_level.txt b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/modelzoo_level.txt new file mode 100644 index 000000000..11c8e35d4 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/modelzoo_level.txt @@ -0,0 +1,8 @@ +GPUStatus:OK +NPUMigrationStatus:OK +FuncStatus:OK +PrecisionStatus:OK +AutoTune:POK +PerfStatus:NOK +ModelConvert:OK +QuantStatus:OK \ No newline at end of file diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/official_test.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/official_test.py new file mode 100644 index 000000000..2f67288e5 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/official_test.py @@ -0,0 +1,110 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import sys +import math +import json +import time +import argparse +import matplotlib +import multiprocessing +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='FastPose.') + parser.add_argument("--model_type", + type=str, + default="Openpose", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,PoseProposal") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vgg19, Resnet18, Resnet50") + parser.add_argument("--model_name", + type=str, + default="default_name", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: coco ") + parser.add_argument("--dataset_version", + type=str, + default="2017", + help="dataset version, only use for MSCOCO and available for version 2014 and 2017 ") + parser.add_argument("--dataset_path", + type=str, + default="data", + help="dataset path,to determine the path to load the dataset") + parser.add_argument('--train_type', + type=str, + default="Single_train", + help='train type, available options: Single_train, Parallel_train') + parser.add_argument('--kf_optimizer', + type=str, + default='Pair_avg', + help='kung fu parallel optimizor,available options: Sync_sgd, Sync_avg, Pair_avg') + parser.add_argument('--test_num', + type=int, + default=100000, + help='number of test') + parser.add_argument('--vis_num', + type=int, + default=60, + help='number of visible test') + parser.add_argument('--multiscale', + type=bool, + default=False, + help='enable multiscale_search') + + + args=parser.parse_args() + Config.set_model_name(args.model_name) + Config.set_model_type(Config.MODEL[args.model_type]) + Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) + Config.set_dataset_type(Config.DATA[args.dataset_type]) + Config.set_dataset_path(args.dataset_path) + Config.set_dataset_version(args.dataset_version) + + config=Config.get_config() + model=Model.get_model(config) + test=Model.get_test(config) + dataset=Dataset.get_dataset(config) + + test(model,dataset,vis_num=args.vis_num,total_test_num=args.test_num,enable_multiscale_search=args.multiscale) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/pretrain.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/pretrain.py new file mode 100644 index 000000000..8ae826d6d --- /dev/null +++ 
b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/pretrain.py @@ -0,0 +1,81 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import sys +import math +import json +import time +import argparse +import matplotlib +import multiprocessing +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='FastPose.') + parser.add_argument("--model_type", + type=str, + default="Openpose", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,MobilenetThinOpenpose, PoseProposal") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vggtiny, Vgg19, Resnet18, Resnet50") + parser.add_argument("--model_name", + type=str, + default="default_name", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--dataset_path", + type=str, + default="./data", + help="dataset path,to determine the path to load the dataset") + + args=parser.parse_args() + #config model + Config.set_model_name(args.model_name) + Config.set_model_type(Config.MODEL[args.model_type]) + Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) + Config.set_pretrain(True) + #config dataset + Config.set_pretrain_dataset_path(args.dataset_path) + config=Config.get_config() + #train + model=Model.get_model(config) + pretrain=Model.get_pretrain(config) + dataset=Dataset.get_pretrain_dataset(config) + pretrain(model,dataset) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo.py new file mode 100644 index 000000000..14680a026 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo.py @@ -0,0 +1,128 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import sys +import glob +import argparse +import matplotlib +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Hyperpose') + parser.add_argument("--model_type", + type=str, + default="Openpose", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,MobilenetThinOpenpose, PoseProposal, Pifpaf") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vggtiny, Vgg19, Resnet18, Resnet50") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: MSCOCO, MPII ") + parser.add_argument("--model_name", + type=str, + default="test_open", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--image_dir", + type=str, + default="./save_dir/example_dir/image", + help="image paths to be processed by the model" + ) + parser.add_argument("--output_dir", + type=str, + default="./save_dir/example_dir/output_dir", + help="ouput directory of the model forwarding" + ) + +args=parser.parse_args() +# config model +Config.set_model_name(args.model_name) +Config.set_model_type(Config.MODEL[args.model_type]) +Config.set_dataset_type(Config.DATA[args.dataset_type]) +Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) +config = Config.get_config() +output_dir = os.path.join(args.output_dir,args.model_name) +os.makedirs(output_dir, exist_ok=True) + +# contruct model and processors +model = Model.get_model(config) +# visualizer +VisualizerClass = Model.get_visualizer(config) +visualizer = VisualizerClass(save_dir=output_dir, parts=model.parts, limbs=model.limbs) +# post processor +PostProcessorClass = Model.get_postprocessor(config) +post_processor = PostProcessorClass(parts=model.parts, limbs=model.limbs, hin=model.hin, win=model.win, hout=model.hout, + wout=model.wout, colors=model.colors) +# image processor +ImageProcessorClass = Model.get_imageprocessor() +image_processor = ImageProcessorClass(input_h=model.hin, input_w=model.win) + +# load weights +model_weight_path = f"./save_dir/{args.model_name}/model_dir/newest_model.npz" 
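+# the training pipeline stores weights with save_weights(..., format="npz_dict"),
+# so the demo restores them with the matching npz_dict format below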
+model.load_weights(model_weight_path, format="npz_dict") +model.eval() +# model.save(f"./save_dir/{args.model_name}/model_dir/newest_model.h5") +# begin process +for image_path in glob.glob(f"{args.image_dir}/*"): + image_name = os.path.basename(image_path) + print(f"processing image:{image_name}") + # image read, normalize, and scale + image = image_processor.read_image_rgb_float(image_path) + input_image, scale, pad = image_processor.image_pad_and_scale(image) + input_image = np.transpose(input_image,[2,0,1])[np.newaxis,:,:,:] + # model forward + predict_x = model.forward(input_image) + # post process + humans = post_processor.process(predict_x)[0] + # visualize heatmaps + visualizer.visualize(image_batch=input_image, predict_x=predict_x, humans_list=[humans], name=image_name) + # visualize results (restore detected humans) + print(f"{len(humans)} humans detected") + for human_idx,human in enumerate(humans,start=1): + human.unpad(pad) + human.unscale(scale) + print(f"human:{human_idx} num of detected body joints:{human.get_partnum()}") + human.print() + visualizer.visualize_result(image=image, humans=humans, name=f"{image_name}_result") + + + + diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo_pb.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo_pb.py new file mode 100644 index 000000000..fd291946f --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/python_demo_pb.py @@ -0,0 +1,170 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
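+# Module overview: same demo flow as python_demo.py, but inference runs through a frozen
+# .pb graph: the GraphDef is wrapped into a ConcreteFunction with wrap_frozen_graph, its
+# eight Identity outputs are mapped back into the pif/paf field dict expected by the post
+# processor, and detected humans are unpadded/unscaled before visualization.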
+ + +#!/usr/bin/env python3 + +import npu_device as npu +npu.open().as_default() + +import os +import cv2 +import sys +import glob +import argparse +import matplotlib +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset +from cv2 import dnn + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Hyperpose') + parser.add_argument("--model_type", + type=str, + default="Pifpaf", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,MobilenetThinOpenpose, PoseProposal, Pifpaf") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vggtiny, Vgg19, Resnet18, Resnet50") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: MSCOCO, MPII ") + parser.add_argument("--model_name", + type=str, + default="default_name", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--image_dir", + type=str, + default="./save_dir/example_dir/image", + help="image paths to be processed by the model" + ) + parser.add_argument("--output_dir", + type=str, + default="./save_dir/example_dir/output_dir", + help="ouput directory of the model forwarding" + ) + +args=parser.parse_args() +# config model +Config.set_model_name(args.model_name) +Config.set_model_type(Config.MODEL[args.model_type]) +Config.set_dataset_type(Config.DATA[args.dataset_type]) +Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) +config = Config.get_config() +output_dir = os.path.join(args.output_dir,args.model_name) +os.makedirs(output_dir, exist_ok=True) + +# contruct model and processors +model = Model.get_model(config) +# visualizer +VisualizerClass = Model.get_visualizer(config) +visualizer = VisualizerClass(save_dir=output_dir, parts=model.parts, limbs=model.limbs) +# post processor +PostProcessorClass = Model.get_postprocessor(config) +post_processor = PostProcessorClass(parts=model.parts, limbs=model.limbs, hin=model.hin, win=model.win, hout=model.hout, + wout=model.wout, colors=model.colors) +# image processor +ImageProcessorClass = Model.get_imageprocessor() +image_processor = ImageProcessorClass(input_h=model.hin, input_w=model.win) + +# load weights +model_weight_path = f"./save_dir/{args.model_name}/model_dir/newest_model.npz" +model.load_weights(model_weight_path, format="npz_dict") + +def wrap_frozen_graph(graph_def, inputs, outputs, print_graph=False): + def _imports_graph_def(): + tf.compat.v1.import_graph_def(graph_def, name="") + + wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, []) + import_graph = wrapped_import.graph + + print("-" * 50) + print("Frozen model layers: ") + layers = [op.name for op in import_graph.get_operations()] + if print_graph == True: + for layer in layers: + print(layer) + print("-" * 50) + + return wrapped_import.prune( + tf.nest.map_structure(import_graph.as_graph_element, inputs), + tf.nest.map_structure(import_graph.as_graph_element, outputs)) + +# Load frozen graph using TensorFlow 1.x functions +with tf.io.gfile.GFile(f"./save_dir/{args.model_name}/frozen_default_name.pb", "rb") as f: + graph_def = tf.compat.v1.GraphDef() + loaded = graph_def.ParseFromString(f.read()) + +# Wrap frozen graph to ConcreteFunctions +frozen_func = wrap_frozen_graph(graph_def=graph_def, + inputs=["x:0"], + 
outputs=["Identity:0","Identity_1:0","Identity_2:0","Identity_3:0","Identity_4:0","Identity_5:0","Identity_6:0","Identity_7:0"], + print_graph=True) + +model.eval() +# model.save(f"./save_dir/{args.model_name}/model_dir/newest_model.h5") +# begin process +for image_path in glob.glob(f"{args.image_dir}/*"): + image_name = os.path.basename(image_path) + print(f"processing image:{image_name}") + # image read, normalize, and scale + image = image_processor.read_image_rgb_float(image_path) + input_image, scale, pad = image_processor.image_pad_and_scale(image) + input_image = np.transpose(input_image,[2,0,1])[np.newaxis,:,:,:] + # model forward + # predict_x = model.forward(input_image) + predict_x2 = frozen_func(tf.convert_to_tensor(input_image)) + predict_x2_dict = {} + predict_x2_dict['pif_conf'] = predict_x2[0] + predict_x2_dict['pif_vec'] = predict_x2[1] + predict_x2_dict['pif_scale'] = predict_x2[2] + predict_x2_dict['paf_conf'] = predict_x2[3] + predict_x2_dict['paf_src_vec'] = predict_x2[4] + predict_x2_dict['paf_dst_vec'] = predict_x2[5] + predict_x2_dict['paf_src_scale'] = predict_x2[6] + predict_x2_dict['paf_dst_scale'] = predict_x2[7] + # post process + humans = post_processor.process(predict_x2_dict)[0] + # visualize heatmaps + visualizer.visualize(image_batch=input_image, predict_x=predict_x2_dict, humans_list=[humans], name=image_name) + # visualize results (restore detected humans) + print(f"{len(humans)} humans detected") + for human_idx,human in enumerate(humans,start=1): + human.unpad(pad) + human.unscale(scale) + print(f"human:{human_idx} num of detected body joints:{human.get_partnum()}") + human.print() + visualizer.visualize_result(image=image, humans=humans, name=f"{image_name}_result") + + + + diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/requirements.txt b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/requirements.txt new file mode 100644 index 000000000..6984db6ac --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/requirements.txt @@ -0,0 +1,36 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +cython>=0.29 +numpy==1.16.4 +easydict>=1.9,<=1.10 +opencv-python>=3.4,<3.5 +tensorflow==2.3.1 +tensorlayer==2.2.3 +pycocotools==2.0.0 # must be installed after cython and numpy are installed diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/auto-format.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/auto-format.sh new file mode 100644 index 000000000..12b2efba3 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/auto-format.sh @@ -0,0 +1,44 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +export CLANG_FORMAT=clang-format + +command -v $CLANG_FORMAT || python3 -m pip install clang-format + +format_dir() { + find "$1" -regex '.*\.\(cpp\|hpp\|cc\|cxx\)' -exec $CLANG_FORMAT -style=file -i {} \; +} + +cd "$(dirname "$0")"/.. + +format_dir ./examples +format_dir ./include +format_dir ./src diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/check_docker_run.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/check_docker_run.py new file mode 100644 index 000000000..0cd47cf6e --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/check_docker_run.py @@ -0,0 +1,88 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import unittest +import re +from distutils.version import StrictVersion + + +class LinuxCheck(unittest.TestCase): + def test_cuda_driver(self): + p = os.popen('cat /proc/driver/nvidia/version') + output = p.read() + p.close() + self.assertTrue('NVIDIA' in output.upper(), 'NVIDIA Driver not found. Please visit ' + 'https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index' + '.html ' + '#driver-installation') + if 'NVIDIA' in output.upper(): + version_code = None + for line in output.splitlines(): + if 'NVIDIA' in line: + for item in line.split(' '): + m = re.search('[0-9][0-9][0-9].[0-9][0-9].[0-9][0-9]', item) + if m is not None: + version_code = m.group(0) + break + self.assertNotEqual(version_code, None, 'NVIDIA version not found...') + if version_code is not None: + self.assertGreaterEqual(StrictVersion(version_code), StrictVersion('418.81.07'), 'Your CUDA driver is ' + 'old. Please upgrade ' + 'it to >= 418.81.07 ' + 'according ' + 'to ' + 'https://docs.nvidia' + '.com/cuda/cuda' + '-installation-guide' + '-linux/index.html' + '#driver-installation') + + def test_docker_version(self): + p = os.popen("docker version --format '{{.Client.Version}}'") + version_code = p.read() + return_code = p.close() + self.assertEqual(return_code, None, 'docker command not found...') + self.assertNotEqual(version_code, None, 'Docker version cannot be found...') + self.assertGreaterEqual(StrictVersion(version_code), StrictVersion('19.03'), 'Your docker version is too old ' + 'to support "--gpus" flag... ' + 'Please install a newer version ' + '(>= 19.03) via ' + 'https://docs.docker.com/engine' + '/install/') + + def test_nvidia_docker(self): + return_code = os.system("docker run --rm --gpus all nvidia/cuda:10.0-base nvidia-smi") + self.assertEqual(return_code, 0, 'Docker with CUDA functionality cannot run properly. Please visit ' + 'https://docs.nvidia.com/datacenter/cloud-native/container-toolkit' + '/install-guide.html#pre-requisites to install latest nvidia container') + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/download-openpifpaf-model.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/download-openpifpaf-model.sh new file mode 100644 index 000000000..42d233351 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/download-openpifpaf-model.sh @@ -0,0 +1,43 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/bin/sh + +set -e + +[ "$(command -v gdown)" ] || (echo "Downloading gdown via PIP" && python3 -m pip install gdown -U) + +model_name="openpifpaf-resnet50-HW=368x432.onnx" + +BASEDIR=$(realpath "$(dirname "$0")") +cd "$BASEDIR" +mkdir -p ../data/models +cd ../data/models + +python3 "$BASEDIR/downloader.py" --model $model_name diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/downloader.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/downloader.py new file mode 100644 index 000000000..fa09ec265 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/downloader.py @@ -0,0 +1,68 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import gdown +import argparse + + +class Model: + def __init__(self, id, md5): + self.id = id + self.md5 = md5 + + +model_dict = { + 'lopps-resnet50-V2-HW=368x432.onnx': Model('1tb8jnXkoiscfr-ZVydAALg7dtUwAKdEd', 'a6ba26d505c8150d9bf01950143d51d3'), + 'openpose-coco-V2-HW=368x656.onnx': Model('15A0SQyPlU2W-Btcf6Ngi6DY0_1CY50d7', '9f422740c7d41d93d6fe16408b0274ef'), + 'openpose-thin-V2-HW=368x432.onnx': Model('1xqXNFPJgsSjgv-AWdqnobcpRmdIu42eh', '65e26d62fd71dc0047c4c319fa3d9096'), + 'ppn-resnet50-V2-HW=384x384.onnx': Model('1qMSipZ5_QMyRuNQ7ux5isNxwr678ctwG', '0d1df2e61c0f550185d562ec67a5f2ca'), + 'TinyVGG-V1-HW=256x384.uff': Model('1KlKjNMaruJnNYEXQKqzHGqECBAmwB92T', '6551931d16e55cc9370c5c13d91383c3'), + 'openpose-mobile-HW=342x368.onnx': Model('1eDEOC0WBB50bryAbFmhfptyGMoV5wZGn', 'a09d901e39c4f4d913c547d614e249f9'), + 'openpifpaf-resnet50-HW=368x432.onnx': Model('1cxT1PCPPdMxEdvSB8Q5ewxyTh_TWgcsi', + '6c661ded88a91699a1c0582b403d5873'), + 'TinyVGG-V2-HW=342x368.onnx': Model('1ax6fTrxItLXshyHUFTHQVKs5eTRB3t6b', '52933b27c41342c959f4e49499a057ad') +} + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Download and configure HyperPose models.') + parser.add_argument('--model', type=str, nargs=1, help='ModelName') + + args = parser.parse_args() + model_name = args.model[0] + + if model_name not in model_dict.keys(): + print(f'Unknown model resource: {model_name}') + print('You may use these pretrained models:') + for k in model_dict.keys(): + print(f'---> {k}') + else: + m = model_dict[model_name] + url = f'https://drive.google.com/uc?id={m.id}' + gdown.download(url, model_name, quiet=False) + gdown.cached_download(url, model_name, md5=m.md5) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/test_docker.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/test_docker.py new file mode 100644 index 000000000..536759521 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/scripts/test_docker.py @@ -0,0 +1,96 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import os +import unittest +import re +import platform +from distutils.version import StrictVersion + + +class LinuxCheck(unittest.TestCase): + def test_platform(self): + self.assertEqual(platform.system(), 'Linux', 'At this point, this script only works for Linux...') + + def test_cuda_driver(self): + p = os.popen('cat /proc/driver/nvidia/version') + output = p.read() + p.close() + self.assertTrue('NVIDIA' in output.upper(), 'NVIDIA Driver not found. Please visit ' + 'https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index' + '.html ' + '#driver-installation') + if 'NVIDIA' in output.upper(): + version_code = None + for line in output.splitlines(): + if 'NVIDIA' in line: + for item in line.split(' '): + m = re.search('[0-9][0-9][0-9].[0-9][0-9].[0-9][0-9]', item) + if m is not None: + version_code = m.group(0) + break + self.assertNotEqual(version_code, None, 'NVIDIA version not found... Have you installed NVIDIA driver on ' + 'your Linux? If not please refer to ' + 'https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index' + '.html#driver-installation') + if version_code is not None: + self.assertGreaterEqual(StrictVersion(version_code), StrictVersion('410.48'), 'Your CUDA driver is ' + 'old. Please upgrade it ' + 'to >= 410.48 according ' + 'to ' + 'https://docs.nvidia' + '.com/cuda/cuda' + '-installation-guide' + '-linux/index.html' + '#driver-installation') + + def test_docker_version(self): + p = os.popen("docker version --format '{{.Client.Version}}'") + version_code = p.read() + return_code = p.close() + self.assertEqual(return_code, None, 'docker command not found... Have sure you have docker installed and have ' + 'enough privilege to use it. To install latest docker engine on Linux, ' + 'please refer to https://docs.docker.com/engine/install/') + self.assertNotEqual(version_code, None, 'Docker version cannot be found...') + self.assertGreaterEqual(StrictVersion(version_code), StrictVersion('19.03'), 'Your docker version is too old ' + 'to support "--gpus" flag... ' + 'Please install a newer version ' + '(>= 19.03) via ' + 'https://docs.docker.com/engine' + '/install/') + + def test_nvidia_docker(self): + return_code = os.system("docker run --rm --gpus all nvidia/cuda:10.0-base nvidia-smi") + self.assertEqual(return_code, 0, 'Docker with CUDA functionality cannot run properly. Please visit ' + 'https://docs.nvidia.com/datacenter/cloud-native/container-toolkit' + '/install-guide.html#pre-requisites to install latest nvidia container') + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/setup.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/setup.py new file mode 100644 index 000000000..bce1c59b8 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/setup.py @@ -0,0 +1,64 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import npu_device as npu +npu.open().as_default() + +from setuptools import setup, find_packages + +setup( + #basic info + name="hyperpose", + version="2.2.0", + #pack-up + packages=find_packages(), + include_package_data=False, + install_requires=[ + "cython>=0.29", + "numpy==1.16.4", + "easydict>=1.9,<=1.10", + "opencv-python>=3.4,<3.5", + "tensorflow==2.3.1", + "tensorlayer==2.2.3", + "pycocotools" + ], + #meta data + author="TensorLayer", + author_email="tensorlayer@gmail.com", + description="HyperPose is a library for building human pose estimation systems that can efficiently operate in the wild.", + long_description='Please visit HyperPose [GitHub repo](https://github.com/tensorlayer/hyperpose).', + long_description_content_type="text/markdown", + license="Apache 2.0 license", + keywords="pose estimation platform", + url="https://github.com/tensorlayer/hyperpose", + project_url={ + "Source Code": "https://github.com/tensorlayer/hyperpose", + "Documentation": "https://hyperpose.readthedocs.io/en/latest/" + } +) diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh new file mode 100644 index 000000000..a02b8135e --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +########################################################## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +########################################################## +# shell脚本所在路径 +cur_path=`echo $(cd $(dirname $0);pwd)` + +# 判断当前shell是否是performance +perf_flag=`echo $0 | grep performance | wc -l` + +# 当前执行网络的名称 +Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'` + +export RANK_SIZE=1 +export RANK_ID=0 +export JOB_ID=10087 + +# 路径参数初始化 +data_path="" +output_path="" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1P.sh " + echo " " + echo "parameter explain: + --data_path # dataset of training + --output_path # output of training + --train_steps # max_step for training + --train_epochs # max_epoch for training + --batch_size # batch size + -h/--help show help message + " + exit 1 +fi + +# 参数校验,不需要修改 +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --output_path* ]];then + output_path=`echo ${para#*=}` + elif [[ $para == --train_steps* ]];then + train_steps=`echo ${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --batch_size* ]];then + batch_size=`echo ${para#*=}` + fi +done + +# 校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +# 校验是否传入output_path,不需要修改 +if [[ 
$output_path == "" ]];then + output_path="./test/output/${ASCEND_DEVICE_ID}" +fi + +CaseName="" +function get_casename() +{ + if [ x"${perf_flag}" = x1 ]; + then + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf' + else + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc' + fi +} + +# 跳转到code目录 +cd ${cur_path}/../ + +rm -rf ./test/output/${ASCEND_DEVICE_ID} ### +mkdir -p ./test/output/${ASCEND_DEVICE_ID} ### + +# 训练开始时间记录,不需要修改 +start_time=$(date +%s) +########################################################## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +########################################################## + +#========================================================= +#========================================================= +#========训练执行命令,需要根据您的网络进行修改============== +#========================================================= +#========================================================= +# 基础参数,需要模型审视修改 +# 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 +# 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 +# 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 +train_epochs=5 +train_steps=70745 +batch_size=4 + +print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" +python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 + + +# 性能相关数据计算 +StepTime=`grep 's/it' $print_log | tail -n 10| grep "[0-9.]*s/it" -o| grep "[0-9.]*" -o | awk '{sum += $1} END {print sum/NR}'` +FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'` + +# 精度相关数据计算 +train_accuracy=`grep 'total_loss' $print_log | tail -n 1 | grep "loss: *[0-9.]*" -o` +# 提取所有loss打印信息 +grep 'total_loss' $print_log | grep " *[0-9.]*" -o > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt + + +########################################################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +########################################################### + +# 获取最终的casename,请保留,case文件名为${CaseName} +get_casename + +# 重命名loss文件 +if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ]; +then ### CHANGE: +sudo + sudo mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt ### +fi + +# 训练端到端耗时 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +echo "------------------ Final result ------------------" +# 输出性能FPS/单step耗时/端到端耗时 +echo "Final Performance images/sec : $FPS" +echo "Final Performance sec/step : $StepTime" +echo "E2E Training Duration sec : $e2e_time" + +# 输出训练精度 +echo "Final Train Accuracy : ${train_accuracy}" + +# 最后一个迭代loss值,不需要修改 +ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt`) + +#关键信息打印到${CaseName}.log中,不需要修改 + +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh new file mode 100644 index 000000000..1a4d11707 --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +########################################################## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +########################################################## +# shell脚本所在路径 +cur_path=`echo $(cd $(dirname $0);pwd)` + +# 判断当前shell是否是performance +perf_flag=`echo $0 | grep performance | wc -l` + +# 当前执行网络的名称 +Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'` + +export RANK_SIZE=1 +export RANK_ID=0 +export JOB_ID=10087 + +# 路径参数初始化 +data_path="" +output_path="" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1P.sh " + echo " " + echo "parameter explain: + --data_path # dataset of training + --output_path # output of training + --train_steps # max_step for training + --train_epochs # max_epoch for training + --batch_size # batch size + -h/--help show help message + " + exit 1 +fi + +# 参数校验,不需要修改 +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --output_path* ]];then + output_path=`echo ${para#*=}` + elif [[ $para == --train_steps* ]];then + train_steps=`echo ${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --batch_size* ]];then + batch_size=`echo ${para#*=}` + fi +done + +# 校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +# 校验是否传入output_path,不需要修改 +if [[ $output_path == "" ]];then + output_path="./test/output/${ASCEND_DEVICE_ID}" +fi + +CaseName="" +function get_casename() +{ + if [ x"${perf_flag}" = x1 ]; + then + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf' + else + CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc' + fi +} + +# 跳转到code目录 +cd ${cur_path}/../ + +rm -rf ./test/output/${ASCEND_DEVICE_ID} ### +mkdir -p ./test/output/${ASCEND_DEVICE_ID} ### + +# 训练开始时间记录,不需要修改 +start_time=$(date +%s) +########################################################## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 90行,请一定不要、不要、不要修改########## +########################################################## + +#========================================================= +#========================================================= +#========训练执行命令,需要根据您的网络进行修改============== +#========================================================= +#========================================================= +# 基础参数,需要模型审视修改 +# 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 +# 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 +# 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 +train_epochs=5 +train_steps=1000 +batch_size=4 + +print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" +python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 + + +# 性能相关数据计算 +StepTime=`grep 's/it' $print_log | tail -n 10| grep "[0-9.]*s/it" -o| grep "[0-9.]*" -o | awk '{sum += $1} END {print sum/NR}'` +FPS=`awk 
'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'` + +# 精度相关数据计算 +train_accuracy=`grep 'total_loss' $print_log | tail -n 1 | grep "loss: *[0-9.]*" -o` +# 提取所有loss打印信息 +grep 'total_loss' $print_log | grep " *[0-9.]*" -o > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt + + +########################################################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +#########后面的所有内容请不要修改########################### +########################################################### + +# 获取最终的casename,请保留,case文件名为${CaseName} +get_casename + +# 重命名loss文件 +if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ]; +then ### CHANGE: +sudo + sudo mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt ### +fi + +# 训练端到端耗时 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +echo "------------------ Final result ------------------" +# 输出性能FPS/单step耗时/端到端耗时 +echo "Final Performance images/sec : $FPS" +echo "Final Performance sec/step : $StepTime" +echo "E2E Training Duration sec : $e2e_time" + +# 输出训练精度 +echo "Final Train Accuracy : ${train_accuracy}" + +# 最后一个迭代loss值,不需要修改 +ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt`) + +#关键信息打印到${CaseName}.log中,不需要修改 + +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py new file mode 100644 index 000000000..25190fa2b --- /dev/null +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py @@ -0,0 +1,167 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env python3 + +import npu_device as npu +npu.global_options().fusion_switch_file="/home/test_user08/hyperpose_model/fusion_switch.cfg" + +# profiling +npu.global_options().profiling_config.enable_profiling = True +profiling_options = '{"output":"/home/test_user08/hyperpose_model/profiling", \ + "training_trace":"on", \ + "task_trace":"on", \ + "fp_point":"", \ + "bp_point":""}' +npu.global_options().profiling_config.profiling_options = profiling_options + + +npu.open().as_default() + +import os +import cv2 +import sys +import math +import json +import glob +import argparse +import matplotlib +import numpy as np +import tensorflow as tf +import tensorlayer as tl +from hyperpose import Config,Model,Dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Hyperpose') + parser.add_argument("--model_type", + type=str, + default="Pifpaf", + help="human pose estimation model type, available options: Openpose, LightweightOpenpose ,MobilenetThinOpenpose, PoseProposal, Pifpaf") + parser.add_argument("--model_backbone", + type=str, + default="Default", + help="model backbone, available options: Mobilenet, Vggtiny, Vgg19, Resnet18, Resnet50") + parser.add_argument("--model_name", + type=str, + default="test_pif", + help="model name,to distinguish model and determine model dir") + parser.add_argument("--dataset_type", + type=str, + default="MSCOCO", + help="dataset name,to determine which dataset to use, available options: MSCOCO, MPII ") + parser.add_argument("--dataset_version", + type=str, + default="2017", + help="dataset version, only use for MSCOCO and available for version 2014 and 2017") + parser.add_argument("--dataset_path", + type=str, + default="data", + help="dataset path,to determine the path to load the dataset") + parser.add_argument('--train_type', + type=str, + default="Single_train", + help='train type, available options: Single_train, Parallel_train') + parser.add_argument('--kf_optimizer', + type=str, + default='Sync_avg', + help='kung fu parallel optimizor,available options: Sync_sgd, Sync_avg, Pair_avg') + parser.add_argument('--use_official_dataset', + type=int, + default=1, + help='whether to use official dataset, could be used when only user data is needed') + parser.add_argument('--useradd_data_path', + type=str, + default=None, + help='path to user data directory where contains images folder and annotation json file') + parser.add_argument('--domainadapt_data_path', + type=str, + default=None, + help='path to user data directory where contains images for domain adaptation') + parser.add_argument('--optim_type', + type=str, + default="Adam", + help='optimizer type used for training') + parser.add_argument('--log_interval', + type=int, + default=None, + help='log frequency, None stands for using default value') + parser.add_argument("--vis_interval", + type=int, + default=None, + help="visualize frequency, None stands for using default value") + parser.add_argument('--save_interval', + type=int, + default=None, + help='log frequency, None stands for using default value') + + args=parser.parse_args() + #config model + 
Config.set_model_name(args.model_name) + Config.set_model_type(Config.MODEL[args.model_type]) + Config.set_model_backbone(Config.BACKBONE[args.model_backbone]) + #config train + Config.set_train_type(Config.TRAIN[args.train_type]) + Config.set_optim_type(Config.OPTIM[args.optim_type]) + Config.set_kungfu_option(Config.KUNGFU[args.kf_optimizer]) + Config.set_log_interval(args.log_interval) + Config.set_vis_interval(args.vis_interval) + Config.set_save_interval(args.save_interval) + #config dataset + Config.set_official_dataset(args.use_official_dataset) + Config.set_dataset_type(Config.DATA[args.dataset_type]) + Config.set_dataset_path(args.dataset_path) + Config.set_dataset_version(args.dataset_version) + #sample add user data to train + if(args.useradd_data_path!=None): + useradd_train_image_paths=[] + useradd_train_targets=[] + image_dir=os.path.join(args.useradd_data_path,"images") + anno_path=os.path.join(args.useradd_data_path,"anno.json") + #generate image paths and targets + anno_json=json.load(open(anno_path,mode="r")) + for image_path in anno_json["annotations"].keys(): + anno=anno_json["annotations"][image_path] + useradd_train_image_paths.append(os.path.join(image_dir,image_path)) + useradd_train_targets.append({ + "kpt":anno["keypoints"], + "mask":None, + "bbx":anno["bbox"], + "labeled":1 + }) + Config.set_useradd_data(useradd_train_image_paths,useradd_train_targets,useradd_scale_rate=1) + #sample use domain adaptation to train: + if(args.domainadapt_data_path!=None): + domainadapt_image_paths=glob.glob(os.path.join(args.domainadapt_data_path,"*")) + Config.set_domainadapt_dataset(domainadapt_train_img_paths=domainadapt_image_paths,domainadapt_scale_rate=1) + #train + config=Config.get_config() + model=Model.get_model(config) + train=Model.get_train(config) + dataset=Dataset.get_dataset(config) + train(model,dataset) -- Gitee From 65325efa643a658b01253d472abf4f1e26baf69d Mon Sep 17 00:00:00 2001 From: yongqingli Date: Fri, 9 Dec 2022 15:51:48 +0800 Subject: [PATCH 2/5] commit --- .../contrib/cv/PifPaf_for_TensorFlow2/README.md | 13 ++++++------- .../hyperpose/Config/__init__.py | 10 ++++++++++ .../Config/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 17471 bytes .../__pycache__/config_pifpaf.cpython-37.pyc | Bin 0 -> 1406 bytes .../__pycache__/config_pretrain.cpython-37.pyc | Bin 0 -> 652 bytes .../Config/__pycache__/define.cpython-37.pyc | Bin 0 -> 1656 bytes .../Dataset/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 4436 bytes .../__pycache__/base_dataset.cpython-37.pyc | Bin 0 -> 11659 bytes .../Dataset/__pycache__/common.cpython-37.pyc | Bin 0 -> 4204 bytes .../__pycache__/dmadapt_dataset.cpython-37.pyc | Bin 0 -> 1306 bytes .../__pycache__/multi_dataset.cpython-37.pyc | Bin 0 -> 4647 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 286 bytes .../__pycache__/dataset.cpython-37.pyc | Bin 0 -> 3567 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 313 bytes .../__pycache__/dataset.cpython-37.pyc | Bin 0 -> 8528 bytes .../__pycache__/define.cpython-37.pyc | Bin 0 -> 4153 bytes .../__pycache__/format.cpython-37.pyc | Bin 0 -> 5652 bytes .../__pycache__/generate.cpython-37.pyc | Bin 0 -> 1567 bytes .../__pycache__/prepare.cpython-37.pyc | Bin 0 -> 1791 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 317 bytes .../__pycache__/dataset.cpython-37.pyc | Bin 0 -> 7385 bytes .../__pycache__/define.cpython-37.pyc | Bin 0 -> 4025 bytes .../__pycache__/format.cpython-37.pyc | Bin 0 -> 5030 bytes .../__pycache__/generate.cpython-37.pyc | Bin 0 -> 1795 bytes 
.../__pycache__/prepare.cpython-37.pyc | Bin 0 -> 3456 bytes .../Model/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 18299 bytes .../Model/__pycache__/augmentor.cpython-37.pyc | Bin 0 -> 2874 bytes .../Model/__pycache__/backbones.cpython-37.pyc | Bin 0 -> 27451 bytes .../Model/__pycache__/base_model.cpython-37.pyc | Bin 0 -> 4728 bytes .../Model/__pycache__/common.cpython-37.pyc | Bin 0 -> 9734 bytes .../__pycache__/domainadapt.cpython-37.pyc | Bin 0 -> 2207 bytes .../Model/__pycache__/examine.cpython-37.pyc | Bin 0 -> 1308 bytes .../Model/__pycache__/human.cpython-37.pyc | Bin 0 -> 5501 bytes .../Model/__pycache__/metrics.cpython-37.pyc | Bin 0 -> 3448 bytes .../Model/__pycache__/pretrain.cpython-37.pyc | Bin 0 -> 5846 bytes .../Model/__pycache__/processor.cpython-37.pyc | Bin 0 -> 5642 bytes .../Model/__pycache__/train.cpython-37.pyc | Bin 0 -> 17566 bytes .../pifpaf/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 379 bytes .../pifpaf/__pycache__/define.cpython-37.pyc | Bin 0 -> 2763 bytes .../pifpaf/__pycache__/eval.cpython-37.pyc | Bin 0 -> 8207 bytes .../pifpaf/__pycache__/model.cpython-37.pyc | Bin 0 -> 9735 bytes .../pifpaf/__pycache__/processor.cpython-37.pyc | Bin 0 -> 17945 bytes .../pifpaf/__pycache__/utils.cpython-37.pyc | Bin 0 -> 12841 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 208 bytes .../test/train_full_1p.sh | 6 +++--- .../test/train_performance_1p.sh | 6 +++--- .../contrib/cv/PifPaf_for_TensorFlow2/train.py | 12 ++++++++++++ 47 files changed, 34 insertions(+), 13 deletions(-) create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/define.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/dmadapt_dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/multi_dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/format.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc 
create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/dataset.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/define.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/generate.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/examine.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/human.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/pretrain.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/train.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/processor.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc create mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md index c3f7f9d15..a6bf80b12 100644 --- a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/README.md @@ -119,21 +119,20 @@ 1. 
配置训练参数。 - 首先在脚本test/train_full_1p.sh 中,配置batch_size、steps、epochs、data_path等参数,请用户根据实际路径配置data_path,或者在启动训练的命令行中以参数形式下发。 + 首先在脚本test/train_full_1p.sh 中,配置batch_size、steps、data_path等参数,请用户根据实际路径配置data_path,或者在启动训练的命令行中以参数形式下发。 ``` batch_size=4 steps=1000000 - epochs=70 - data_path="../mscoco2017" + data_path="data" ``` - + 2. 启动训练。 - + 启动单卡训练 (脚本为/test/train_full_1p.sh ) - + ``` - bash train_full_1p.sh --data_path=../mscoco2017 + sh test/train_full_1p.sh --data_path="data" --batch_size=4 --steps=1000000 ```

训练结果

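The hunk below adds `set_batch_size` and `set_n_step` helpers to `hyperpose/Config/__init__.py`, which is how the `batch_size` and `steps` values configured in `test/train_full_1p.sh` (or passed on its command line, as in the README example above) can reach the training configuration. This patch also touches `train.py` (12 lines in the diffstat, hunk not reproduced in this excerpt), so the following is only a minimal sketch of how that wiring could look; the flag names and defaults are assumptions taken from the shell script, not the actual `train.py` change.

```
# Minimal sketch (not the actual train.py hunk): forward the shell-script
# arguments to the Config setters. Flag names and defaults are assumed from
# test/train_full_1p.sh; only set_batch_size/set_n_step are new in this patch.
import argparse
from hyperpose import Config

parser = argparse.ArgumentParser(description='Hyperpose')
parser.add_argument('--data_path', type=str, default='data',
                    help='dataset root, forwarded by train_full_1p.sh')
parser.add_argument('--output_path', type=str, default='./test/output',
                    help='directory for logs and checkpoints')
parser.add_argument('--batch_size', type=int, default=4,
                    help='training batch size')
parser.add_argument('--steps', type=int, default=1000000,
                    help='total number of training steps (n_step)')
args = parser.parse_args()

Config.set_dataset_path(args.data_path)   # existing setter, already used in train.py
Config.set_batch_size(args.batch_size)    # new setter added by this patch
Config.set_n_step(args.steps)             # new setter added by this patch
```

With wiring of this kind, the values the launch script forwards end up in `update_train.batch_size` and `update_train.n_step`, as shown in the Config hunk that follows.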
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py index 970aaacc9..36d074fc5 100644 --- a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__init__.py @@ -535,6 +535,16 @@ def set_log_interval(log_interval): def set_save_interval(save_interval): if(save_interval is not None): update_train.save_interval = save_interval + +# configure batch_size +def set_batch_size(batch_size): + if(batch_size is not None): + update_train.batch_size = batch_size + +# configure n_step +def set_n_step(n_step): + if(n_step is not None): + update_train.n_step = n_step # configure vis_interval def set_vis_interval(vis_interval): diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a165760f97184d8b8ae9194f9d6f17f96e97603 GIT binary patch literal 17471 zcmbtb-ESPpao=w)m!IN?Bpo0|o-W8UZLEFZl<8JOs#79`X<%2#}X~3g7^Nf&2w|%CD+>W@pI} zManC&d#1a(s=Kn*6=qk{i_fBc%U*~IaQgcOjf2U)0NZKna#7-sp^b%Zu30y3)aQWH>|1Z zn<9k zS9}lo1Mz+F3G&axqWBc~L$M_8Ab%w8ie==F#XWH!`HFZTK12RQJQRSGyXOBs4GvGOQrnOX4=neX16kOUZh3F$u;wPsb*m>-5l7d!L=HOngS=!5|Q>}l{&_jU! zF+&d@0sTu2Jp$-oG4$vW&=n0m2Izle==rijn+7ei(=KzDVCmeSLawP?yYg2LXfr{Y~a4 zr7v|a{cD?V@0Zd4(&qIS=@|C;&4tYy`!~^+pEhqXr6QLhRqy_d&3DA0DE&IUSL&Ab z-^KfT%+(wF@4rax4f!|rzaxfTr1yrqL;D|ehZ#p{|HCfIFbg*}Z!5~A7Ji-C8)5iS zz>juE0FNhdli)`*{5xWdsf~5V4pAHD*5hb>symL>r?~YPTB9t1+C)TcvO59z$q0TT zqIQa@O?9UZQJapao$gMfwUV_dP-FQ!G`2GlwX@wbfM+=b-VeJho6N}xrZ&@^IYjMT zMD2X{99o~}zRsYvlH!A$ybw{l*u4PwixK<<9@|G^@*Co90V-(9Jdd5KVflu?VHq{I zRks_4yS{1Fd}G`8Hw@EpVpSb!`nKCL>{i{CO@1zw4E&NYtec)C48Rj|=&onB+wly~ z@_oD2FzTIFjj(xTgK;GF0Mysf)bt3E?=qsOo#EDv$F8s(!)yuTj_HG(Z!}%eaV!8c zQVhxSWe39{^xlr=Tg`dZXSe0-sM3QSGHMt$2(DWgCQxnI9*wZ7`4e`)K=Cuvu|*jm z9gpdCJPjWKnRVZi%tzHc64|yjRbwW|E}6Wv&q9!B)!&^1S^7ISb9Ic5hypH9bfNK^Z-GfIHSR%TdkyTj%a9t}X6fIUaES@sm4u9|iff&lex8?4Da< z!4GG~ZrQ$#dEK?drMZuT;Z7Svw5o~&CC;k;PTP{x7;jK~(zaS{47M_)=Ai1f+ny|< zP7YE+=As;;WSEi>B$dfWc4NcewkVC-59{_0A*f70cGqpkYFYl;hTV#QMm11#-Dj}j zRghklE_z@(m6EP)w_8DB)vmYAdSxUmWTvhKLs!H{}(%pFSx`8o#W1!LB8+w;AuE33^dVas4p@s%D2 zp4na%50!}!tKvPC1%tv`?=%`vAi-HI_bPf)70@g-tJ-$Vn$@&geo#ppfw7fyqUUw`F>jkH%b<=9Pa;Msm?zX=XWLz)EwM{$?pfe*}M;p z=Ad?Pne{BxLNE-#-do8OA_N**21~=29O>q{H;$G+`8LtY_;vg_LAKHE1Vhx0xQR|x zM);+saaM-atD>$e>;08=T{%Qz5~1Oj9vbx;qT#n58uh9xhm|zw?u@Fp2+@ds*TA|u zLZz$2E)BDUhkY7VZ#`W4J;bG6d${zws{Z?wybI3#-`#}`w`ncV`lv$V%NrjoV6m;5 z^=cggy~d)y4AH-}ut9o)w8O#@>zIXVmDEeMI^W(2O1(|x5zu;nha@#UoW7jy|1&bB zbnIWIkna1JDW;3*souZrFxpS0Pvh5H1NM*B~|-!m8UXY^*$ghAC?swhvRIBlYGcEndR}XLcOk%V~Fy z!6~bRkX?T{vNBOKY#(+CZ}4^3!DNul!LvyZWHFGg>nMYUH%v~B-iJ-uxM|!rVhU}Z z$KuWNh7#?&i>+|Sg^gnB@zxyJG3e6<^p0=TrQ3WdB7J+C>{{Z7Kk}$M8*e9rlz^Yyim%C6rAEA*+Z;^i5+dbZ8?}4Wn+-#u!iaw5jOjD zFBChK>0~!rCe0sgC9wE*sY`A3&(#~F8oqa%rZvPl>sd~{{JacJ&vWwKWz+GjIim~@ z7xoRCqSdC^E@KDd-1A{1um$Fo)iY|mK6_<`6#~pF#Z{@M~rN zdD-77``cxIqwHJwk2&zR%1IoWyB$es$VjFxy|(HRjQ+4|u2fGRY%BHT*v)lr&&kV- z+n)KmGMABOpko4UhmmfWOUwHxV%4mYWr7%`omcG?1oiJI^GHvn&dCQzkEy?YgeIiF zV2V(7SaT`og}bk;4q6SgxDr5358jYPehUEz%d;i4;Y(3_YB?TDC8_P@fEqFltJP_e zlhR?T{HD}U9KLLr&rI7fNjJD{_R0EJvJr=#4{cFn)|EDl9MdtvqPad{?{Ai1@*1;F z&kja6rERu+1J;s9UMAQS;)Pl;o$KTekULpxca9JzZB6uch!8RjYgVnV 
z8A&$zF?Wgoq-N3fK&}jICj`ip0;d{M__YnQ)q+9mN`Ykr_13r2ZkHl1j+S0ZA*S?HKPRqB~fJT219?;j8jF`GR(%>L`p^6TaW2*)WaeAB}PGcla+&=|& zA(EvjxpGjJvMAs)n@v1~9uu8`{5B;rf18B|l@V$Ep2d5%nv33Zc+bb)^LQ`B-V1mi zh`kTsy;vO-MMQiD&F_PBD3KJwAVFmHs)RK%T@WOMtO$f@FkKzn+n|UjM@(&LP(v8v zE_+!Xw$&Pi@g%BUBU%VoQ9%T@Hy|u*<{M48bv6{gq6iW?FiT*xy{m|*W!5$n1W?qr zum`ZHk<*5d0Qbwo))K{*uhJWZhZcAsRib(oU6*h!5xt@KEO|4I_5z~{HA0HIHpwrI zt-u}#4T!cewF8k%9Qw^KB+%Dg0isAi4Mxsa#t~eCil=U881NR{pVW2zK-4r zML&}F6YMj)L$OZnmU>gYJG#&}L%SmjamM16V@8@Ec#X4Xx4583wK7-g^Kz)K@+zsE z3zW=Ka*+}`{grQ0Vo>r95-6u9^h|-OWN(PD-6GvI3Xs%vAw3@bldC8>=AL&!+ms&E z1EQvC23?7Hm2`Z9b0a#cR!Su_bvgtLTa8+%`ZBbGjn%H2N0UKjKDkObK2R!wgsva6 z+M_7a^RUxu)H{Y_uS>0h%v#Mt{FDqsMod(JG15MaR&9VT2a67%UdNG;CCgg7Av6J7 zle9;u-ul5AJN%}my^HZfN20UUE$$Q!1IV9x4a<~|CVIQ%D?K@eFTcW((}p3#&Oha8 zP!kua5?X$JP^)@6H5}D*ua32KlDOh5fCPb$Ody=NGm|WwJ*d@aC5Q#psa{qTCN?39^Kx^?EZ#ik|fr#OxYqKV1c8cBMle@C-C2KQUY_4 zxJ$o2j9hv$o>vAO$$@5^sEvfks?@!MdVvo)`XDwUZVbyG9-kY(3c0av`dH88k8%9< zNlpq|CX{-a}$-TX?cLJ|fRD^>)ss}AW2B@s$D zux%yn^-f$%g-WbtS;8ZAvn|cGjuX>qS`0swE6IvR7SSbZhnxg`yiN`S(7;@( zNSB^0J)!mUcyYB%j;`@|^}z!!SY5iWh;p}ZLhl)is}Eo#z*2zMVcGB)`cA1r%IaW8 z^XbsB4h^B_r-Vzmcgzu<9^wd!j&%sUmV9YP6^BN(FeaI=sA4mSxt!s!%SGo9WxM0%U z3FRh6g{qH9pyoQRj8kynv?iboot7{HiB@61xifveR_2IiR*W^hiw%(|Di(p`>TV4z zPm0Ju1&50Tf*I?J@Zi;T-33fIN?%W5v?r~Q!FtzSO&Pi!>a@iFN}-z9rzGr zWmemEZOgIxdd=R(AKdA16}?Wk!-3c$>R7nGs)YBpVb_gqit|G#H6f1b^RAV&X37do z2adx<5wZ~X!=htROQ=Ojo)(p6|AFOn8I+?G>7amU3$)TGe~i#pS%Lxf6KE+jtf?} zrGRTX)w*Lgf-~Wp(kpgTT^jO&>HZ3}=)GVfEcS3XYq9A*mz944`qHFiost7SKC?4y zN38f~8Wi0`OX(XUvWDkm|9%2B>JByrl=3rrKya(_atd{ofiIrk{TzoED&9cTiQ8Wo zA~u0=4#97M@`L?iejfn+7wQ`BhM;S>P+}o}&2AMRfAMJT!Rn*Cv3{Km9sRIl`s${K z4%QPoI7I?+u+6{r^{_!bBsriT<&jLB#>-)uh@>H>q+whjl#a7fI?+-RaXwyhP%2Kx zD`O&%kv7Ol3K}2e@)?%u7!|!3{*A@~Q%(zk+`>~buVa^Y`q$M+q`^s+!9*v4J~J@_ z)WrXTh%?*LvUwcB>{^?2l@!Ba)y;9j1Lg(mqC|i&=8Nqgr;*VXZF^;GJG2w%j=>1U zA=Fi0-GO0Gm5jJ3MBBTRj17%0L#wOEk;#eENj76$lQ(7FyrRauv*;~*-Z+70h1-7= zHXtQUKhsc$CFmZk0C?}p088LV@2ZdvwcR%6Ik2P)rP`yuZ%Cy$Wj12S}0QtDOS4AT68rZ_Jq5 z#$%3!#-k_uet20A`)|W+Q3Qj~;?T}j#s_;ZWLzj)4aOL~gq@+l55PUd0Ug_vAE~$B3_88C~^_+|Y5af6egu+8lA!a3-eA4dWQet>IAG28uuY-UL zc*MP~(tY(!GT&i=!%Vht-G>DZeaDGAIWRqBx);~IwpFv?xdKIG3Y8?E)kMOHbheSet;cOH0s=AL$l7yFHw8TOlpZW}eSx#j&7q=3;wu_a3NL!CR zbtU0or1D-oV@-Cn (~t!bV^3NjodfMyP#eTGYX%WC_IBYfRq+g5i8!)hgtboSKo z=Z#f_&QO2Y(0u;3s@aY;&3$dDS@^L9n zr{RIa+d>VYR^3z5K;$A-#rM0YgvWS%8*@~P0Yr&(1T=G&?MPN(UX#jO-f{}s2~|1#>U9| z3FJ9Q)(3bdqE7~C=e6l`m>Gjkh&D?KDf6*eX!yS75DTp;Zh*8wak#hosPDX#dXn9F2GGH9t>fVeiV9C*w7V~(8iy+rRs6x; zm4Avfna8iB$q^yY?7o5~!`+C{+&ekVL9^4KkdaF&?9UPivTdyg8Nt3ABl^>m6CK9A zQyr9Lx<5;x%RWWO+!*)=G17m2a?&HXlHru!hqYM7^@_L@-o2}fC3kSDq|{&9!(5? 
z^Hv$r-@K_xbtR*B8-o;=3Pl)%D*t44?ZM+9w`0yB(8q*w zh7w|_Qgx(zD&txbqjg`I3|B>LMOLQ5!uV3DoYJK|F;>32AR1Ll)+zZDN=!;NDBGtXC!`Cg=?3vGN`giq-C*ql#`d5_~$CJx|nERCa19OID`M&SqqM+4YT zWCM&heBN~>?NX993)bF})Tt!(CP~pqTJVyVpQM!`NibPJVq(MswTz0O_%Uv6;cMXU z(LEXOXGn_tF%*3;Rl-M7!^nrz^I5!Si}BB@a)k{3si2w4Glg97r>T7LLUFoyzIeL$ o2J&oiv^a+6OT}!dP|Ou`r98?eiWiHM#XSGxQmWy9*<$wp0TzcR&j0`b literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de74cc94af8a09fdf7261cf96b00abc7b8646a6f GIT binary patch literal 1406 zcmY+C&u<$=6vt=1>-E}RJ4w?fX_BTXzZw_nb%RtmK-L5u z*dab4aX}op0YYkz+_-S(%0IwikT_5-R!@iv;>4Tv56Z0OGxL2j^Jdtm!y;fIagyLV$o45LaW`)50_IRDVXaiOp~gD1aUmL(?_Gl3NNb*9yyS zIbSz|N?3JmjWGLj;?|(ZYP1d}asf)nMX-=dP)1&Y3UZm5{mVFBfhux^mHVqWu7Qob z3N=>iuY60O{+*6`J@>i+3v7dK!Xok-EFrhx67o7MBe&r)@&;Uim2LrNufQtuRais5 z#;pG9IKDCWdlmg|(3`p69rU|{erI>C}F;>|>8e_&u!;r)3ZDYXKj4UUJyG`+_XuK^eQ8J*whP=b1j&o_m35#UK!%HVY%R-z< z69tc0s($KJ#g(eKQWGI9hFJv6Vp>wN$kbdG^E}96L=!x3M}v^g@+S2jkEDK%FnaMo zNaI}5e8T<2^JQg{L2;3=**wpC>0tVx`eehI*ZJfuj+m_GA^LF|dP0`x`Ip5!fwFSu ziB6AZe8gleuN032OEH_?!GoqPbi7W*v8o6*6a00C3T~Ocah&XlYbA#HLxl3~m zgVYl|j^y$=$7E_2NLBl-m3~N#9phRQTs3rx%CJ$$zRr z#?sci4%Xv|=Tl59J!MfArym6I`L5H$!lYPK=CtF;=Uu0h4>ZZk*Gw*?^&S>s5U_h& v=s7z@Sejk1v^CAv&4Q`TyJl&o>V-uet?Db}pH|70rA_RLs;k}km+$@;C3CVr literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f5e69c1628a70b5cbd7d39801c1807484a311eb GIT binary patch literal 652 zcmZvYzi-qq6vv(1kEFSzJ$i)+p{}=F5EBC`6+b#bRSBjnmM?xuEPQcf`>wj1kXSl1 zF}7o8#LnNq=(Q|2c0-7PmqXVHNBQLIXX|~xFE5fLBDe-$Kj*`Mkehb1T|SCqe2qqM z!aYec54=nNid=TCh}YJ9*`4*WzDJ<{iDUuqKtMy@Megt(GU9#YfCtE!hsb2ZUA}{Q zk4MNMA0Y4X7`e|AThfB%R%v&C zYf0N_4OrSRTe9KuwdhifA0GNmn`3qWO;x=%b5#U5| z0=&c=;Ev)Y;AQ3lcNKSlS6CHzRq-;@2U`8w1t`(Hx-N}QGM^kHyL@yoc3@=N034Ju zjTV?ri>yEmR-`5~sKrcbGm8>t(-I@pVI^8-4s}_XR+tMrR3zDLwqEb{y^g>yUMMJ_ zRYbs?T$l(J0;Ej4AP6M{%%xYaN)iM~IN?DcogkQ`Y#yWS2En^|82_BH+k6zx;~6dY z(zj8}6FxiL`jtGUwbe{Jio+Raw6gLHPojiYS9CB5_*7N1xQ%Vkb z2A|*BzOEQP>Qcy%JGNUkB&03_*8d4h`N#z8bbnVoUOmDBZM$`>iD2xED3l}}9XYLNxm zn1-Wu#+KFwZgK-)6Gn!$Ux%&y`Jvv0g?|ne!nCB(ZVsDyQF?XzYJgU6(CWAP)ad!$ zuC$K^ox^r#Pm#fClb@Qr3 zcMv6GI07>fV$=GefM(_KlIQBX2m8H*kztWhpFmH(w%aiCyEZ)UpwE=n>mzS}?`SPh z^^8}yuBfDBut<)BY|P}+3QxzB_+cahuyR%2hn<@+GHk$Q76tnLOQ+fQhuz*kWlE#T z!ilsGdxI&ott<}q+qvQ%fRH^HzluX&-IY!JL>26{Wzhr4^v%r>eQ+4T)XPY)Kt^J zGx7I7x;xhm<6kt$pAtHc@Q?qBh8fKC4BO=9`_c#Feffi7rnHJ(VI@}fs(#I``E|SQ zH|z%HO042d_)WX1`&Dn!Z`m!~uX$5`+ivTA-JAAj>>1r}cvt*cdsg=+yg7f~p4a^* zWG~oP(Vt|kZw-5qO|drK*Vr_h!TUP9!e;UQf|)zU^4xboYixaIqFpX0)!kQ*H@1@M z=F9bGFOtgoTa! 
literal 0
HcmV?d00001

diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2fe68b3a73d72dabe03ac1e807f4a3ee0734fa1
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ba8236464d3dfcd2e6b0187aa3149c3400532ef
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1012322df406a1354aa0db6f5389a42877eb07e
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf6dbd2eb69f2c2f4bbd8eaad58ea0df38542111
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7a40d5b4c57f8a910634a581597d4bfda6b6bdb
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3ee8b5c9dd396745056c36b12cdbb0d3fa7a48b
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ea9c191bad6ebeec7b4aa728c0c4b6fb443d882
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc5617c25ed47a3269c96b342ff75c099d68cb64
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23edcf632897c11513c57daaea7f0aaa770df19f
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d08b998f72d35ea107f0d8c25cd4b68e152ebc17
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9b789ad27e6ab23680a998d6d18ca1315d69220
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07ea66658cc35caabe9519c5584b3539adcc05d1
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc
deleted file mode 100644
index 604b81582c42dd6c8188c32d5dcbad8fd0f45af6..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc
deleted file mode 100644
index 7bfa6d758796323509c67c88eca4c78eb971b134..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc
deleted file mode 100644
index 09c80152e0c0d5ba0ed2f78851539c0ad6e40452..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc
deleted file mode 100644
index ef71b407910778c83987b06e4f19ef93d2ee6777..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc
deleted file mode 100644
index 95deddf43de404e0c5048e23cb93bd5a9df4be23..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc
deleted file mode 100644
index 6d49828d1c33416359108a9936f04fa312c3c29a..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc
deleted file mode 100644
index b13d4d4afd7d8b6156be519643b511a3f4d0c5c4..0000000000000000000000000000000000000000
Binary files a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc and /dev/null differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc
deleted file mode 100644
index 48bfd81b5eafc30b3d54f4ce1808a454a45c4fbf..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8207
zhrcMpipRNGfI&YF%gYXNn)Sm{M2yb{$OsYsV!L@=FSJyDRZ$n+C3U6pA= zrfV;f4pt7{46m^|=J&Bj8YI3#;S_=R$%i@M_F=R#zs?%0F_7WFGpHtN<6N*xH3+p& zLDuU0<0neY0nGndKm|Qok>hBhzbyD9&pniqE^ly4)<3%q zq|Oip4d82l%0;K4LH^V@gLXS|d?ptPJF>SAB z%UR&m98ga__MMzbZ;0u8XA?@MDTO?@y$I2EGNvRxI?Dz)E1?Gi!-2tpjdm&^cwGjTn zPVU5}Cc8=aF6-zybo9Cxo=wT4tUaKkuZJ@v3_dUrmJFO04;S-_K@Nh`)WxK1W4X%g z!X%#mP_m_u4vaIYi#@{PC418jr`)@+Ub44)IR41Q(RzH`Gd^(KyLD81+{lFvONTY} zLPVV`8M+nT?dUk{Vh7?N>N?}yKx8FG3Cn2B^UW+*f}h z@(=C936N13oH7(06%+lCt-r7C9Hk9_;Cr1LG0-cKOEC9sXD|!{ejg3^ZxOMG?0|^8 zG(g%=B2A)VU_{v#nPeTw~0hVW<(4k*NFTMk>4dk@K)4F1W7d$ z`xEtxC3LYy;T87?xI+Uu5LLNu3C6>$M7}qf0^?3`$B?(7Q(GR!H>s1`M4qJf@-}YZ z`n%{03e>3^sl+^`q#{KIHxL6GmmDU1wJ7ZR?2y(6>zuVF@Zd~ji1{G?;9PydX8?mr zh2<7gfmL{f$t04*xVn|l4-*GNw}9~i=YZpYYXbhKa+p$)Facog^$q?h&6uJ~Tb>`wEBy^f4^l~8b%UM5;f2353Y<~7%>^P6Y{^E(J{h7L6rvYMuj51egdo;j|3k)Rw?$6~JjNT%T|JLg2J{32@rF1UOxZ zetF<@^#X8;!BPyIVz3kgr)!S^r;kbC^yCHLbbSe&o_Y*8U0(vHr((zi`t*MboSyn$ z0H+%t0jH0E(?`JR|1oglhw$h?JxBNeF#R%!;1Zm~e~%ro$?MWWd^E&AWcz%A zDv_6`INx^zP8VfziA5b}3cCGton8D}Bkw`8@t1#5HcGIrpMYzLoHNtVffTxHl9#HS zZlvfSG9LS${6~%7raHR*lL%TuEKc)3r)#NoQn=7x1kRtj_Dffh3k(olElD&-`Yp+~ QrnXSlSSx&5*9&+53oG~#lmGw# diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc deleted file mode 100644 index adb42bd9996c8af46d89a798f252d1fc07846c16..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9735 zcmds7TZ|;hRn1pbWo13O`u&)mnVz1W@zi2(ua{*pcr+<0WfjTKLd&Dc!Cc=n8H+F zu~kn!)=rdT{X|hE&#()ktZ0|S+qBDgYrfgDY)jP9{YtNDS4G!q%HqkZ`EE^l~)y3 zVbzBUt9sQ3x_zFhUsRg4Q;b;IZmMz7ckg>VK&o(Oz&yWMh$}bTu(i8A;Jp`FT)a8x z-@|+A7rkM)d(aKMgi(Ht8ot&I!nl0LJ$!vI@Ly!7D!N5q<(q@n;O4*|@TMNuhTTKY zcY@vhcH8&jYR3zmo)_$Pc$dXB?sfM4?vdyCgCIE7Wc%qactPI_f93^e$8Fu+8T37L zTD#+gyxY3t_T7%hTQr08H=87azeB>qDlo;?Je{dbV> z*$O)cPL+MctFhH@sO&tu@UURlu^JcI8dhV?o5E@|kiW#%kzW`2>30=Fv1hPC*W%*q z-S*IJpH5>+r!>Lt5YjzeLu&gLuj9qbAV0)|-;cK9dTZD1_dP#w+Fc%mtp!=4(3_N4 z#}oWH`UsT=%CSlc8mW=uXpsi0M+T@76+jD-4qA+gprxn;YDN{%a#RJiA``R{BE1Lh zUKx)ytDJ*az^_F$(0Wt{ord9?=ZXi_zCu zB^`CY5tV5qs8xML@}L@+|6VPraRGViCCy(%dJWI{y-T2w-5&ZCvbcaZl|l}C3#|5t zq_vLpWjr6Q_6Ad+0`={V^$*_~e#-j6R_E{U{mnO5Hveg>^WM$h_kZ}~f86Svx7jCu z@cHj=b?%>D{kZwT_qICU|DO15-RgYj)qgpf`_6xCbv|};_22*X<&`bk)yxfDr;T0h zhOxOHc+L=8HnxJ6>w8Xs9T*pw*K+U2x;G4jel<0|2%X`}AWbu_cl*Qr&}n;axX-;H zF7(kw5SRLn-|g)Lary0im&u-t-NAks8wW&-0d_6(L|4r#aUs|rdOS8Ar`zv_juRJ# zE)UV0=!BoA4_*L?i!H*BAl7%g{a8Qf_W4C(o8l||{hk2+6D zFo6|<)m(^;!1LSu3iWc8$VZ4|lI7PZ+ay9O%YP0et_(TQ)y10hJ3`R>IV$@ok&h91 zp2#MV7l?eE$csdNnh5C$;lAUvd^ZRj=iADE-`?CE^t{c`gD9YH{Dn_!Vm25rX`%Cm z*AE8#mOnUnadY=RR06u>ZHhB!6SKo?HhXj+JwLq9H_$tMKez#+=$2Z1!r#&pc-Pcx zs--TdGg?E%pEjeesx#`*6vlxMJfG^|8yNbscZflBdtN_ueTRF#dx#^;9eP2F&UyL~ z2v_mk#1ni4gy3^eSpqQasppgt<+NixQcu(o0fMndpsF$Tkv1wsg}01Raa0Z7SqV>AuuDFJ57fZZwpwGJR{MAJHN#+cZTV&C=s0% z1|8&PH*mxPHY-v+#2zS-qL3B~joIm8E0u{Q6;|kss8=EFNi|+%QdO^}C8Dv&QIW{i z(lXhd$YzRfPpA5vK}NI4e->inpCBUCt^`#$a5>|z5bM(*%~`&MSF9fL&(OOHz09lt z+sgVpw8fNqF2L#*HGY-IZ6dz}f>O*#lzx_YpCj@bkvl}TiF}^O>mZN`_gJ?TI)|K8 zOcGjV{>vc0f+wI&qv#E-3RR+K#?aKq|4hwLP4%drC|EMP7MV5ltKc#52o3U)fF?kT z=-`8n;2>r?jya9dVW+XeGR|yG8R;Vf`lGR;;Fbh6lQN-EC@TxDwM_NPBMS#O9p$MO zS#1+~QrW)MRE1P?5+h;p2@z$2L3I*9vm(}ne}U@0L1ZlPXSSb^W?bm@+a3?mLqHaZ zqSdukO>D_(A~8`Rv1#V6x5#FpUkOhEj}RPknm3Q5w;!EBYjDkQ%1(P zlzG=i-*p3SayB_xd z+-|rV>;9nAER6*dmwSMWd#=Cl*_OLAxaUFrg1dI<(An_-b9NPgGH8d6C?mE=Op3@7 zb%3J1;cw#0QceJ^DxOvKXgZP8xK;*FB+*PpA0QK{dvHpYNgi-YLUm6+1{|FbJQW}p 
zs23nh6K^Vo=}XcQstC*HxD*vpvlzjEzN_>1sm8G>z>RD|ZBAiER9YemY0fE&%E)Ae z$XtQ~qD=v9z6vW0l8y9P;+3N!+LTD*7!PtMYGeR5VP``2<~A43g9evm(CNY=fNKK5 z1!q@cyg;dKlW_idt7*ZwAjIZxV)Ai?x`A=y^ammTHR2Qjk$oJwTE8D(3c8)%pv#;V zhu`4<_4#0U-#O@pyYNKl`}{3xQ%dT^77${;1xsna*R;f{+od6z!_u%z9h}XSsI&+4 z#g5zSxiPQ`ev~wbEa(M3MhX#|JLrbIomd^l+Ay>WqSd4#m^YGY?_eAOnM8_a;UqKh z*TAV}LM4_!>0MRp<5Mkd*CJ)0ANdM|JYw_wHKd#=Mj@vdx-_^GicR&7mglsgE<$yn zBzZnmng2pq+#}B!&VZye64Ac~HOujzF%nY9 ze9fni#1bPZN0t}~P&D7}sUyJ#+e_zAhCN_L7N|ik3PxoB+bfJ@UB=7`=O9OH0xhb5 zV?|m;=HX5q8tJsuPY^NH^ar$GMpv?%56}CxHZWDn@(nwchgUrcf8<^LdHSS=s z#U|jtK`Byx?t)-{gz%^NOT>7Y7)BosL2QI>AAkl6NGLt`P}HnTB`-qBVL%9F$Hv&P zi%@pKz?-Z9-lpE@^Eh)wU5CzQnYwSAvO4jS)P3Hk$`xTWFll2YF)dQz1IPga(7@y%qC!&Z=b;>ox>Si-a{ta70-Z60fN{8 zQcF|l1XMz2=|~wBDdG@mi*yVDK%Q9|iYDu~alP~YpV9BHfM-u# zNWO#d#wHyCLw_GWlDi}EN-mu#myQ-e2w8|;+Gy%dZoLxcPhe4ndHn&F(+vlFasjL_ z3MY~3{Utseh{ysUXhwjK3Gk^RWf2Ia=hL_aY}0yAL~a0oGIWyy(-m}9#3O*I4kL+7 zA#83yilAf+Vf!pAZ3D)0L7LClLR1Iml%ScxBTICfYBKmPX6qQu~~-hIHC~Wpr+|Iyx^#-iQ{(h;Uwx5KX`s=c84@ zq0`yvySI7AZR1fkE;yObD|+yL}^5JvNYm5S-KHj$kNl%r7S%Yt!L@k z=wg zXGe1rJh5HQHitt&r~J`EzUtU-dW@gAP)AEos4u&pEXkawqgt*F_l_De8xF`hrO5#0 z?_%dj2mNhI2``;+Ea$3^9rUr&UPG0*xIaWZ$ZKAa_A6P`u^GCYVzY<*fU>Z1VZBy> zA;To1Vp~au;6E)8XF@e85VNu?0Qzy8BqQ1+X*xFSYLby{YG+mm=Rvw))UKZ8k`K`5 zlGU*5Nha}LvwR|bdn#8h2AeXdf?zaqHRK0UPPQI$PT41E=;MXA>q$QQWUgGybt+du zFdDfU$tQDF&WI%a=!_IX@>yAB%o~riai$)LZ_G&tSQ2&M%2;EKngo55ELp1ex6p=w ztOdo`&^Hi6K|H0fs%_|akfXl^bwRgKN|tC<6I7;<&Y~@8e=X`N{n05*KjB}ek$YAV zF?mGnAC_&MRS~nd5KT8kbi4pysAV2DPI!usoswJ4DL^BPqEqUY(=pW|_l{4-q)6 zbo=8tU_~;8k0E1lb$*$eQN)a|5*dg6)+l?42$@eZnkvGIX+&3sD(hj}$;FB}jWM=* z2n{+r{-AZYS(cbF(K`ia$AL@viAC@bLihzLxDLLyiKQD8Qljpi*NKc^lo zCbnC$z*(&BRu&UD6HNK23hM#E6p9LvaTaPgn_)-5^j1Tx>7Kz58(``RZYf53sE6=& zVYa1w_@O7b>Ot5JhP<>v@Wx4wa0={>umIB?Hi2A23aoSrC+Hcxyjx-BbL6Hx<^sVl zPTd$;SJZ-6oo|WDs$8$427Nup!-mM^L=89+%n~;qYnWRFjzbA@D2aL2A`59N2`O4_ z16!!Fow^SNfh9uI5+hW(eoEWq^ndW1pS^$^@)d9;;EEs8M~a zMh(DQ6Tti46M$F5a}p3nBnOVDyw`ynN;f<05}>%ALNWgxtle24=D$m<-y`xK5v;2s zAo-MxnbQ(w36?$_F7rR2`VNstL@25#VhK4&mT?~$>nBL&UnTN~MAnF0BJx=xpCdwd zDt2~Blz0O$Qpwa50G(YXjDb$t0fdEDN%1$3cpRk#PUo;XkJia17^C%{podAc{!`)! 
zB5*rHYyLGV_#+};Cqkc>$JrlKN*F3c&cN}fz*wZgW^hcLrgn?2RDM;~p3lFwagjCg zAqvpTuhBsno9+F6E9?&X^5b;NB*SlaDfKpydqf&UA|g5wItDm>oIhi@(z`@dm*K_*{|#D2k#Kn2{7&q1{AgSWC4$JCrwzJ-grnSS)bw z1qp8Mj-67L=(J3kmzy}22TW$trg2hF>$XXzQ)kjnr%jXAP1-cQeJM>_hiTGylC;fe zI&r`6zjq%1X`x7b!>N9WBS?9d9MdNhxD&rdsK88evmeo0(R&oNbMi zM_RdZPSPSs8!eBv^5uMMtUT5#lnY3Ys@UduYoa{Snk-LBJia;Anl4XEIDzm?c}BuX zglEgM5>6pJSDuq_dh$@Eieb+>ZEuGHFF4L`$3)wbKW zDy{0apV_KzUFg*9c7-)cmY-z)cC*3EAY7Gj&aOLd+paT@H64GDQVmib8tl*TBOlyt zRk!^3i*0rLTGe(jtdSS1PP6urrn6h!Y@$0PjY`rryME1X*XoYbW@0`_Y&-67YU0|a z`;uL~g?fgcaod%x-PZPP8L?kzwyJCON@KHKbt}l)ZEm^G)`*VY&qUw_JkC5o9a}?b zWkbcd$ZzOnQ$ZK`;SpW+v$Y%5t*!c|Q)x79$E_vM27f6$+;`uF$N4aT zXL?#i^DIE!ivSv46wvf^K+7`#BVHUZ>S5n)nD!+vwpDOrp0;k?(Uq77Jzl5z3ckcEyBn)Fzon|&1P%W!SYw#YNgS(TUFOL zH|tw|wAS8i+fFIvTTXqmVb7v5dyZg|V2WUxV20o*!4Ux8s@iL4c7v~<*6d?Qd-vt# z8|_wo*~Lz%>^gP({Iko~nvH8%e)L#*wZ4T#yt3K8bz%9&?d`g~-FE8BA8M=m<}xWxWw)em2$3c{wbo6)b2;7ZhVj;yg@p zEMpAatk;61)fQGc!6c_v-K--KIF@oLh8(*lWhJ7p7_6A?Q1!g)4jBgmGB0i{FRWUzI(L+m&}Y1A&_s+maLf*OW?WAuC8dT)|CdGR1w76kK%WXNs)R}k6*{I;Dvgk(jtc|O!KO|K z>DS9qbyA4F4wiLFjj004#novwp(YVZs3|oK3YJtQHK&dsC8f@&d36+_v^uMbY5}2) z(6@7b=GriEz511>d4xol&8kF`=Dtp`8gF!{E5aXVDr;ri6l~alo`n z28T?mWO2~6$_NgdR>|SOX_ZkNI<1oTMgYgW9ALp41swPCfD_&r;G|aooN}k#8EJQR z?>To4ePKK=X|JEZZgeN{Om(NdNd*#uoLO%QL}mK2h7>DInMTUgWo_p-UxS2!;~fc( zw><$Iy?Ui|WQf+1^$2nGBU=^FML)Jx5sE!@y!uqwH^IaFsN>pARrizb4IIoH$c3th(+-4iZ2NWnIKIt{P zURSrl^xfJGKV#Q#)@`RAL<3$CQT5f`HJj|;w}>VBe*{_1wst(Rtm zSPY@%>#`tHbo(WOZzs4y@BxAk68t%W?;!Y2f|m(CL?CqODnkndivWI{c&;?no*yIK zlmKZT0*8+=54mjaO9*HtnbC1QjlUjS%J)OPl>XaDf(YgH&cuQ9HQ-q$o5U}Qhjf*3 zM*+AvMtAhC?&_Y1tUNZ;;828Jb4}}7dzW2EC!Tr7z~)@~l=ha^ptxYZ5l4;41MMXo zj&8)W_Q~0JfDC2bGal%=wzKpaM3TYQtWloekG=CKkKu86uAAVPujjGp^{(L=cQtHn z{fCSl)EyuRqNK5O-ZwYuxBbY?>gH}85?f-G+hTyt<7n*NXhPk9xvb8Y&tj*(kFxx=SFqA?nmtE)*4<-x88F6 zXmxwLzNP$B*lMM{RWC&Y?v&WZ#>RlaO4zEO48}}u=cjH^Saon39k-n67i4vrIY#nP z)Ui1)VF23*IHcy99@i&yOV0sL>5x^13F}fgj9PEq$vZ~yydRJAXV?XvtTUiNTq|=L zi(+8KyO2T4W2?>vSNxt8$WcK&n9F^A$9PS{smSd8%7M0i5p79bw4U0qx>?4&Yy;SZWM+A3^qXw}%aFww zn+;>5m|?FR7{q=WEMt_?8+tXnV`|PDD`@J7H&W0hwQhloV?BfR$M;@xvxtpjRdNDH z1?CCwSU9PRz|2H4JEy%8Mt4l$ae*fVE(lx{c!F>(7VhaZN*_lbN2QN5 zLw%eH`#9^J4f{Cf&7zO9-Z}KKAaGIO34zB19v66$Fz5sOZ^Fwy(7R6v{ujL?*p(0~ z_PK)vvuZNfo9fts)Y&jK*wKP_+0nKfB*>|Pw6Vmoc+3`gwdrHEm zBz#)JrzKpHQI!}DTjJh0^GN-(Qs<1+IVa(>5`IGJpF324Np_^MbNw~VO{`C0esOo& zOReh)b3tr|0$7-`JVij*uyM#TsEc1Bs;2z!cy$nvmuY+Elhn5ta1qxG3;aNb- z>cYK9_et-``w@HUzP0mvz5dlx4MSSLpe}kB6hy2~8fvm(T*e6Z=e+a5Sz5Cm=-ve{ z4Z%!XKZ4M#M?r30J$=vWUhtN&>K8b-z3(_cEmpcuq0~`~<0;g6O3ji^NNBkqx+Ect z5G9N1nR~JB(*4Mehck;&0#7+_8N7OO!^Fsc3$tJFma$isfs2dx^*zg7WIks9r#OF4 zV=P>!XT7KU=No%wJ=jUw`f>Fw_ss64y}x*1ls0G@=7tk&b&DbPifRS>CTcITJv@_M zTGsV_-XwPCWp4`bTfJ$(=e#2NN^th>3F!-cW2b`FzD?RlT2xfe--~*e)=v_H_oF+D zphcJVPdy+uPj4W_{ygUVyGScZ+8IgvB}wCaUSR9qMPT`ogqRlWCDaP(+P48y7fILD zi*K7jc}OJhzdEEBK29aD8$Y z)d^MTl}@JEQ-`Hq5DnGnm3Q@GC)>*p8yk@deT8`ig3u_3fa)}bRYaWd$CxBF28s5I zVE0fl$9pXvtRl%_&x1<-WUut0)`E=wI0iMOn=2(#-hMO?=WPnYoiwdA#nAdE_N30} za8eO+@Jc6HY*qI{lx8Fx)>yGAz^wh+uk!c%&#(2g#LlcVvRa4br(T5J$1TFzRB@mw zI4iG0E{uhGLT5$_RvT{JK3uLdDS0BGLrp36By}vp6EY+i5UQJ-E4|J(L`tim=*plh z)}3P)MTToD6KOUu{?%SIP;ypRb}6=@0Gg!^wEBmC|1)9Jja!dxdbqX5!PXE>51MMM z_<_z0aq&9QJ|Z&{bg-`#q>IAx9I=T9zps7?iBwx262&EBpwVN4+D3D0ttjGY zvC-b$QrLGxx)V)xVN2-GEMgVS;_a1AtQhWE2Cz6Qkb-Hs@guvU{J@q39gu2}j(?Ek zrBo+9)X_pG>bBkLX0ekC`&V_hUUTbep>&oeW@vkrZ5vvuOxC7RRJNW?BfU+jn|I-ZOkHu{j{L2(yrBZVPuS;@#<~gT7$t`knJb8cik{o)JH$Iu??NL z<=B6oZP1<=m_ajaEx8IKc13KgWwE0(hy@iz)KAbpCHNDawDe4%;8xiNHI4U5;>sz~ zj-H7=1_OaCP(=1mIUze%%zsAB@r^w^x1sUKIy>bE>p5kZ??4otaz;u^1or-NL_)UB 
z(KwtD)*B|pa-xPFVKr~tei~=^R@GK=1pLfuwYJfZ+8<`cR|sAuaDyhGNbjK~yntd7 zB*gf-?I)NhUHFMUr7FkJ8&7087M{g&RECL!Ag2NCTuc#-FsI=M21J~HKQ4>etV6q_ zozbn*MB(^)&F*qICKAicp=E`LD<^HNOQ6`5W6WS8tQ%qCK&1`Ty6;9SxA8bX51>Wo zpz#;=MLbhx0Sca_7tAR=3GM!pVF6~5pB0#g@|V=l=|!V}v;uPZHxcHtOi(UuHIhX@pMdGe#C6=I8MA`YY&DMh><3B=N@@o$1Fa zpj2f0N+<~pb#Wd-hh{3UdCf_BhMlMR;SQL&ShDo1AV*{$xL)8f2jM9)X11I)Nk5E1 zYWaVFM+~4EMs#>ILo}lnK|%g_)MyTo(flw1P-3a9K)Lnwb=>1YS&O+aig*zDyYank z7e)~;CaU9VH-Q|Q^%E4f_am;p4rN!;jDu-Vs$rz}l1kr**k_=qL^oi6_EKRSscN1aTBOq~u5rUK%)(6MMl?RLwTkcaAO6dSy z5pw&zvb=7WlL4(G*A8&2-|+QYSm%#~o2wx!z)2DB*Uj(}gqSzI$Sm<|0>75vSBwI~X@Swg!`f_U zP8l`g8xg!=vXVFq2EhBrl<^8$hppK1Ose6?HEoxv%EUwaX?Q&X(`jS~*FTP7*`FnN z8=#aB)Y?D7gf|HKlr13tdc}_g)4kCP+&YXqQjM?S3C%cZ!5UAtWpri^Vs4n3h$z&* z0p#Tq4S9mQe}X`WJGl~)U2A$#;z`0*5Dsv~6xW2JjeM9N0PjHM@20uGVPJu9J_U8_ z4itZs$|z$}gJ~rd?{a;<!{XdSFYx^EYZ2t|Eof-@^6yEzd<48yz1!q$T2(EysVO*t1 zx_V3^#MJkoEE;BnRkV%S^W`6>qQYxU{pJwV`Blu|o zA?AWCq392oHX!TY$LPVWW`8fzCNKzxtD@;M_@jhG8I5vM)+pkA*d!k!){m@0RaIa4 zp2E+4;I}((JYVCn;BNs>pQ2yK_Jg!x!BiPj;9ANYquDa9EJ!Gz@$$yizo%c11=s)G zTz9ma?~ZvH$SyG4cME&J49XJm3Ucbd*c}JOg>kqGqoSCKM+A!TSWN$cdCY)eCN`yk zcfJg6iSVShVcFlm^D<}+Zdbz`2}{mjN6w^YOo5M%;XcwcgAk0>Gp@03s;D;upAy`l z!caDW8O*{&mPCtR^%9hdFoW@K!kg_T(ax-wM$XGfNg*Y@^D<Huz`yslt`h3VSf~|R?x;e%ubNL&>BK2?IjDCCt7?H zZKpBH$F~!t!YDtjg|iHL$`4X7+c_Biq;KLuWoCgjwEWIDYMCP-e*{*JgneT~2%#qr z);j;2EP1!aH@@m zU&F7L8=Utr_Rg2=zlmD?ypq)qeuIE5$Ma_g72kP2ES-CyWCaXf4k}NuZ`Sv=OAB&P z{S>rL=ntZA9XgW!=pe7ss=GJZs+`&l+8J_@k=MRTqF2rk!f5+NNIyclQ}Yo>5Q^EhR_n&ZJWn>Q{szbrMQXnle* zu^UA%lMLZBkC5&$9~XDh22W!tdbQKv=difhEg%G}^>5+TKUDASpg~EzhFUh&;lNb) zMTX>9SLOu(K4hlUR$RPNhn8k^O%K@58d)}Fq# zhEe;kvZhVo5Ri5HMx%yj56^8s+6b&aDR^YSYf_GdZ`decqktV8hQnJoODor4=YEts zmlC8#jR{tdJZvR7V@~#G7Pg8x?peg4P0bw?Z4NfnHw5rrJcEbArUPGFp#4)2zVB$C zGT$=ZGT(|o#HN5A??&Fz-qMj~AuS>vobhh-ZUh@p?o{`dTtgwg9{5VkcVpOkF%_j$ z3l?PASFnA>gaE<$%4^!*PeA0R_eI=;%O$p*cMYuadUV*Y59PB%Eqrcg9rvDL8Wbja z3T9QDLOM1k!y>SQ(J>5z!evdymjbkSn}`Df2PQx`Fag4W2@noUfVsC*a$GpZ>>UGR zTn7WDN;Z16ufI?rC9=kdtiKDe_l3I#PA{w_`#POLUjH0Uuz|VTclCWZsvw3|F{NDSEfo#yDxLA*en2T&yBl_U%WdMOKn@{H`++r) zW^6v4xUo5D;zp=4^S4~+|V?) 
z+u%x|=)sx54Ntr`7C!}jv=<3Zu1KTQdHgh*?PuQP$;cyFGv;8QGeDOO<2jrXSY%Eg zItj2x4^*JxM*6}#jWbC+ve2XpdvD$~>^G@{+@-kyS{qC?TxKp5%q_Ca2A28Uj&awb ze=9^r>#lxRyK92TMhE__cP$8p@YIlHjRL}N6%d{sy`_clX5c*B+%c$xiq8kyYD8br z!sUwx%Qqm9!Cgitk278z5Q}01H*;38f!hf{Tu;P{0SQ`x;PE9)%8$tswYTkmfS9Z$ zlnO3WIlbN9?&CJF9K6CZLM{IuLqAXO3k1IifaMkPDTMTwnff7uBEis#kqGsc^G&YU z1Pajg7#1{HwRzC9I5;r8xxV454L8)uBv^8dUBv}Y| zinbeE*U0vM^ajV*lejuGow^%raS_o(wc>qFX(X7L?C1gSkSYHovA15I8&LN;tJ~48+LN3t2vT((PL<1b=@Ml22 z3I5JQGwb9ZJqe}kRr@j<{8oZNNTSqkgNzhIjJ_JWrj~SEHR9hD`4>pfW`Jm^Wuhz; z7fE)zcZQ$ydLjcs7-!6b)RA2}~ zBeehf@m@`_R^xCLQPlot1f1Kz#qyB6Ac*`tw}ypcJXoEv!H^#Hh~%6(ocH5-PnQz* zkD+9EC1HOC;jeoo@rA)Ca3wJ@G>W17h^xMNv+4A0;>XY+Nv{m&ix?Oz+u8;k5d%XR z2(O8dLC&b)gsH4G3!fO!j0TzaKpO0V-6-s_25pJpopj^FhbICdQH#zm_CKUjb2d~` zAhkzB!R-Su6T18LKQn+$7dFB>Cfn^lu^hreecHwzsB4tienj@6x$vU6eCBU1K=n(395R`Y7M z-|9qIa$ZW#!?8YW)tdkY=$y$n#7fNbXt#*gN1)wDq5T&L;WcimBlqNPi1zp8X*_2EC>L27FXla?#5nybwBX;;Xfd1S}cHp>qL8-9@b2IJB3= zp|Xo_TMsZ}I1?TZ+4Hf`7hOmunU3(FhgACB>!oyOP9(E!C7*opmP-6wmc$8XrE^wt zadLuBx&q%ExaOev;1iXg$~P-axvt;-O6Q4h+OjAyy|%w`@%|ljSiE^3G=bus!Uts) z@Zm6o&!l`)Z8!aN+irr9SLC}gII>vPy{6MiZL7Z55#3v0Kfq^IXw!zbP>>Z~-`gbJ zLHW?*2)ZQiP&!E#L>i|0uD&b+O(#xlEp2bD`7wCuJNR<1G!f{F30&OLEqMbj^e$YT z9Z0Bkn>TlXzlHE8wi`HIh;YC=DSHgSw`y&8KcpyFI=iden{9kAR;B+1X>xFX`B^E4 z3(IG1as*t!gHtq`YeM4vTz_SI<2oh`0Mf90a3_1guCih>xXPy=xLuP*`O!?^i!>T; zA4r6aUFQ4H%d+%)zlbeV$;*ZR5)7h2B{6Slx`W8Z|~il_Rc3|H8a zFw0@q3*wIN`p}{Y)&nRk+!PjWsvZj$!xi$#!p#8-A7XT-Fmsd494&𝔲ZAL1E(b zo406;k{DPfF7m=J2Ze!$CNrdxm4SgsD%dC6!Ig8sj;G)O{$==%>R>N{|MxH9<0akW zCrTC}`keubj7v&FV3JTqIoCY`19wVN(*iU7JTNz~tDK~c3e5NO7Q4r~$H8RTOF`gx zKkr2MWcL&V1hVn08UY)J7qd6BpAap7y9MLkIz!4H)!+IiB~WPMd|aRIwryd`ya-@(XjugrT?e#bzyH-#jtX) zkmcL|oOS*M!Iue0d%>E29j{Jm%^ha%_HQsT8zw%=*g={}tRG?h|BCHCU|sO_rttMn zC{GlHsRSK`MEl{N;hIP8b3zFLYOJU-Fc4rkfuwLik{A{t2BL%guhC;^Jh-I)O%CDT z5DY{GT9JlsdHPa<{aeiVw*)?c$OiwOA<^?*p z!G9tk|F_>Ec#+_@3H~#|?-2YKg8xeJ-w3`!@Vfxz(KY!`2L!8um{87zNqtUV&W9;O z{5+`GV}c@+lnX3|qyu6?KlOl^P@WhnJKSt9{c&P~{gA`{Jpzh)_J1e%eS$wA_#Xs+ zNbpqv{O1dUnc+WF2qKu1pAN&J01*yEATGt1m>9rPe@* zD36654hs?XA90|6Ozs>w;VLu!!Birfrt?_hrD4C!~UOa>3R+S8;Ue#l#G%hYzJTPxH>8WKIblo;qz+0Rrbr$lZ465uL3o~WNnaAl z>*=8wuU@r(6%n{bg@M>(tw$o0|0xREF!>BmUl2hY z*#8vZXEd{C%gSmX7(!t?k{4s^4tZ>b{AvZ?*AJqo1rgNR|KB!+w&DdFd)IAlI>C{blP%Q8fP8t4>9d?%GM+j&~vPoI^KTfn-?JfCoQH3I`M(A^c_aV; diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc deleted file mode 100644 index c4de6c13104c8738923f18d82dc21ad22a5ebd4e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12841 zcmb_iS&&@US-xBE%k<3j>?3I;%d*>xw5B(md&e`H zoMy>5kOhH?9mlSkp^8wUAQ%EEsDh-57Xkzb5SHAcs8D$b^FsN7;vo-&@B8oVo}Q89 zK$W}e{`c(n+;h+W{onb|c_Nof8~7!@@bk5=e$X(!#mL|nL*Ox7o;D4`HC(f0l+C)i zZS5G__KsnOX-?UZyjVFV_joxj_e41<_f$D8_e?oc&unL1YsZXg=Gyu47{a#evGn){rafDpMLg-IUNg#bZraV@KJRAT9PSJC z_~k`6?~YwF-GV#*nz3b;54jWWB=QcsMRyAKBknnO!CidKEHAl_xrf~&2px4FcaOTq z5IW{Q;ojyRN9Z>9N%s!-PK1uTPq}xwcO!JW`#$%idkUcw?s@k<_dN*R;Xdu2cJD{% zPW18t^s-bg$y+>#x45f(SNrbr-R*nI_ek19NV~UuZ~J8VWcyV46w)4cA3?9~bKmQp z!Tmk%_qk_rKka_M`zY@ByXFf<>6u-AlM8|U`10~DK88`Uf@D?Is})sm1ermo^1y>7 zJ4iqI{FBdDUU>HD=N=EnCH$d_AAI)dXI`vaeEx;^1;rbZP;J)=8_lWu@>;98S+BIZ zo)=`7>wcwO_g0ow)2;Er{PE7Y@gy#98p1cWO>Jmn)pl{;vHJ+4*VY9U_0{V1g4p^> zUDZowU~YWQP-Cd}oo7z1blde)e%&Bn!QqtfWA%8T`m*H!0R-SyL_ zR<8Eys@L`Er=IJ&_139gv(c+IPObUPmUpsuHAr@f2xDq&$lS=!Xrj)ftx`wrr^wzabi@aV^M46pB8P&S^X 
zQtX!<#5}*M{J`=2ZZEL$A|*#^s`gc38%hQm(U#&xPFB!-j2~HAeDdY-N}q5A!#k@{_LRCble0$cOqde2En5 z#8&Yoyg#_Ijd&VQ&0Nm%`!JntyUAr<5l7ZCGSs=fL$ z-d%osUv!tcf#Obeqv@4$;irsMm8xD1>}IDCBpR)5)xZCAV7Ka>AknF>vt$q>26&3f z6vV>MX;(X_({6TxIHp3iwH!E|u4?1!+p9globLL-r3!)`W=OLGwe|eKcAE_jFlDrzk+`%21;#XUuA9p7 z(>k_bbU4W~{kZhgI?(<|JZhgQqwSBxrTv6XMD4qNRwst-CpDgMIkRfv$)C{4y|5eW zr#b~ZJ?E!%Tqn0UjwyswZhXr^2ss*GL#HqbX+Npcu7mWnPGedpLMtb8cbAZ~{Q9r) z_s8d=dHwbvP<*VEmzk_ip;+M5y1lE42%tD0f=sKs+^khwl}^{+i&O#Wwzt;C6s~Tl zhgq7sq3)v~@B;I$D8G~mQm{t7ZnNVH=r<}fm*oryf`nk?gT$*%uX(9eFNfR#v2u3E z9T>|usx=>`mz5YF8|FUnpd^fs6bO@oc$frTM&IicH4zTeA zFLO4Mvu4~XAV2S%4f3^>YNu0gd6h;}d44U&68Ig# zMfevb(?DEck+Wd*b2_PWxaVPH<6BS!o!Bxhqd(R!Xb0paQ#57_OwTBUobmpIZ(g?a zm>$;?4NN@5Y+$S1pA6GcmN^b$yu!R1UBmos(CoaqBOF~q~xW^_u=gvzcnI+Fq7 z5AF>ccq=fVO-y>I#aV;T)06-A@*7|G0WU&1z5;tY+n@6jz6r>f!<5HpZJGUfJ?|Q# zI2W#)S59oaff{pC<2q_A^cVG_>x9o%n@)7=}BGGQ+is@EXVqX0Tze#f?m{zbRHv;))_Z`E%|b+KiMz#r~1?V8MgM4 z!5+fqyu%3e(QX8u;n*D_pv;YC)n`b=_uXxs*|- z*^Ce$r?J**DH2fdp=u`t46=36DuEzH3IY+$C%#f|F0c5Y z?oHQU37nPg8YtE}WvmCf1$*0Swl8^s!;UGe9}Ve{dV#ewZq=`n$+xS15D#BL5d}a| z2}!Wpm9Tn}{m1LA1cT5zLd`~{)}^Uu=+$~HOp+l;F)13GC>N-y+ChT72h9a>Kp;p< zG%Sdwl}`q&&aj^s*%5gN+3!KvN(-NpG3lY8_XvbxCh?yz=j~Z@(VXYsI%(c#kt3&t zD?b7}%bQtTNub(c^SF5!f7M0Z2*o~mt|rtPDf74p{sK{-1rN{$rgE~M>ZkjeepV+h zgE~V|c!$*n6y^GP9Uloxu)wgXbJ}g&37hJ#sB#&9o2@uJSH#?%`CN>MSLZGdj$IR!n2780{0np1v^@Y$b+YDP+e zXUBOFkYLr9z>t3hwgNFhg`K~M@+9HoTrrf<=j4M@HDbM}oyDL%6Bhjw{WUJl)USM4mBfs4UZ&z^xcV!H>tL4eCPOaYYL5HKN`Ta|p z@^~2M3TRMd`i3OY#BxdskwG8OE%jk^@t1IUCm^^kp0=`3CaTernYRj7(wa95*1Ub# znztUnHE&8Cbq>Xc7XD5|^0-8O-m>2X3ttGW^$ivtY9u)~v+y_13vGPV9+eVTXvsqx zf3qAa43$h+#=bhUx6GM4kh8}a=3R#tn`^@XQyAR$F$S#H{$GqCDu3PFxH#Yp*UeX+ z2suE+8X`*$csQgFMXcf1VUssdtB6`)Tu;G9zY<5S!?=$0mqG@hk8p7!{Oo9yvNTLN zrjNR05wTU{y45R&F|0RZQ7pbM4>9em#`?GEVE)4zQFvz*COu+x0Ph0!$?Xrjm6N z*U~R1_m~PIg4#LQEvV2&qN|p;6wWUSt+&kNOaWtdnj{d#XZv?o=nh@kwm-*;biWUSNVC_rd`AhJzbO9z`#0ot*gs<&MB^FRt}1}WAGpPbt3guLRdxs(j_AEHCv2Ox%A$|9GF*pzT8GAcSy z$f!c2@R0csoGOl$#lHx1ahEWwkXs4A`V`u$aXHA}ZMYZ~yYM045S9<+% zosP&A?;*LGz?xu>TovKVvV`aWmEn7w0<9u;1IgizF}LHc9hNS_V>Srs_zo!8t@Cy) zc*f$_tnCEu@NI%5!MEn8P$#XYci`KDQ(+dZ(Qk^UZD(9_$DA>+UIo%`XQ5#UHwpJT zaUbh1H>Kx5{$|}YR-^NNZp-K|0Aqhq&kMeOs=tWX$MlTEHu{IqMjk|NQBTP#_mGfA z(8$Xvpl));ARhOR=)-OX7;AbCP;3(T%FvNv+7Ug$G{Hp&sCXEwXkgxV=GYSBCqN%ba*w(hA}V6IBZjmhh$c!;yIC25 zV?fh!5H$2JB#wBBGnxjfNWvPG0M)}veg9MOTu{6SrPz)`LU@x>prQv#C3J42RPn1ADO;Z1^z7QFzI?^4~yrtX4sBbEr8+E8j>00dpf%G6K7Aet8f$xBFfEO6DlJ z{Mxz5x%7>JPYHC>R7Wx21tXU~TKxRe|GxR!xzgkw2i;?t>NpEhL)0;dAi*Uh9I)cR zR34L@TD{o{67-m3L5XOq34c0hq`Jatie+r9)*=^Hqk73J-KIXtypK_QoZ_b`ev0A~ z6kMZ@z_!5UCeuDjp(*+lKSuH66hA@nlN3V;RX(ENvIH|gT{$-hg>M|dmczBZSEG(-1*@$lAAL$Z-RGa;$1?)Bfo`RI*t7V=rop;*R75FeA!Xa z_LdDiuwFU*vK=inGbjUO^zCg2^V88;Y92Um%7vEh*uXpcavbSpur?Q;=u_x*V>(!}q)+JaD7}bQlVlmP zX9RB${6;zCIg`Lroex8@Fty?Qo`Ehp#O_IB)g&qdRcCb?ZKU0Vo&geNnYRE00?(U6 zdPe84#E!;7;`zNemelk81wF6F1RKAIP=Oe#z6gKSc$BsPJsAK39&vWZ)egY%vsdY`CF2HR9@q z59l-~mz0zWkh+ZRzu^(`F0fIa@wVKOV;RZ=xt#xWd zg%lUl;?OH+J9M61f;Sxqgnh62rL|=Mk?^XOL13M`0Pu0yXiq)Mo^VYYm{-*rBkL5e zQfyFg!L06}*aN1Nyf{Iq90q06O5m_EG8{p2Fml7t4O`b);Cf}by5@P!YG=X(bdfxB z8#oR~S~Qd5qNl@t)|>@^&58?*z(o){Z_YdS0Lav5QD-*^QH!%Wenc#8dkIGU7Xp~$ z2Jk1|PxOSr+983v<(JI%zVO|%{+f51+FHO$SUr9VSUacPOZ?-=Z~P3=d^=pnY> zhVf97kd|;$(9e7k+it){aT4Kap^cBZnQJjR(6Imc1gw7w);=3rd)RAEL$TMJMK~H^ zI;uX5?LbwQzGI%*924b%m4cB1Q$Mqr4HXBcF{+(2o0%x~YE_+y=J{}&>6@ek+jzvN zBT7BMXFf>rD-hV*L_3&vRV7>c&GuSbK3u7Q`GFb9BLn3G*Axt-L}?k6bxshEywBTc zs+Dryl%Hxx7VT+v81qQ;Rv6Owo z!hsUW8%mJSa;i?CrG%c?F`*Q)nE~Ryi76%9A7Zf0S5}aREe@EeB&P0Q^8t(U1{U^P 
zH1u#NAkV&O9=B-Vjn!jCltpMZ48d@rML%^p4P}|tGYz_+Qt%C@uElVQCjk@oqMyMu zr!j&Fgv{z`$ef;q%U5lfD=XfkRJ2_>Dgi5!rU10iqgLYwxeE$F-6O`FGlf2wyIu_+QgQTw3)eSn_+Hz z(B_rbp{_i=G<9LPWzT73)#Z9+ z1vWPduG27!uAkXVE-g2eOIwW)y}@Fv=@$_5&48rPJs>FzTs+JLx2rg6$$<@TH`wvqz7P2f=h%ICx?v?_ z4#3g@Wae=q=@?cc1@XSn|AKvY^HG>``eT+LmrV8Ns8i!C;*YkR2M+`+A4Pz`C~J4# zLmvuHYT>X4cG7KZ`NW{MG1zho3>rt) zQ4$}LtkbwsQ(wiyYcUA^NN9%d|6xRK^8PU#VKFtf#^n7Icvo!9ah)OWKM3<&6w`2Q zMea7o1ZlSH72{mnSI~MiF6u8B{!55`Ba$pn4IF~etd2%Sl1C=Y`<$cUBiP?xP`q+cVNzd30 z0G!96cyuzZ{suLI_;WAtIIQ|xMx5s^KKrctdj{ofmuzvP<5_NG!v#{2qig#|LH#p| ze{^s1gv)owlJs}8tLh)|AoVqhf1>yYh_IV-7Y5kase73)*GjP?t%nrDMx+uyuM|ZTdI3IStZ>^}nR2rF>e-CvC(YPwZ8RP#zS}CS} z05|mkihC)ZqqqQpuJ#z-pS7>Txq}rXL28XQBQ1#dWmGs1kHBxTA|HGnPmvD1#gyNd z6jQP>QCX_=K#=bA)+%oO)h5p7+6V_u7YCSeX2U~jqq^4e0}CfnzKqgAs(}Tz-|ebi% zELo+9Z50C}=qO+$AQWo2C=!84)Lc=U1W*+9R#7|TE04?&(R)sZAbGakb=O+;M@a`g z5-T_zEduf2pT>MCV5VTF-ATrh@nSNWOCCwzo?c3yOrJ_0O;4qdq!Z~>7G;W7Qhxsn DNMRYx diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index f6a68a0c904cfa8ee8dd079ed1064cb75dba232b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 208 zcmZ?b<>g`k0=CVQlKoj37#@Q-Fu(+4H~?|61dvE!NMX!jh+<4-T)?!DVFB|(21cMr zFoP!3OQ0-+pC-#KuDpWM_>|PL%;eNt%s}=nmi&U$yjxs}#X!-t#L}FSl?+8}K*eC< zmz{n_er~FMNosLPd}(oNk%5JNKxSG%Vp@D!eo=f#YF=@Eky}oFxsiTGWkG6DL4I+n jetdjpUS>&ryk0@&Ee@O9{FKt1R69AKk;Nd#axek_$7ncj -- Gitee From e1e32b1604e9bf3e7eeb816be22256dabab4569f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Fri, 9 Dec 2022 08:06:40 +0000 Subject: [PATCH 4/5] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Tens?= =?UTF-8?q?orFlow2/contrib/cv/UltraFast=5Ffor=5FTensorFlow2/unsupervised?= =?UTF-8?q?=5Fllamas/label=5Fscripts/=5F=5Fpycache=5F=5F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../__pycache__/__init__.cpython-37.pyc | Bin 172 -> 0 bytes .../__pycache__/dataset_constants.cpython-37.pyc | Bin 1294 -> 0 bytes .../label_file_scripts.cpython-37.pyc | Bin 7176 -> 0 bytes .../__pycache__/spline_creator.cpython-37.pyc | Bin 8352 -> 0 bytes 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/__init__.cpython-37.pyc delete mode 100644 TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/dataset_constants.cpython-37.pyc delete mode 100644 TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/label_file_scripts.cpython-37.pyc delete mode 100644 TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/spline_creator.cpython-37.pyc diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 4df862628a7683455644e708d7b0593d5b27c3f5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 172 zcmZ?b<>g`kf`)(X$sqbMh=2h`Aj1KOi&=m~3PUi1CZpd&ryk0@&Ee@O9{FKt1R6CG$pMjVG0FP@fQ2+n{ diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/dataset_constants.cpython-37.pyc 
b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/dataset_constants.cpython-37.pyc deleted file mode 100644 index 4a2d153d41341d6132887dc191d49ffd30e0fb1b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1294 zcmZ`&&u<$=6!xw+w&OUC?X)2+&@Kf-U6MFSe}GUyxb`Ns>e%3QK^ZKK7Qd<4cD-xO zY^dT>MGss6i4!NJ<_KpFhyzkp3Gp}Vl^@dn0ljbl-i+gx1D)0P&3oT`JM(6D=Gx+7 zHi7Nl-=7b@nM)*olgX@=FnAXsenEl+Nq`2rrx8u{I?*9Xl3)-6=ExkRNQ!B<(+@H% z1?eN5%sz&vv9C~>6Y$RcD(9u~+FVYo~#*BaJ3I^2koRK4{gTMXTYYF66jP)O{9;NvXU~%OhF_ zO3T-2d5@ND_gnnmuJ=CDL!?SF_HlRd86=)WLg9uKPD%Px3EYtJ50b7bA^ugypCNH= zDs0MFo~rn<5_nn)a=!TMsUVHY9si?*cvTL5A?chFcpf|DIG%JGD^(H}c3wJkr1*O! z@Oc^kCDo}zb@9(qNKGA2m%XQi_>mk`lP>?qu_9GTk?y^LxPrKhxP-Wf*hXw2HY1%& zY!RJrBQ7E?Auc1XAn;xOW@RNxQwqU=(nt?PWQ6XBkzBpg>U3#yueNL1q_Eqq?c4QQ z%Q{w~RI7H)YT2atVW)ee+1{m%X4k6sI^F%qkbgfiZU?SU3cVeaJKcs&cRHG^zhK`I3n!-&*kc?t3btoyW>#wk8dp<1ilCzKNK6|!zgog o6u^PU-jN4RTtG6kjFvGB4WFc;<+XhBw63zb9FAo1k-d!m1u4K_*8l(j diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/label_file_scripts.cpython-37.pyc b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/label_file_scripts.cpython-37.pyc deleted file mode 100644 index 44972653442fa78ee1a8f2902aa029437f1baae1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7176 zcmdT}&2JpZb??vl=ot>Vl6Eb6wTsF+HaQM85-HmW6t28lQM*>G)oP`+rJa>BXihcB zCY#;E>Yf!jnh6YZom{-m+PMY+c?yzK4lxh}K~4z*1UckrG;&LF;2)5Sfn@W0uX~z9 ziU^Dx0;C6BUG?hK*Q@t_?^Vs)Q&WzH=Li4w2i<=Vn)a_$7(XU5Z{dwUMIyDfCUt29 zdRynO(Kh&NwoU$8Z3|yBu(wO?vaU%>+MjCeiY&=8zExR~ReT-!Wm%K;PxW?9PRRx+ zb@{5CkuQNVB`?YI@&a-V`LcWkb<^@Sc~Q=SG9!(LTJ!b64d;R9%2>Fe6g}njRMhq2 z*o#Hru6lu3^8+tl5Pr*R3FRey6~>|)cy1`9-%VoY2rJRSbxpgkqD>D& zifb#P7x`f#@J7PlcGo@8jUpxe&`rG9aRi?Gu5!1%#8WXBFBcCNdyChFk4kqn@R&eN z=64sw-hvo37ewHPp1>*;4T1r)MoYg!BM)LcZt~2R$;NfD7DR4RjDw+=E}}KD?W!$L zQH$<&Hb}NgKMGNoD1Z052>aW;y_Tz#i`m5+ZqIvj{@Q}L)|~8-7BGIWha@TL1F{OD zuFLream78i>t%v^kZ&rlKI`&`Hm+%=HjkW^-IxbYO&KLbX6dId9S_sVrrMt7noeHf z$v@Ln6UK%&N#YbIC|YsHPE^x zB=j`!R`9Yy{vLGeEQL&~RcVbJZf`=3&7mpn zS#4-s(1w~U&1$b|vizzxstrrh1if}q8#<_~NPAfW%>peeSGdePUmDY@r+6HckNejr z`mYbm+<(1jTLW)-qW?OVssDO0hpAIMjb)OAJ**t+2PLesj1^Td*9uxyWMfv_GCtSi z!Jd;;HmjqlM_PEv(h_IO`CL~Y>Kgq}QyWcvq=m*~favBCc zW&03_G1NO-aSm07Oh+?3DCxK|s{2-uClMTO{LqxHJcS z06ld9$6#Gko)_#1fbfFwTI((0cDoFV>omc@1*W-i9CdxhgVz63pxj;%(pz);LGtF+ z)}r{ohfRW6mOI=5yb$eVzc=AW#?{C8o^Qv}H@`{T^H&!HvYfCk&#x|s^#!3a^!2yb zPhh^7m&qP5KXU_p4Eb9V$bX03ALBD^GO6H%>SbGeFCw={)6TpgXFrPiq&IU()`k}Z zy(VpsGmC+3z~#hUH^?m~Q@f^dWC1%BELMZxw|RC7AgUcv(lj`qCB(Cim!7xAHH5y% z(}%gn`J7H|#&phS^rJ~SAv||*uQ{c7Ha1ZR~ya!rM8Otp-fBgFA!sD zLx7-~-A|U%^4&Z4Zg+0pOdB^7_}bNe;@ws%Qf>Rz_n8bo6WofzPrTjxQLxu+q(*NM zFFIwLR^&udn`M`p*tmARw|MOSP+B@Jr6qPwacajK{#w$W;ZJ89itP6S-&5_E3&`qh zxWQV7$5=d@ywt9^f$tKCv5(CXa(e0cu`?u#2b@uaF^`J)8%VTQGneQXGrDD1aE=%C z3;GPb#&PbPaZWdM2lTprPFDi$y0p3U(3hlr3vYZ8iPR6^><$pb9wMBT=8ue7#CCvn zYw(V^vKB>GR$X-kDwv=UNKakin61N(z2)sDP|F+Qf2#kbe&y&tzxd*ddrc#?HWB+l zh7eiO)Z309_WDVu=O!DcMO4}e5e%-;ggGWP)K`&DEz<8IN~JZG@$$NU zj7)-6?dym^v9c{gT}|{&c@V0vf{twV}|D{c)_7g?-6j(L^x z;$9RY@MVPGj1losX!NTfJ2A7^t|QP@3<-n~`9ty~JSn;qSF!^^=EewgI%G~~HWpMv z=$FkC-6$L}0D?X|m=bd&_BlerIkud+6M^UiPF?2wMEG0GGDAH$x03?M#ygh40~dkh z3&{68E_>T6KZf!(4V8ZYbyj$()o z3hLmM?}b}o^eoIKlkea2(O@|f=gDbb7tcpdORV@(?7>ygbk3T~x0^GWF&V&Ii_P#W z7(6v(H=TL+!Of2z-nx1BcIVD}H{ZSeFfHSN8Q|!lUM8NIJMTTXeRHK#u%-1|@85m@ z!I-Z!y#DOCv&?S2|#BENZ8j`w5N{=kcNO*SpwKRk!30^YvI>(Y6Vm! 
zt2y;TPIW;2eOb$>Ijs)b+j1(S?Y#X+lO|$+I*e!YY{+TATb9RtLY4;8KwvU=ichm!gDynIXU-&w0#PUy&gY1>gG_;+MQRICE^y}lUb2CnVn!P>7Z_lK zU=<%%?DP@;QqWCs#1`L8Xr!3x&327-9PkYX&~~`M@ms3< zry+bEy$QY}iXI9-Nr&jd|C3K$$34yN zhdA!y&IStl9e6~IyyK^ISj7Q%1zr?>)Jk+|zd*HdK(ufm)weWtcc%f5X^{WZwVlQz z9HXh%WvDvTt~_`}yhp(4Axw>z-8KLriNTMnr|tx|)_UtvMymUpmVHpuHPy_ip?p z4MSs=v_TV64sfr8*akD|tnPJUf8e#2=?*4{Zy+;nm6mgL{NGMny@eswO(acSy-goO z5zeR;N=`zUx((XT@#gcqhg%$T8)1~W?d~En?)7kO)hv-9ap#vhsOoHZ{`y9eHnIiL zbqp;jwdm7!D9t3+u5cb5oEkKJ`AlK_1x;fHiDp=OLw^N7g3gAn?t#XqibB;aQ0Qg+ zTtP(vJ4($g2!MUV`u)6wksfO9tZYQLLVi%3&L#> zzRTLq58*SakgPNKK)lQMC|llMEIytKuFWloxnPOYtCXrNU7CAZTqA=mV_1BFZE@}? z9{~%AkbRLv*rqQa!!}(f-$@po+p?aPSsDFI7snQH#=fvc{1f^aTkcmFAGtN172>mO zmK7q^19ly$uG9zUfX*bPI8uoxQxj0!EVFvFs{w+Eeh4sVSJgO6R^RIpT4z?{%Q!V5 zvolp^`qY&Eo~HX{B${RE^g9Q|spE`R;j~U_^cb~AB%i{D=!i->$u{&29Ny?yBlUgb z4`)V3LYM9a-CJTOjSsX38gA`yhY6{gpCLv;+yMxeHs@{Jfl`cO(!C?nGPJDn#|HH8 zUxpTy0RI0xvUmRx+EwM!-;PSV3(ys3Ry!yiQvIb-IVoq9AzHv{xpr8FHVvU~#9e!V z84>CPSZmK+C@uHv5&^<8}P+Yw#c3#wfQcjSy6NY z(4HpXVolF5zHMx-wWqRccEDlcg-Ke@GnrzpP_ukiq^+kguF3&2&Eywg%wh|~!x!+@ z^@>^2>t+MWc}~AH@qSHLU2sD_bdbU4M)onY&qwrmDJ^$8GU|3Z>UU9AbCl50f$x0M zUpwRPf>ySU{OBFhKk#l44sl#z a83_ORt(%qE%8iQc%vWA>>Q1$C{=Weo5i7U= diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/spline_creator.cpython-37.pyc b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/unsupervised_llamas/label_scripts/__pycache__/spline_creator.cpython-37.pyc deleted file mode 100644 index 7b040c39f7d3a50024588b3f008ae3daedd1dece..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8352 zcmbVR-ESL7a-R==lcFS_*OoVquh+ZCnHNiEdxN{ca2$W{I=AaJYzMxZcr!O*x1^DV z9J*&HkphJvv5^D^Y_Q4GJuCt%Pq}{}SnS_$0dg;SnAao^zSkfx!6BDlRS$=h)>s4~ zG0kRob#-@jb=9wGZY?Y{EPVd_fBvd__jfJp-{{ZueogONs7K@2W(st8`DPzKDXpO4h`M$Ac(D zO>8NxI*r#@ul1l~XYDN&4V3m1!wW4ZqF`M`{ze$7xz=SFq(P!mzZ>_HH0YVJIpH~;?SSI-_kdiKXrs)Kt$l0F;ulOe>g z8zxHnQ55uoxeKY|Nz2t8B{J zq`Bc7wEEXTzqC^V{Z})3xilMronZ_(FQ9dS$J8)pk!o@obom+NHM5WDsh#-3om3`O z*^*Q%R-V^DE5G%H!`GlCYiF!OJFmB>PJQy^7v+Pt(U4aqth#;Z*w7eUk+=4Ld3Thm zzDz_f&^t(YIv5B-uId5kBygaRV4Aub1M%-4l#J8ry|x0i}`)OK8mr~Dvm4#E*k80?fz zGkcuX&427Nvqx+hI=226m~-6}bb+(lHC+958C*Y4)6)fBvG>pU5FW<*wBxmFQQQrp zqt)5xBm?*(kC(eckuit9t+ml-BDwG3Uz&& z3KCaVH|(%Pv#Dk3Ap@*WP_cv}`~?;i{u&0#E8V;K!@C`Kj+8icoLNJs?6qmHI5<)- zKa~1yV#L!HEIZ|_0w;mvhm{Wdns%Hyq0TB{KT$gEEM_I*k}c1UJXZy8$qa<8&1L7c za~;kbyvE4i)PW3DKlK{pS-rBabnK5j^E>vI%+bzK=Ph$%T83siGEy6;3dWli zg-y6u)#2tA^)#r*EbO>~wB~hzG$^@)!dk334QI(&vP zI5pSddOI&$_9=|j?|@I2m`y))4Q}8`uAxvCK!rts0${?CE`UHuF3F0l0x*y?y;G@Ks9vT z3Gdt{4ZVS4Voxj|Fz=N!u@9*Zh_?-hM+Vw2g;bWgtfbZLnsiXsrTdwK_69@0(u1r_ z6D1ut{AaLdbM=~T0qkB`rsIL?G_(440+%uf(k)G0ULD6GeQL~F+y#INYn|$M(Mw=< zX2*4;PaU~_eEg?-0*VIKgfjA|1^ z4reT2=wBa{$LA-dNm-WR8v#1bSg-8-A7I&`ZCg*R{@Ie1Iy(*h6B};Two><1=@7p^ zw))PG0NbWxQ3C`aE)4T3vc6N&=kitpmWC2FK%xquSoO_g)AFS9!X{`p@={RB#%Zh^ ztNkN@w}Qu{N`NhgR*gAMwJAiKl+wzp>ZExH)p=!ey>SSYLX_}}r`GdxPp#4Qv^FU* z?<>!ogXTeN()!#^ZpbEN(!6H%SJL{V^}W%|A_}7yK+4uoqA87lCN?Hf)@!yYrhE^Bw($N{N816F9UrpM~vyESOr;Kr5rp>y?lX7(TQ2EP?R{nLQ-r#c0e1bN;hv14k zk63(x;cP+mdI*TBL->=a=TJoydt|8vRGm4697Y%$1=uT&VOMG@;OlC!6|xRjVJ@RF z`Lc^-F|pEEkHz|sO)11hA_I;LH-au=ZL|3ZHiuk7*yp_`FgV>1?ue$DeZUQtRcY9D z5-bD+ZvxgZ(C|7@q;mJlow=vY31pCLDY+)@r{4j}S4lEnE0~W5gR5e^%6>2rh+2)k zzbMoi=DB=H(CRoV5SSjJb``dSMH=ThTW6jmn4&O1^z}H7NDhTVPw;1&OW;-IPM5a% zAWr8t&GIc)c{drTZirouFOn{iZ=1O#QW5g6SUN-tg`0~##Dy*3Fl^Ac_zvXN4Tgy- zIMd*x`j9iGlY;+*YY%h+cR4+YN2(X^(gKD9k`^8MG*rQHBNW;S05O;)QIRLbg_tfE 
z8HfQi8zM2$6}$%ykZ|K{h64JNS0uN@(4TEbe{lT;{&N^ovGy+=^CZTLTTE{^71G(}77G`aqs(Jh%^aVj$RA)T>1QZK8Lf$=>2I9rS7=-<-TxF}K>Q zY$-zKUO9>4fx;3}HSiOlI(?aV@s?vEQR>rz$H!!xwf!UF>tl5C z0EP9|3FL1a`@G$@+s=wz18x`g346spX`gpbz@s;ub8ZDVzXZBAN(Y|44Z4#1HcDW8 z=PYPXAQq@N`)9xD%nB$)3R6HSih1EZ0id>#Mdb7l)d8vq_9HpuOaPg-U4sSS4JaE} z4xS+r6!${hC1YKouQ@=)ZRi?gM;9kIlyu_)kwD!Mx$}j%KqkEN|1+R6?4|;6lCS(m z$hQStkw%>Hd!*8#E&_qV8W)%JA>AmLu3ePo{MngsHWTnVCTxuLk5_|H_etnT`O~1mZgzlrY49$Y}?VmfQJv^bYbu{3J z(JA!j92ksNpdo}4+Xv7cAVY(mYCtnKw8hzfM{p#F@WW_J!m>8NfX_`lKtkYMu+o2s z0`=KuD6wib!zq*HuZ1GN1c6C~IxTzu+ebI?HR^lCH6VjT&utq2k0?@FI#gfUhm<1) ze4U`&E+10NP{GEk1ZPLz1x@W${g5_%7I4r#C;|3a+Z6!G=5~t`MlW1|$14DiM7uCY zTRy1dv=z{{CxFvT`=@|qcJdFHb5Xh|mx!M%9XgbS!~bPjCO}H95NKupJwcH)#|)d~ zL-I?=yQOe6?nAS@5>gstR-%0qUzjV}al)VB;KLDuf^Z3X)H%ViP1fMphGP&Ddo&@H ztk&I4Z*#J{efyJ-K6&JS@aX1`uiX0Jk-ms-eS->8L1f5N)k`=TojJ1WRqrtUV|@X= zUh~fV+fRJcf}Bk+*^2kFidJM;z3N^Nrax5UPL)sIYiho}KJCM6&^AnHl-WMd<<(g^ z`|yq4g0V5VY9H78rcV>G@+bd_btIHkvfMVzY0Wulukces3Z>!fpE;^Kb0bMTzlU*8 zI`+LzJwp&iQ~Q3_@cmvahj@44BSG&!N5&xUsrtT*yS}g2FwR_5(rqKJ52-fOY_5@d z>+>j{;Ni%wwp_AnwZ=!a)3q}96jzY+mO?t#-2o~v;Rt?k{p?pZY3an?#!C+h>o)N! z=%hs&owp(HU$nujGhKPEt(3P_=Gw||0xklD@@5H{dD6jZ@Ke|T@qvO1K<;B?jHn6W z8UBrr2l~K{+YG zv&Q<*nIw}sV2jPheP*>UvIU2R0U4&0)fwd&ZVdLhN0*^F2mHo(=p*~#JRG4WItv!BS=%hK zm_-#ho+&0L5$8A$>bZ_Y8s1-U>^601CZ?Ktx33?`+z?)1qBeWj3M+Gdkn=3&4wUqP za%WMDonqxWhAL6qn?nj>V-&d9xk;j$ICq-wo2E-~vEILf5Q(&e@{)Y5NX|I1+>6YV ziRNZD%o?l^7vZ)*c%*YkCSs!yNPy&_BFrxEhNruO+j5apj>v`PFMOtgnMK+ZIQ53z zt?5+&w>|8VCg9x#;`uP_?%)KX@hWWy`Or=!@hTz|& z%+d5*24EtqI_IcdBrf4;D81%6c;WgnbrT%dpHT6Tig&4)S!l){?_r$373**xIlmyv z;|3pxfoG_Vllc_}CQB%+vo&@xCxOpT0-ESYXakxfyWMH)kMN^^Km~bOP4BI}x8Lwy zcnwyn7n}DT-aBt1IP&6{J-H@$rteTesXhZ3SqQX-xos7W2d|=xAG_{?f}JFMk3&14 Q++xdZlxsKJe`cNjUq#BXQvd(} -- Gitee From 78563d144d34a2ea6c0835247a6d37e03bb1bba5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E6=B0=B8=E6=B8=85?= Date: Tue, 13 Dec 2022 13:29:06 +0000 Subject: [PATCH 5/5] =?UTF-8?q?!1=20merge=20*=20update=20TensorFlow2/contr?= =?UTF-8?q?ib/cv/UltraFast=5Ffor=5FTensorFlow2/train=5Fultrafast.py.=20*?= =?UTF-8?q?=20update=20TensorFlow2/contrib/cv/UltraFast=5Ffor=5FTensorFlow?= =?UTF-8?q?2/README.md.=20*=20update=20TensorFlow2/contrib/cv/UltraFast=5F?= =?UTF-8?q?for=5FTensorFlow2/README.md.=20*=20update=20TensorFlow2/contrib?= =?UTF-8?q?/cv/UltraFast=5Ffor=5FTensorFlow2/utils/datasets.py.=20*=20?= =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20TensorFlow2/contrib/cv/U?= =?UTF-8?q?ltraFast=5Ffor=5FTensorFlow2/unsupervised=5Flla=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/README.md | 6 ++++++ .../contrib/cv/UltraFast_for_TensorFlow2/train_ultrafast.py | 2 ++ .../contrib/cv/UltraFast_for_TensorFlow2/utils/datasets.py | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/README.md b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/README.md index fda58942f..2d24c7c41 100644 --- a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/README.md +++ b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/README.md @@ -75,6 +75,12 @@ UltraFast是基于点回归的方式进行车道线识别的网络模型 |---------------|-------|-------| | UltraFast Acc | 99.7% | 99.7% | +- 性能结果比对 + +| 性能指标项 | GPU实测 | NPU实测 | +|---------------|-------|-------| +| FPS | 36.17 |61.76 | +

高级参考

diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/train_ultrafast.py b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/train_ultrafast.py
index 582a88636..d0d38ad16 100644
--- a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/train_ultrafast.py
+++ b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/train_ultrafast.py
@@ -28,6 +28,8 @@
 import npu_device as npu
+npu_device.global_options().op_compiler_cache_mode="enable"
+npu_device.global_options().op_compiler_cache_dir="/mnt/home/test_user08/UltraFast_NPU/my_kernel_cache"
 npu.open().as_default()
 import tensorflow as tf
diff --git a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/utils/datasets.py b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/utils/datasets.py
index 2ede39232..da95e1c50 100644
--- a/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/utils/datasets.py
+++ b/TensorFlow2/contrib/cv/UltraFast_for_TensorFlow2/utils/datasets.py
@@ -189,7 +189,7 @@ def load_json_dataset(json_file_pattern, processor, max_records=None, shuffle_si
     if premap_func is not None:
         ds = premap_func(ds)
-    ds = ds.map(processor.process_json)
+    ds = ds.map(processor.process_json, num_parallel_calls=64)
     return ds
-- Gitee
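The two hunks above are independent tuning changes: train_ultrafast.py turns on the Ascend NPU operator compiler cache so compiled kernels are reused across runs instead of being rebuilt, and datasets.py parallelizes the JSON-decoding map in the tf.data input pipeline. The sketch below shows the same pattern in isolation; it is not the repository's code. The cache directory path and the helper name parallel_json_dataset are made up for illustration, and tf.data.AUTOTUNE is shown as a common alternative to the fixed num_parallel_calls=64 used in the patch. Note also that the added lines refer to npu_device directly while the visible import aliases it to npu, which assumes npu_device is importable under both names in that file.

# Minimal sketch of the two changes above, assuming the Ascend `npu_device`
# TensorFlow plugin is installed. Paths and helper names are illustrative only.
import npu_device

# Cache compiled NPU operators on disk so later runs skip recompilation.
# As in the patch, the global options are set before open().
npu_device.global_options().op_compiler_cache_mode = "enable"
npu_device.global_options().op_compiler_cache_dir = "/tmp/ultrafast_kernel_cache"  # hypothetical path
npu_device.open().as_default()

import tensorflow as tf

def parallel_json_dataset(json_file_pattern, parse_fn):
    # Decode records concurrently; the patch pins this to 64 parallel calls,
    # while tf.data.AUTOTUNE lets the runtime choose the parallelism instead.
    files = tf.data.Dataset.list_files(json_file_pattern)
    ds = files.interleave(tf.data.TextLineDataset, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.map(parse_fn, num_parallel_calls=tf.data.AUTOTUNE)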

diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..604b81582c42dd6c8188c32d5dcbad8fd0f45af6
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bfa6d758796323509c67c88eca4c78eb971b134
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09c80152e0c0d5ba0ed2f78851539c0ad6e40452
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef71b407910778c83987b06e4f19ef93d2ee6777
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95deddf43de404e0c5048e23cb93bd5a9df4be23
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d49828d1c33416359108a9936f04fa312c3c29a
Binary files /dev/null and b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc differ
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc
b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b13d4d4afd7d8b6156be519643b511a3f4d0c5c4 GIT binary patch literal 2763 zcmd5;OKcoP5S^Kw{eQea{}RV`Z0BQRXOoZwCqY>!mPsPkAM8Y+7Sd?4)jOGZcV_9` zO>nMGjtGehA{Vkp5E2qZ2!uosrwG9biNjntAVsp^2sxlgP}QDwLa^|K1Jj!Ks;g?c zYpbivK9@|!6nw&;zEfV$D$36^={(&i%;4L26i^gUK&>iFX$zN9Cn?}>3eE6ZZg2mLs9;nX$g%{eYTru}#rYS7Xx^bT1dEOnzU3oXpSG0^iPM7%aW)aP9?{C51|7oz*HwPstxBVn4zRo7=Qik zZ+<`c+x?EvaaLyhn7=4)OyO_18MUuXu3nouvubaD154=i-`zmGeOWIuU$^b8+qc~T z?r`L?NMvcqnvs=4CA2V}6M1Ao|6hn61TTDno6vOeN-Xg5s@P42Jp@Apy9fpd1_|~O zj1o`i9N>5q7!((tCW^i#hd{ z0sOjM=3W40Pj~Cw@nU8JwH32fZF)iMlJzxDbMVJ-lm&Gxd4HJfXC=FK#fwsBn{}%$ z#yPtQ&>(W-fR*{!d!{6AJTjvZ(5&T=@BztYpI)TshD z?K+bsuo*BFgA91PWO|VoH=E}fI-rJ-8BJ&rH4#YXPN*~5v^uShX`1@)BNWip$POc= WO{j;o5w%YpQ&G~>*%4fs*8dA;{*`0^ literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48bfd81b5eafc30b3d54f4ce1808a454a45c4fbf GIT binary patch literal 8207 zcmeHMNslDQbNAlf#iHh@$~b+fZSIbaq8nRn}x> z79+EIbC$QEEN>0bvTUuCss(wG4fr4f27J>$5TIim1OqArCUW=ilyD!xZbUg8{LM? zXWZtv)oqPex+~*$w~ewa%ekxLweA|y>vr94u)=pWR%E4jGTn8y!K$qGj@sQ|b=G)C z>29)3w!+#d*bxGcPE?j# zS7s{*FGVYNlrOz7Ye(%-Bf_$;D@6rg)D{(1KGswPHBE-`Zz>6`Q0cqs+sbb$QRZEh zyOB0(Ma`&1`eN1hG*&xKp`A8b!OT`R_OTN081wytawH;i#%Dxp5;z$P9}Gn0^yS6k-7^Jx&u4n zmg9A_Zg$tP0&_q44k0EN^eopl_ZNv_S2u^Y6~g?;LStLowZh&IU2&T^z0f=mY0nzl zB4=_tn7SbtGm=qchtsj;1)OZH(-s+NydrA_;o-y-XMz?M?I5Uw`HGS2=kVk;jz009KD-c=o<^-~^EE z^L>jm?80~_h&0U?s-iiH{Bl!>+A_JA!m7xOeP+A71nImiN9BwMA`4r-98s`Kg`zPXE5u zH=!2uE!%?wy6gJ;H+P0u4A@j)@4P`%-kCW4iPhh+VMg1NL$W#8sOft)dK^3h*})AE zMXjl6wW8Lv5@=i9z}wbdK)Rw9klRwTRHE|R;5{Wz^L}A_C$RQxlR13phSpHAbVCvR zTBw9-lzF6L6RC;Yfp3@Y2aYMqkte6E7pCW#NMV^4`R&X}YMu?#qZ}NUE^FX2K2W1Y z`dmLzX8D75l);9tz-<-g`6!Q8`L{>d$f*Zua$mFZtP-Uk>?$+eIxMa@x}+==`a*A-Ukr{GvrIqC}fUkR&l5>+_7 zI%HLjGbpKz>J)R1RdP)81}ouLJW0(~qt$tn)lbMJ*XFHAr__?8wB%?kIWWU>9L*&M zTwo$YyTGxsnnaBU5WaZ>ZO`3lA4WsmDi?u{jP^HT23CHG~$Z!I~lEakN>m3MW? z{lt=c<#BH4sg01aI$w)&5!sgUo^~SVjGFZrVZbF{yLVJJ{X5O!yWhGgCPAPoczzhbb^_?VD2V z(A?NM;50Ci0W9)^I3%QrozuwR0^Lw#p_wU1B!F`?gX!+X^+S;lfSE!kgmZ_V4u<}| z(0jh?^IeN0puzQ^l}Jyla46CvAE71FbK&C`A4X-s7rJaL@QMD)$F6qQ6;r1mxJ=kz@uZ!#Fd z(DFRn4aoW#;g3#EXklMyjxX}u25jm(uFcocnJ6;5hfrvaCiXxyq1-*2C-K&lZTVHO zbyD3z42Y;DfRzN|4wEt-QzXC(h^iW_V zXd_kdbAY)J0Wmtm-cSf3^*IzV%3$JfXN%AnDf1k;w1G0qqBb|uXK;G+JVKuy5>hYX zE#WQWt>CTVt>LXpoDDo5HV|lY2s{O}Yoc8d-xj{5aAj7GilcT^4p+l9a-B@y044^; zFF^Ihch!UI;YL(^q}~BK{!$y5y@D5kG^))vA)$tFnSPkzKfA9Wm3pYo>fu(TMER%? 
zhrcMpipRNGfI&YF%gYXNn)Sm{M2yb{$OsYsV!L@=FSJyDRZ$n+C3U6pA= zrfV;f4pt7{46m^|=J&Bj8YI3#;S_=R$%i@M_F=R#zs?%0F_7WFGpHtN<6N*xH3+p& zLDuU0<0neY0nGndKm|Qok>hBhzbyD9&pniqE^ly4)<3%q zq|Oip4d82l%0;K4LH^V@gLXS|d?ptPJF>SAB z%UR&m98ga__MMzbZ;0u8XA?@MDTO?@y$I2EGNvRxI?Dz)E1?Gi!-2tpjdm&^cwGjTn zPVU5}Cc8=aF6-zybo9Cxo=wT4tUaKkuZJ@v3_dUrmJFO04;S-_K@Nh`)WxK1W4X%g z!X%#mP_m_u4vaIYi#@{PC418jr`)@+Ub44)IR41Q(RzH`Gd^(KyLD81+{lFvONTY} zLPVV`8M+nT?dUk{Vh7?N>N?}yKx8FG3Cn2B^UW+*f}h z@(=C936N13oH7(06%+lCt-r7C9Hk9_;Cr1LG0-cKOEC9sXD|!{ejg3^ZxOMG?0|^8 zG(g%=B2A)VU_{v#nPeTw~0hVW<(4k*NFTMk>4dk@K)4F1W7d$ z`xEtxC3LYy;T87?xI+Uu5LLNu3C6>$M7}qf0^?3`$B?(7Q(GR!H>s1`M4qJf@-}YZ z`n%{03e>3^sl+^`q#{KIHxL6GmmDU1wJ7ZR?2y(6>zuVF@Zd~ji1{G?;9PydX8?mr zh2<7gfmL{f$t04*xVn|l4-*GNw}9~i=YZpYYXbhKa+p$)Facog^$q?h&6uJ~Tb>`wEBy^f4^l~8b%UM5;f2353Y<~7%>^P6Y{^E(J{h7L6rvYMuj51egdo;j|3k)Rw?$6~JjNT%T|JLg2J{32@rF1UOxZ zetF<@^#X8;!BPyIVz3kgr)!S^r;kbC^yCHLbbSe&o_Y*8U0(vHr((zi`t*MboSyn$ z0H+%t0jH0E(?`JR|1oglhw$h?JxBNeF#R%!;1Zm~e~%ro$?MWWd^E&AWcz%A zDv_6`INx^zP8VfziA5b}3cCGton8D}Bkw`8@t1#5HcGIrpMYzLoHNtVffTxHl9#HS zZlvfSG9LS${6~%7raHR*lL%TuEKc)3r)#NoQn=7x1kRtj_Dffh3k(olElD&-`Yp+~ QrnXSlSSx&5*9&+53oG~#lmGw# literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adb42bd9996c8af46d89a798f252d1fc07846c16 GIT binary patch literal 9735 zcmds7TZ|;hRn1pbWo13O`u&)mnVz1W@zi2(ua{*pcr+<0WfjTKLd&Dc!Cc=n8H+F zu~kn!)=rdT{X|hE&#()ktZ0|S+qBDgYrfgDY)jP9{YtNDS4G!q%HqkZ`EE^l~)y3 zVbzBUt9sQ3x_zFhUsRg4Q;b;IZmMz7ckg>VK&o(Oz&yWMh$}bTu(i8A;Jp`FT)a8x z-@|+A7rkM)d(aKMgi(Ht8ot&I!nl0LJ$!vI@Ly!7D!N5q<(q@n;O4*|@TMNuhTTKY zcY@vhcH8&jYR3zmo)_$Pc$dXB?sfM4?vdyCgCIE7Wc%qactPI_f93^e$8Fu+8T37L zTD#+gyxY3t_T7%hTQr08H=87azeB>qDlo;?Je{dbV> z*$O)cPL+MctFhH@sO&tu@UURlu^JcI8dhV?o5E@|kiW#%kzW`2>30=Fv1hPC*W%*q z-S*IJpH5>+r!>Lt5YjzeLu&gLuj9qbAV0)|-;cK9dTZD1_dP#w+Fc%mtp!=4(3_N4 z#}oWH`UsT=%CSlc8mW=uXpsi0M+T@76+jD-4qA+gprxn;YDN{%a#RJiA``R{BE1Lh zUKx)ytDJ*az^_F$(0Wt{ord9?=ZXi_zCu zB^`CY5tV5qs8xML@}L@+|6VPraRGViCCy(%dJWI{y-T2w-5&ZCvbcaZl|l}C3#|5t zq_vLpWjr6Q_6Ad+0`={V^$*_~e#-j6R_E{U{mnO5Hveg>^WM$h_kZ}~f86Svx7jCu z@cHj=b?%>D{kZwT_qICU|DO15-RgYj)qgpf`_6xCbv|};_22*X<&`bk)yxfDr;T0h zhOxOHc+L=8HnxJ6>w8Xs9T*pw*K+U2x;G4jel<0|2%X`}AWbu_cl*Qr&}n;axX-;H zF7(kw5SRLn-|g)Lary0im&u-t-NAks8wW&-0d_6(L|4r#aUs|rdOS8Ar`zv_juRJ# zE)UV0=!BoA4_*L?i!H*BAl7%g{a8Qf_W4C(o8l||{hk2+6D zFo6|<)m(^;!1LSu3iWc8$VZ4|lI7PZ+ay9O%YP0et_(TQ)y10hJ3`R>IV$@ok&h91 zp2#MV7l?eE$csdNnh5C$;lAUvd^ZRj=iADE-`?CE^t{c`gD9YH{Dn_!Vm25rX`%Cm z*AE8#mOnUnadY=RR06u>ZHhB!6SKo?HhXj+JwLq9H_$tMKez#+=$2Z1!r#&pc-Pcx zs--TdGg?E%pEjeesx#`*6vlxMJfG^|8yNbscZflBdtN_ueTRF#dx#^;9eP2F&UyL~ z2v_mk#1ni4gy3^eSpqQasppgt<+NixQcu(o0fMndpsF$Tkv1wsg}01Raa0Z7SqV>AuuDFJ57fZZwpwGJR{MAJHN#+cZTV&C=s0% z1|8&PH*mxPHY-v+#2zS-qL3B~joIm8E0u{Q6;|kss8=EFNi|+%QdO^}C8Dv&QIW{i z(lXhd$YzRfPpA5vK}NI4e->inpCBUCt^`#$a5>|z5bM(*%~`&MSF9fL&(OOHz09lt z+sgVpw8fNqF2L#*HGY-IZ6dz}f>O*#lzx_YpCj@bkvl}TiF}^O>mZN`_gJ?TI)|K8 zOcGjV{>vc0f+wI&qv#E-3RR+K#?aKq|4hwLP4%drC|EMP7MV5ltKc#52o3U)fF?kT z=-`8n;2>r?jya9dVW+XeGR|yG8R;Vf`lGR;;Fbh6lQN-EC@TxDwM_NPBMS#O9p$MO zS#1+~QrW)MRE1P?5+h;p2@z$2L3I*9vm(}ne}U@0L1ZlPXSSb^W?bm@+a3?mLqHaZ zqSdukO>D_(A~8`Rv1#V6x5#FpUkOhEj}RPknm3Q5w;!EBYjDkQ%1(P zlzG=i-*p3SayB_xd z+-|rV>;9nAER6*dmwSMWd#=Cl*_OLAxaUFrg1dI<(An_-b9NPgGH8d6C?mE=Op3@7 zb%3J1;cw#0QceJ^DxOvKXgZP8xK;*FB+*PpA0QK{dvHpYNgi-YLUm6+1{|FbJQW}p 
zs23nh6K^Vo=}XcQstC*HxD*vpvlzjEzN_>1sm8G>z>RD|ZBAiER9YemY0fE&%E)Ae z$XtQ~qD=v9z6vW0l8y9P;+3N!+LTD*7!PtMYGeR5VP``2<~A43g9evm(CNY=fNKK5 z1!q@cyg;dKlW_idt7*ZwAjIZxV)Ai?x`A=y^ammTHR2Qjk$oJwTE8D(3c8)%pv#;V zhu`4<_4#0U-#O@pyYNKl`}{3xQ%dT^77${;1xsna*R;f{+od6z!_u%z9h}XSsI&+4 z#g5zSxiPQ`ev~wbEa(M3MhX#|JLrbIomd^l+Ay>WqSd4#m^YGY?_eAOnM8_a;UqKh z*TAV}LM4_!>0MRp<5Mkd*CJ)0ANdM|JYw_wHKd#=Mj@vdx-_^GicR&7mglsgE<$yn zBzZnmng2pq+#}B!&VZye64Ac~HOujzF%nY9 ze9fni#1bPZN0t}~P&D7}sUyJ#+e_zAhCN_L7N|ik3PxoB+bfJ@UB=7`=O9OH0xhb5 zV?|m;=HX5q8tJsuPY^NH^ar$GMpv?%56}CxHZWDn@(nwchgUrcf8<^LdHSS=s z#U|jtK`Byx?t)-{gz%^NOT>7Y7)BosL2QI>AAkl6NGLt`P}HnTB`-qBVL%9F$Hv&P zi%@pKz?-Z9-lpE@^Eh)wU5CzQnYwSAvO4jS)P3Hk$`xTWFll2YF)dQz1IPga(7@y%qC!&Z=b;>ox>Si-a{ta70-Z60fN{8 zQcF|l1XMz2=|~wBDdG@mi*yVDK%Q9|iYDu~alP~YpV9BHfM-u# zNWO#d#wHyCLw_GWlDi}EN-mu#myQ-e2w8|;+Gy%dZoLxcPhe4ndHn&F(+vlFasjL_ z3MY~3{Utseh{ysUXhwjK3Gk^RWf2Ia=hL_aY}0yAL~a0oGIWyy(-m}9#3O*I4kL+7 zA#83yilAf+Vf!pAZ3D)0L7LClLR1Iml%ScxBTICfYBKmPX6qQu~~-hIHC~Wpr+|Iyx^#-iQ{(h;Uwx5KX`s=c84@ zq0`yvySI7AZR1fkE;yObD|+yL}^5JvNYm5S-KHj$kNl%r7S%Yt!L@k z=wg zXGe1rJh5HQHitt&r~J`EzUtU-dW@gAP)AEos4u&pEXkawqgt*F_l_De8xF`hrO5#0 z?_%dj2mNhI2``;+Ea$3^9rUr&UPG0*xIaWZ$ZKAa_A6P`u^GCYVzY<*fU>Z1VZBy> zA;To1Vp~au;6E)8XF@e85VNu?0Qzy8BqQ1+X*xFSYLby{YG+mm=Rvw))UKZ8k`K`5 zlGU*5Nha}LvwR|bdn#8h2AeXdf?zaqHRK0UPPQI$PT41E=;MXA>q$QQWUgGybt+du zFdDfU$tQDF&WI%a=!_IX@>yAB%o~riai$)LZ_G&tSQ2&M%2;EKngo55ELp1ex6p=w ztOdo`&^Hi6K|H0fs%_|akfXl^bwRgKN|tC<6I7;<&Y~@8e=X`N{n05*KjB}ek$YAV zF?mGnAC_&MRS~nd5KT8kbi4pysAV2DPI!usoswJ4DL^BPqEqUY(=pW|_l{4-q)6 zbo=8tU_~;8k0E1lb$*$eQN)a|5*dg6)+l?42$@eZnkvGIX+&3sD(hj}$;FB}jWM=* z2n{+r{-AZYS(cbF(K`ia$AL@viAC@bLihzLxDLLyiKQD8Qljpi*NKc^lo zCbnC$z*(&BRu&UD6HNK23hM#E6p9LvaTaPgn_)-5^j1Tx>7Kz58(``RZYf53sE6=& zVYa1w_@O7b>Ot5JhP<>v@Wx4wa0={>umIB?Hi2A23aoSrC+Hcxyjx-BbL6Hx<^sVl zPTd$;SJZ-6oo|WDs$8$427Nup!-mM^L=89+%n~;qYnWRFjzbA@D2aL2A`59N2`O4_ z16!!Fow^SNfh9uI5+hW(eoEWq^ndW1pS^$^@)d9;;EEs8M~a zMh(DQ6Tti46M$F5a}p3nBnOVDyw`ynN;f<05}>%ALNWgxtle24=D$m<-y`xK5v;2s zAo-MxnbQ(w36?$_F7rR2`VNstL@25#VhK4&mT?~$>nBL&UnTN~MAnF0BJx=xpCdwd zDt2~Blz0O$Qpwa50G(YXjDb$t0fdEDN%1$3cpRk#PUo;XkJia17^C%{podAc{!`)! 
zB5*rHYyLGV_#+};Cqkc>$JrlKN*F3c&cN}fz*wZgW^hcLrgn?2RDM;~p3lFwagjCg zAqvpTuhBsno9+F6E9?&X^5b;NB*SlaDfKpydqf&UA|g5wItDm>oIhi@(z`@dm*K_*{|#D2k#Kn2{7&q1{AgSWC4$JCrwzJ-grnSS)bw z1qp8Mj-67L=(J3kmzy}22TW$trg2hF>$XXzQ)kjnr%jXAP1-cQeJM>_hiTGylC;fe zI&r`6zjq%1X`x7b!>N9WBS?9d9MdNhxD&rdsK88evmeo0(R&oNbMi zM_RdZPSPSs8!eBv^5uMMtUT5#lnY3Ys@UduYoa{Snk-LBJia;Anl4XEIDzm?c}BuX zglEgM5>6pJSDuq_dh$@Eieb+>ZEuGHFF4L`$3)wbKW zDy{0apV_KzUFg*9c7-)cmY-z)cC*3EAY7Gj&aOLd+paT@H64GDQVmib8tl*TBOlyt zRk!^3i*0rLTGe(jtdSS1PP6urrn6h!Y@$0PjY`rryME1X*XoYbW@0`_Y&-67YU0|a z`;uL~g?fgcaod%x-PZPP8L?kzwyJCON@KHKbt}l)ZEm^G)`*VY&qUw_JkC5o9a}?b zWkbcd$ZzOnQ$ZK`;SpW+v$Y%5t*!c|Q)x79$E_vM27f6$+;`uF$N4aT zXL?#i^DIE!ivSv46wvf^K+7`#BVHUZ>S5n)nD!+vwpDOrp0;k?(Uq77Jzl5z3ckcEyBn)Fzon|&1P%W!SYw#YNgS(TUFOL zH|tw|wAS8i+fFIvTTXqmVb7v5dyZg|V2WUxV20o*!4Ux8s@iL4c7v~<*6d?Qd-vt# z8|_wo*~Lz%>^gP({Iko~nvH8%e)L#*wZ4T#yt3K8bz%9&?d`g~-FE8BA8M=m<}xWxWw)em2$3c{wbo6)b2;7ZhVj;yg@p zEMpAatk;61)fQGc!6c_v-K--KIF@oLh8(*lWhJ7p7_6A?Q1!g)4jBgmGB0i{FRWUzI(L+m&}Y1A&_s+maLf*OW?WAuC8dT)|CdGR1w76kK%WXNs)R}k6*{I;Dvgk(jtc|O!KO|K z>DS9qbyA4F4wiLFjj004#novwp(YVZs3|oK3YJtQHK&dsC8f@&d36+_v^uMbY5}2) z(6@7b=GriEz511>d4xol&8kF`=Dtp`8gF!{E5aXVDr;ri6l~alo`n z28T?mWO2~6$_NgdR>|SOX_ZkNI<1oTMgYgW9ALp41swPCfD_&r;G|aooN}k#8EJQR z?>To4ePKK=X|JEZZgeN{Om(NdNd*#uoLO%QL}mK2h7>DInMTUgWo_p-UxS2!;~fc( zw><$Iy?Ui|WQf+1^$2nGBU=^FML)Jx5sE!@y!uqwH^IaFsN>pARrizb4IIoH$c3th(+-4iZ2NWnIKIt{P zURSrl^xfJGKV#Q#)@`RAL<3$CQT5f`HJj|;w}>VBe*{_1wst(Rtm zSPY@%>#`tHbo(WOZzs4y@BxAk68t%W?;!Y2f|m(CL?CqODnkndivWI{c&;?no*yIK zlmKZT0*8+=54mjaO9*HtnbC1QjlUjS%J)OPl>XaDf(YgH&cuQ9HQ-q$o5U}Qhjf*3 zM*+AvMtAhC?&_Y1tUNZ;;828Jb4}}7dzW2EC!Tr7z~)@~l=ha^ptxYZ5l4;41MMXo zj&8)W_Q~0JfDC2bGal%=wzKpaM3TYQtWloekG=CKkKu86uAAVPujjGp^{(L=cQtHn z{fCSl)EyuRqNK5O-ZwYuxBbY?>gH}85?f-G+hTyt<7n*NXhPk9xvb8Y&tj*(kFxx=SFqA?nmtE)*4<-x88F6 zXmxwLzNP$B*lMM{RWC&Y?v&WZ#>RlaO4zEO48}}u=cjH^Saon39k-n67i4vrIY#nP z)Ui1)VF23*IHcy99@i&yOV0sL>5x^13F}fgj9PEq$vZ~yydRJAXV?XvtTUiNTq|=L zi(+8KyO2T4W2?>vSNxt8$WcK&n9F^A$9PS{smSd8%7M0i5p79bw4U0qx>?4&Yy;SZWM+A3^qXw}%aFww zn+;>5m|?FR7{q=WEMt_?8+tXnV`|PDD`@J7H&W0hwQhloV?BfR$M;@xvxtpjRdNDH z1?CCwSU9PRz|2H4JEy%8Mt4l$ae*fVE(lx{c!F>(7VhaZN*_lbN2QN5 zLw%eH`#9^J4f{Cf&7zO9-Z}KKAaGIO34zB19v66$Fz5sOZ^Fwy(7R6v{ujL?*p(0~ z_PK)vvuZNfo9fts)Y&jK*wKP_+0nKfB*>|Pw6Vmoc+3`gwdrHEm zBz#)JrzKpHQI!}DTjJh0^GN-(Qs<1+IVa(>5`IGJpF324Np_^MbNw~VO{`C0esOo& zOReh)b3tr|0$7-`JVij*uyM#TsEc1Bs;2z!cy$nvmuY+Elhn5ta1qxG3;aNb- z>cYK9_et-``w@HUzP0mvz5dlx4MSSLpe}kB6hy2~8fvm(T*e6Z=e+a5Sz5Cm=-ve{ z4Z%!XKZ4M#M?r30J$=vWUhtN&>K8b-z3(_cEmpcuq0~`~<0;g6O3ji^NNBkqx+Ect z5G9N1nR~JB(*4Mehck;&0#7+_8N7OO!^Fsc3$tJFma$isfs2dx^*zg7WIks9r#OF4 zV=P>!XT7KU=No%wJ=jUw`f>Fw_ss64y}x*1ls0G@=7tk&b&DbPifRS>CTcITJv@_M zTGsV_-XwPCWp4`bTfJ$(=e#2NN^th>3F!-cW2b`FzD?RlT2xfe--~*e)=v_H_oF+D zphcJVPdy+uPj4W_{ygUVyGScZ+8IgvB}wCaUSR9qMPT`ogqRlWCDaP(+P48y7fILD zi*K7jc}OJhzdEEBK29aD8$Y z)d^MTl}@JEQ-`Hq5DnGnm3Q@GC)>*p8yk@deT8`ig3u_3fa)}bRYaWd$CxBF28s5I zVE0fl$9pXvtRl%_&x1<-WUut0)`E=wI0iMOn=2(#-hMO?=WPnYoiwdA#nAdE_N30} za8eO+@Jc6HY*qI{lx8Fx)>yGAz^wh+uk!c%&#(2g#LlcVvRa4br(T5J$1TFzRB@mw zI4iG0E{uhGLT5$_RvT{JK3uLdDS0BGLrp36By}vp6EY+i5UQJ-E4|J(L`tim=*plh z)}3P)MTToD6KOUu{?%SIP;ypRb}6=@0Gg!^wEBmC|1)9Jja!dxdbqX5!PXE>51MMM z_<_z0aq&9QJ|Z&{bg-`#q>IAx9I=T9zps7?iBwx262&EBpwVN4+D3D0ttjGY zvC-b$QrLGxx)V)xVN2-GEMgVS;_a1AtQhWE2Cz6Qkb-Hs@guvU{J@q39gu2}j(?Ek zrBo+9)X_pG>bBkLX0ekC`&V_hUUTbep>&oeW@vkrZ5vvuOxC7RRJNW?BfU+jn|I-ZOkHu{j{L2(yrBZVPuS;@#<~gT7$t`knJb8cik{o)JH$Iu??NL z<=B6oZP1<=m_ajaEx8IKc13KgWwE0(hy@iz)KAbpCHNDawDe4%;8xiNHI4U5;>sz~ zj-H7=1_OaCP(=1mIUze%%zsAB@r^w^x1sUKIy>bE>p5kZ??4otaz;u^1or-NL_)UB 
z(KwtD)*B|pa-xPFVKr~tei~=^R@GK=1pLfuwYJfZ+8<`cR|sAuaDyhGNbjK~yntd7 zB*gf-?I)NhUHFMUr7FkJ8&7087M{g&RECL!Ag2NCTuc#-FsI=M21J~HKQ4>etV6q_ zozbn*MB(^)&F*qICKAicp=E`LD<^HNOQ6`5W6WS8tQ%qCK&1`Ty6;9SxA8bX51>Wo zpz#;=MLbhx0Sca_7tAR=3GM!pVF6~5pB0#g@|V=l=|!V}v;uPZHxcHtOi(UuHIhX@pMdGe#C6=I8MA`YY&DMh><3B=N@@o$1Fa zpj2f0N+<~pb#Wd-hh{3UdCf_BhMlMR;SQL&ShDo1AV*{$xL)8f2jM9)X11I)Nk5E1 zYWaVFM+~4EMs#>ILo}lnK|%g_)MyTo(flw1P-3a9K)Lnwb=>1YS&O+aig*zDyYank z7e)~;CaU9VH-Q|Q^%E4f_am;p4rN!;jDu-Vs$rz}l1kr**k_=qL^oi6_EKRSscN1aTBOq~u5rUK%)(6MMl?RLwTkcaAO6dSy z5pw&zvb=7WlL4(G*A8&2-|+QYSm%#~o2wx!z)2DB*Uj(}gqSzI$Sm<|0>75vSBwI~X@Swg!`f_U zP8l`g8xg!=vXVFq2EhBrl<^8$hppK1Ose6?HEoxv%EUwaX?Q&X(`jS~*FTP7*`FnN z8=#aB)Y?D7gf|HKlr13tdc}_g)4kCP+&YXqQjM?S3C%cZ!5UAtWpri^Vs4n3h$z&* z0p#Tq4S9mQe}X`WJGl~)U2A$#;z`0*5Dsv~6xW2JjeM9N0PjHM@20uGVPJu9J_U8_ z4itZs$|z$}gJ~rd?{a;<!{XdSFYx^EYZ2t|Eof-@^6yEzd<48yz1!q$T2(EysVO*t1 zx_V3^#MJkoEE;BnRkV%S^W`6>qQYxU{pJwV`Blu|o zA?AWCq392oHX!TY$LPVWW`8fzCNKzxtD@;M_@jhG8I5vM)+pkA*d!k!){m@0RaIa4 zp2E+4;I}((JYVCn;BNs>pQ2yK_Jg!x!BiPj;9ANYquDa9EJ!Gz@$$yizo%c11=s)G zTz9ma?~ZvH$SyG4cME&J49XJm3Ucbd*c}JOg>kqGqoSCKM+A!TSWN$cdCY)eCN`yk zcfJg6iSVShVcFlm^D<}+Zdbz`2}{mjN6w^YOo5M%;XcwcgAk0>Gp@03s;D;upAy`l z!caDW8O*{&mPCtR^%9hdFoW@K!kg_T(ax-wM$XGfNg*Y@^D<Huz`yslt`h3VSf~|R?x;e%ubNL&>BK2?IjDCCt7?H zZKpBH$F~!t!YDtjg|iHL$`4X7+c_Biq;KLuWoCgjwEWIDYMCP-e*{*JgneT~2%#qr z);j;2EP1!aH@@m zU&F7L8=Utr_Rg2=zlmD?ypq)qeuIE5$Ma_g72kP2ES-CyWCaXf4k}NuZ`Sv=OAB&P z{S>rL=ntZA9XgW!=pe7ss=GJZs+`&l+8J_@k=MRTqF2rk!f5+NNIyclQ}Yo>5Q^EhR_n&ZJWn>Q{szbrMQXnle* zu^UA%lMLZBkC5&$9~XDh22W!tdbQKv=difhEg%G}^>5+TKUDASpg~EzhFUh&;lNb) zMTX>9SLOu(K4hlUR$RPNhn8k^O%K@58d)}Fq# zhEe;kvZhVo5Ri5HMx%yj56^8s+6b&aDR^YSYf_GdZ`decqktV8hQnJoODor4=YEts zmlC8#jR{tdJZvR7V@~#G7Pg8x?peg4P0bw?Z4NfnHw5rrJcEbArUPGFp#4)2zVB$C zGT$=ZGT(|o#HN5A??&Fz-qMj~AuS>vobhh-ZUh@p?o{`dTtgwg9{5VkcVpOkF%_j$ z3l?PASFnA>gaE<$%4^!*PeA0R_eI=;%O$p*cMYuadUV*Y59PB%Eqrcg9rvDL8Wbja z3T9QDLOM1k!y>SQ(J>5z!evdymjbkSn}`Df2PQx`Fag4W2@noUfVsC*a$GpZ>>UGR zTn7WDN;Z16ufI?rC9=kdtiKDe_l3I#PA{w_`#POLUjH0Uuz|VTclCWZsvw3|F{NDSEfo#yDxLA*en2T&yBl_U%WdMOKn@{H`++r) zW^6v4xUo5D;zp=4^S4~+|V?) 
z+u%x|=)sx54Ntr`7C!}jv=<3Zu1KTQdHgh*?PuQP$;cyFGv;8QGeDOO<2jrXSY%Eg zItj2x4^*JxM*6}#jWbC+ve2XpdvD$~>^G@{+@-kyS{qC?TxKp5%q_Ca2A28Uj&awb ze=9^r>#lxRyK92TMhE__cP$8p@YIlHjRL}N6%d{sy`_clX5c*B+%c$xiq8kyYD8br z!sUwx%Qqm9!Cgitk278z5Q}01H*;38f!hf{Tu;P{0SQ`x;PE9)%8$tswYTkmfS9Z$ zlnO3WIlbN9?&CJF9K6CZLM{IuLqAXO3k1IifaMkPDTMTwnff7uBEis#kqGsc^G&YU z1Pajg7#1{HwRzC9I5;r8xxV454L8)uBv^8dUBv}Y| zinbeE*U0vM^ajV*lejuGow^%raS_o(wc>qFX(X7L?C1gSkSYHovA15I8&LN;tJ~48+LN3t2vT((PL<1b=@Ml22 z3I5JQGwb9ZJqe}kRr@j<{8oZNNTSqkgNzhIjJ_JWrj~SEHR9hD`4>pfW`Jm^Wuhz; z7fE)zcZQ$ydLjcs7-!6b)RA2}~ zBeehf@m@`_R^xCLQPlot1f1Kz#qyB6Ac*`tw}ypcJXoEv!H^#Hh~%6(ocH5-PnQz* zkD+9EC1HOC;jeoo@rA)Ca3wJ@G>W17h^xMNv+4A0;>XY+Nv{m&ix?Oz+u8;k5d%XR z2(O8dLC&b)gsH4G3!fO!j0TzaKpO0V-6-s_25pJpopj^FhbICdQH#zm_CKUjb2d~` zAhkzB!R-Su6T18LKQn+$7dFB>Cfn^lu^hreecHwzsB4tienj@6x$vU6eCBU1K=n(395R`Y7M z-|9qIa$ZW#!?8YW)tdkY=$y$n#7fNbXt#*gN1)wDq5T&L;WcimBlqNPi1zp8X*_2EC>L27FXla?#5nybwBX;;Xfd1S}cHp>qL8-9@b2IJB3= zp|Xo_TMsZ}I1?TZ+4Hf`7hOmunU3(FhgACB>!oyOP9(E!C7*opmP-6wmc$8XrE^wt zadLuBx&q%ExaOev;1iXg$~P-axvt;-O6Q4h+OjAyy|%w`@%|ljSiE^3G=bus!Uts) z@Zm6o&!l`)Z8!aN+irr9SLC}gII>vPy{6MiZL7Z55#3v0Kfq^IXw!zbP>>Z~-`gbJ zLHW?*2)ZQiP&!E#L>i|0uD&b+O(#xlEp2bD`7wCuJNR<1G!f{F30&OLEqMbj^e$YT z9Z0Bkn>TlXzlHE8wi`HIh;YC=DSHgSw`y&8KcpyFI=iden{9kAR;B+1X>xFX`B^E4 z3(IG1as*t!gHtq`YeM4vTz_SI<2oh`0Mf90a3_1guCih>xXPy=xLuP*`O!?^i!>T; zA4r6aUFQ4H%d+%)zlbeV$;*ZR5)7h2B{6Slx`W8Z|~il_Rc3|H8a zFw0@q3*wIN`p}{Y)&nRk+!PjWsvZj$!xi$#!p#8-A7XT-Fmsd494&𝔲ZAL1E(b zo406;k{DPfF7m=J2Ze!$CNrdxm4SgsD%dC6!Ig8sj;G)O{$==%>R>N{|MxH9<0akW zCrTC}`keubj7v&FV3JTqIoCY`19wVN(*iU7JTNz~tDK~c3e5NO7Q4r~$H8RTOF`gx zKkr2MWcL&V1hVn08UY)J7qd6BpAap7y9MLkIz!4H)!+IiB~WPMd|aRIwryd`ya-@(XjugrT?e#bzyH-#jtX) zkmcL|oOS*M!Iue0d%>E29j{Jm%^ha%_HQsT8zw%=*g={}tRG?h|BCHCU|sO_rttMn zC{GlHsRSK`MEl{N;hIP8b3zFLYOJU-Fc4rkfuwLik{A{t2BL%guhC;^Jh-I)O%CDT z5DY{GT9JlsdHPa<{aeiVw*)?c$OiwOA<^?*p z!G9tk|F_>Ec#+_@3H~#|?-2YKg8xeJ-w3`!@Vfxz(KY!`2L!8um{87zNqtUV&W9;O z{5+`GV}c@+lnX3|qyu6?KlOl^P@WhnJKSt9{c&P~{gA`{Jpzh)_J1e%eS$wA_#Xs+ zNbpqv{O1dUnc+WF2qKu1pAN&J01*yEATGt1m>9rPe@* zD36654hs?XA90|6Ozs>w;VLu!!Birfrt?_hrD4C!~UOa>3R+S8;Ue#l#G%hYzJTPxH>8WKIblo;qz+0Rrbr$lZ465uL3o~WNnaAl z>*=8wuU@r(6%n{bg@M>(tw$o0|0xREF!>BmUl2hY z*#8vZXEd{C%gSmX7(!t?k{4s^4tZ>b{AvZ?*AJqo1rgNR|KB!+w&DdFd)IAlI>C{blP%Q8fP8t4>9d?%GM+j&~vPoI^KTfn-?JfCoQH3I`M(A^c_aV; literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4de6c13104c8738923f18d82dc21ad22a5ebd4e GIT binary patch literal 12841 zcmb_iS&&@US-xBE%k<3j>?3I;%d*>xw5B(md&e`H zoMy>5kOhH?9mlSkp^8wUAQ%EEsDh-57Xkzb5SHAcs8D$b^FsN7;vo-&@B8oVo}Q89 zK$W}e{`c(n+;h+W{onb|c_Nof8~7!@@bk5=e$X(!#mL|nL*Ox7o;D4`HC(f0l+C)i zZS5G__KsnOX-?UZyjVFV_joxj_e41<_f$D8_e?oc&unL1YsZXg=Gyu47{a#evGn){rafDpMLg-IUNg#bZraV@KJRAT9PSJC z_~k`6?~YwF-GV#*nz3b;54jWWB=QcsMRyAKBknnO!CidKEHAl_xrf~&2px4FcaOTq z5IW{Q;ojyRN9Z>9N%s!-PK1uTPq}xwcO!JW`#$%idkUcw?s@k<_dN*R;Xdu2cJD{% zPW18t^s-bg$y+>#x45f(SNrbr-R*nI_ek19NV~UuZ~J8VWcyV46w)4cA3?9~bKmQp z!Tmk%_qk_rKka_M`zY@ByXFf<>6u-AlM8|U`10~DK88`Uf@D?Is})sm1ermo^1y>7 zJ4iqI{FBdDUU>HD=N=EnCH$d_AAI)dXI`vaeEx;^1;rbZP;J)=8_lWu@>;98S+BIZ zo)=`7>wcwO_g0ow)2;Er{PE7Y@gy#98p1cWO>Jmn)pl{;vHJ+4*VY9U_0{V1g4p^> zUDZowU~YWQP-Cd}oo7z1blde)e%&Bn!QqtfWA%8T`m*H!0R-SyL_ zR<8Eys@L`Er=IJ&_139gv(c+IPObUPmUpsuHAr@f2xDq&$lS=!Xrj)ftx`wrr^wzabi@aV^M46pB8P&S^X 
zQtX!<#5}*M{J`=2ZZEL$A|*#^s`gc38%hQm(U#&xPFB!-j2~HAeDdY-N}q5A!#k@{_LRCble0$cOqde2En5 z#8&Yoyg#_Ijd&VQ&0Nm%`!JntyUAr<5l7ZCGSs=fL$ z-d%osUv!tcf#Obeqv@4$;irsMm8xD1>}IDCBpR)5)xZCAV7Ka>AknF>vt$q>26&3f z6vV>MX;(X_({6TxIHp3iwH!E|u4?1!+p9globLL-r3!)`W=OLGwe|eKcAE_jFlDrzk+`%21;#XUuA9p7 z(>k_bbU4W~{kZhgI?(<|JZhgQqwSBxrTv6XMD4qNRwst-CpDgMIkRfv$)C{4y|5eW zr#b~ZJ?E!%Tqn0UjwyswZhXr^2ss*GL#HqbX+Npcu7mWnPGedpLMtb8cbAZ~{Q9r) z_s8d=dHwbvP<*VEmzk_ip;+M5y1lE42%tD0f=sKs+^khwl}^{+i&O#Wwzt;C6s~Tl zhgq7sq3)v~@B;I$D8G~mQm{t7ZnNVH=r<}fm*oryf`nk?gT$*%uX(9eFNfR#v2u3E z9T>|usx=>`mz5YF8|FUnpd^fs6bO@oc$frTM&IicH4zTeA zFLO4Mvu4~XAV2S%4f3^>YNu0gd6h;}d44U&68Ig# zMfevb(?DEck+Wd*b2_PWxaVPH<6BS!o!Bxhqd(R!Xb0paQ#57_OwTBUobmpIZ(g?a zm>$;?4NN@5Y+$S1pA6GcmN^b$yu!R1UBmos(CoaqBOF~q~xW^_u=gvzcnI+Fq7 z5AF>ccq=fVO-y>I#aV;T)06-A@*7|G0WU&1z5;tY+n@6jz6r>f!<5HpZJGUfJ?|Q# zI2W#)S59oaff{pC<2q_A^cVG_>x9o%n@)7=}BGGQ+is@EXVqX0Tze#f?m{zbRHv;))_Z`E%|b+KiMz#r~1?V8MgM4 z!5+fqyu%3e(QX8u;n*D_pv;YC)n`b=_uXxs*|- z*^Ce$r?J**DH2fdp=u`t46=36DuEzH3IY+$C%#f|F0c5Y z?oHQU37nPg8YtE}WvmCf1$*0Swl8^s!;UGe9}Ve{dV#ewZq=`n$+xS15D#BL5d}a| z2}!Wpm9Tn}{m1LA1cT5zLd`~{)}^Uu=+$~HOp+l;F)13GC>N-y+ChT72h9a>Kp;p< zG%Sdwl}`q&&aj^s*%5gN+3!KvN(-NpG3lY8_XvbxCh?yz=j~Z@(VXYsI%(c#kt3&t zD?b7}%bQtTNub(c^SF5!f7M0Z2*o~mt|rtPDf74p{sK{-1rN{$rgE~M>ZkjeepV+h zgE~V|c!$*n6y^GP9Uloxu)wgXbJ}g&37hJ#sB#&9o2@uJSH#?%`CN>MSLZGdj$IR!n2780{0np1v^@Y$b+YDP+e zXUBOFkYLr9z>t3hwgNFhg`K~M@+9HoTrrf<=j4M@HDbM}oyDL%6Bhjw{WUJl)USM4mBfs4UZ&z^xcV!H>tL4eCPOaYYL5HKN`Ta|p z@^~2M3TRMd`i3OY#BxdskwG8OE%jk^@t1IUCm^^kp0=`3CaTernYRj7(wa95*1Ub# znztUnHE&8Cbq>Xc7XD5|^0-8O-m>2X3ttGW^$ivtY9u)~v+y_13vGPV9+eVTXvsqx zf3qAa43$h+#=bhUx6GM4kh8}a=3R#tn`^@XQyAR$F$S#H{$GqCDu3PFxH#Yp*UeX+ z2suE+8X`*$csQgFMXcf1VUssdtB6`)Tu;G9zY<5S!?=$0mqG@hk8p7!{Oo9yvNTLN zrjNR05wTU{y45R&F|0RZQ7pbM4>9em#`?GEVE)4zQFvz*COu+x0Ph0!$?Xrjm6N z*U~R1_m~PIg4#LQEvV2&qN|p;6wWUSt+&kNOaWtdnj{d#XZv?o=nh@kwm-*;biWUSNVC_rd`AhJzbO9z`#0ot*gs<&MB^FRt}1}WAGpPbt3guLRdxs(j_AEHCv2Ox%A$|9GF*pzT8GAcSy z$f!c2@R0csoGOl$#lHx1ahEWwkXs4A`V`u$aXHA}ZMYZ~yYM045S9<+% zosP&A?;*LGz?xu>TovKVvV`aWmEn7w0<9u;1IgizF}LHc9hNS_V>Srs_zo!8t@Cy) zc*f$_tnCEu@NI%5!MEn8P$#XYci`KDQ(+dZ(Qk^UZD(9_$DA>+UIo%`XQ5#UHwpJT zaUbh1H>Kx5{$|}YR-^NNZp-K|0Aqhq&kMeOs=tWX$MlTEHu{IqMjk|NQBTP#_mGfA z(8$Xvpl));ARhOR=)-OX7;AbCP;3(T%FvNv+7Ug$G{Hp&sCXEwXkgxV=GYSBCqN%ba*w(hA}V6IBZjmhh$c!;yIC25 zV?fh!5H$2JB#wBBGnxjfNWvPG0M)}veg9MOTu{6SrPz)`LU@x>prQv#C3J42RPn1ADO;Z1^z7QFzI?^4~yrtX4sBbEr8+E8j>00dpf%G6K7Aet8f$xBFfEO6DlJ z{Mxz5x%7>JPYHC>R7Wx21tXU~TKxRe|GxR!xzgkw2i;?t>NpEhL)0;dAi*Uh9I)cR zR34L@TD{o{67-m3L5XOq34c0hq`Jatie+r9)*=^Hqk73J-KIXtypK_QoZ_b`ev0A~ z6kMZ@z_!5UCeuDjp(*+lKSuH66hA@nlN3V;RX(ENvIH|gT{$-hg>M|dmczBZSEG(-1*@$lAAL$Z-RGa;$1?)Bfo`RI*t7V=rop;*R75FeA!Xa z_LdDiuwFU*vK=inGbjUO^zCg2^V88;Y92Um%7vEh*uXpcavbSpur?Q;=u_x*V>(!}q)+JaD7}bQlVlmP zX9RB${6;zCIg`Lroex8@Fty?Qo`Ehp#O_IB)g&qdRcCb?ZKU0Vo&geNnYRE00?(U6 zdPe84#E!;7;`zNemelk81wF6F1RKAIP=Oe#z6gKSc$BsPJsAK39&vWZ)egY%vsdY`CF2HR9@q z59l-~mz0zWkh+ZRzu^(`F0fIa@wVKOV;RZ=xt#xWd zg%lUl;?OH+J9M61f;Sxqgnh62rL|=Mk?^XOL13M`0Pu0yXiq)Mo^VYYm{-*rBkL5e zQfyFg!L06}*aN1Nyf{Iq90q06O5m_EG8{p2Fml7t4O`b);Cf}by5@P!YG=X(bdfxB z8#oR~S~Qd5qNl@t)|>@^&58?*z(o){Z_YdS0Lav5QD-*^QH!%Wenc#8dkIGU7Xp~$ z2Jk1|PxOSr+983v<(JI%zVO|%{+f51+FHO$SUr9VSUacPOZ?-=Z~P3=d^=pnY> zhVf97kd|;$(9e7k+it){aT4Kap^cBZnQJjR(6Imc1gw7w);=3rd)RAEL$TMJMK~H^ zI;uX5?LbwQzGI%*924b%m4cB1Q$Mqr4HXBcF{+(2o0%x~YE_+y=J{}&>6@ek+jzvN zBT7BMXFf>rD-hV*L_3&vRV7>c&GuSbK3u7Q`GFb9BLn3G*Axt-L}?k6bxshEywBTc zs+Dryl%Hxx7VT+v81qQ;Rv6Owo z!hsUW8%mJSa;i?CrG%c?F`*Q)nE~Ryi76%9A7Zf0S5}aREe@EeB&P0Q^8t(U1{U^P 
zH1u#NAkV&O9=B-Vjn!jCltpMZ48d@rML%^p4P}|tGYz_+Qt%C@uElVQCjk@oqMyMu zr!j&Fgv{z`$ef;q%U5lfD=XfkRJ2_>Dgi5!rU10iqgLYwxeE$F-6O`FGlf2wyIu_+QgQTw3)eSn_+Hz z(B_rbp{_i=G<9LPWzT73)#Z9+ z1vWPduG27!uAkXVE-g2eOIwW)y}@Fv=@$_5&48rPJs>FzTs+JLx2rg6$$<@TH`wvqz7P2f=h%ICx?v?_ z4#3g@Wae=q=@?cc1@XSn|AKvY^HG>``eT+LmrV8Ns8i!C;*YkR2M+`+A4Pz`C~J4# zLmvuHYT>X4cG7KZ`NW{MG1zho3>rt) zQ4$}LtkbwsQ(wiyYcUA^NN9%d|6xRK^8PU#VKFtf#^n7Icvo!9ah)OWKM3<&6w`2Q zMea7o1ZlSH72{mnSI~MiF6u8B{!55`Ba$pn4IF~etd2%Sl1C=Y`<$cUBiP?xP`q+cVNzd30 z0G!96cyuzZ{suLI_;WAtIIQ|xMx5s^KKrctdj{ofmuzvP<5_NG!v#{2qig#|LH#p| ze{^s1gv)owlJs}8tLh)|AoVqhf1>yYh_IV-7Y5kase73)*GjP?t%nrDMx+uyuM|ZTdI3IStZ>^}nR2rF>e-CvC(YPwZ8RP#zS}CS} z05|mkihC)ZqqqQpuJ#z-pS7>Txq}rXL28XQBQ1#dWmGs1kHBxTA|HGnPmvD1#gyNd z6jQP>QCX_=K#=bA)+%oO)h5p7+6V_u7YCSeX2U~jqq^4e0}CfnzKqgAs(}Tz-|ebi% zELo+9Z50C}=qO+$AQWo2C=!84)Lc=U1W*+9R#7|TE04?&(R)sZAbGakb=O+;M@a`g z5-T_zEduf2pT>MCV5VTF-ATrh@nSNWOCCwzo?c3yOrJ_0O;4qdq!Z~>7G;W7Qhxsn DNMRYx literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6a68a0c904cfa8ee8dd079ed1064cb75dba232b GIT binary patch literal 208 zcmZ?b<>g`k0=CVQlKoj37#@Q-Fu(+4H~?|61dvE!NMX!jh+<4-T)?!DVFB|(21cMr zFoP!3OQ0-+pC-#KuDpWM_>|PL%;eNt%s}=nmi&U$yjxs}#X!-t#L}FSl?+8}K*eC< zmz{n_er~FMNosLPd}(oNk%5JNKxSG%Vp@D!eo=f#YF=@Eky}oFxsiTGWkG6DL4I+n jetdjpUS>&ryk0@&Ee@O9{FKt1R69AKk;Nd#axek_$7ncj literal 0 HcmV?d00001 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh index a02b8135e..4fddd6808 100644 --- a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_full_1p.sh @@ -98,12 +98,12 @@ start_time=$(date +%s) # 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 # 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 # 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 -train_epochs=5 -train_steps=70745 +steps=11300 batch_size=4 print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" -python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 +#python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 +python3 train.py --dataset_path=${data_path} --batch_size=${batch_size} --steps=${steps} 1>${print_log} 2>&1 # 性能相关数据计算 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh index 1a4d11707..74dfc2c38 100644 --- a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh +++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/test/train_performance_1p.sh @@ -98,12 +98,12 @@ start_time=$(date +%s) # 您的训练数据集在${data_path}路径下,请直接使用这个变量获取 # 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取 # 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值 -train_epochs=5 -train_steps=1000 +steps=1000 batch_size=4 print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" -python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 +#python3 train.py --data_path=${data_path} --output_path=${output_path} --steps=${train_steps} 1>${print_log} 2>&1 +python3 train.py --dataset_path=${data_path} --batch_size=${batch_size} --steps=${steps} 
diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py
index 25190fa2b..a73396787 100644
--- a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py
+++ b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/train.py
@@ -30,6 +30,7 @@
 #!/usr/bin/env python3
 
 import npu_device as npu
+'''
 npu.global_options().fusion_switch_file="/home/test_user08/hyperpose_model/fusion_switch.cfg"
 
 # profiling
@@ -40,6 +41,7 @@
 profiling_options = '{"output":"/home/test_user08/hyperpose_model/profiling", \
                       "fp_point":"", \
                       "bp_point":""}'
 npu.global_options().profiling_config.profiling_options = profiling_options
+'''
 
 npu.open().as_default()
@@ -119,6 +121,14 @@ if __name__ == '__main__':
                         type=int,
                         default=None,
                         help='log frequency, None stands for using default value')
+    parser.add_argument('--batch_size',
+                        type=int,
+                        default=None,
+                        help='batch size, None stands for using default value')
+    parser.add_argument('--steps',
+                        type=int,
+                        default=None,
+                        help='training steps, None stands for using default value')
     args=parser.parse_args()
 
     #config model
@@ -132,6 +142,8 @@ if __name__ == '__main__':
     Config.set_log_interval(args.log_interval)
     Config.set_vis_interval(args.vis_interval)
     Config.set_save_interval(args.save_interval)
+    Config.set_batch_size(args.batch_size)
+    Config.set_n_step(args.steps)
     #config dataset
     Config.set_official_dataset(args.use_official_dataset)
     Config.set_dataset_type(Config.DATA[args.dataset_type])
--
Gitee

From 4d6a959ee14561b48065f026f8d34b18101d8d1e Mon Sep 17 00:00:00 2001
From: yongqingli
Date: Fri, 9 Dec 2022 16:02:23 +0800
Subject: [PATCH 3/5] delete __pycache__

---
 .../Config/__pycache__/__init__.cpython-37.pyc | Bin 17471 -> 0 bytes
 .../__pycache__/config_pifpaf.cpython-37.pyc | Bin 1406 -> 0 bytes
 .../__pycache__/config_pretrain.cpython-37.pyc | Bin 652 -> 0 bytes
 .../Config/__pycache__/define.cpython-37.pyc | Bin 1656 -> 0 bytes
 .../Dataset/__pycache__/__init__.cpython-37.pyc | Bin 4436 -> 0 bytes
 .../__pycache__/base_dataset.cpython-37.pyc | Bin 11659 -> 0 bytes
 .../Dataset/__pycache__/common.cpython-37.pyc | Bin 4204 -> 0 bytes
 .../__pycache__/dmadapt_dataset.cpython-37.pyc | Bin 1306 -> 0 bytes
 .../__pycache__/multi_dataset.cpython-37.pyc | Bin 4647 -> 0 bytes
 .../__pycache__/__init__.cpython-37.pyc | Bin 286 -> 0 bytes
 .../__pycache__/dataset.cpython-37.pyc | Bin 3567 -> 0 bytes
 .../__pycache__/__init__.cpython-37.pyc | Bin 313 -> 0 bytes
 .../__pycache__/dataset.cpython-37.pyc | Bin 8528 -> 0 bytes
 .../__pycache__/define.cpython-37.pyc | Bin 4153 -> 0 bytes
 .../__pycache__/format.cpython-37.pyc | Bin 5652 -> 0 bytes
 .../__pycache__/generate.cpython-37.pyc | Bin 1567 -> 0 bytes
 .../__pycache__/prepare.cpython-37.pyc | Bin 1791 -> 0 bytes
 .../__pycache__/__init__.cpython-37.pyc | Bin 317 -> 0 bytes
 .../__pycache__/dataset.cpython-37.pyc | Bin 7385 -> 0 bytes
 .../__pycache__/define.cpython-37.pyc | Bin 4025 -> 0 bytes
 .../__pycache__/format.cpython-37.pyc | Bin 5030 -> 0 bytes
 .../__pycache__/generate.cpython-37.pyc | Bin 1795 -> 0 bytes
 .../__pycache__/prepare.cpython-37.pyc | Bin 3456 -> 0 bytes
 .../Model/__pycache__/__init__.cpython-37.pyc | Bin 18299 -> 0 bytes
 .../Model/__pycache__/augmentor.cpython-37.pyc | Bin 2874 -> 0 bytes
 .../Model/__pycache__/backbones.cpython-37.pyc | Bin 27451 -> 0 bytes
 .../Model/__pycache__/base_model.cpython-37.pyc | Bin 4728 -> 0 bytes
 .../Model/__pycache__/common.cpython-37.pyc | Bin 9734 -> 0 bytes
 .../__pycache__/domainadapt.cpython-37.pyc | Bin 2207 -> 0 bytes
 .../Model/__pycache__/examine.cpython-37.pyc | Bin 1308 -> 0 bytes
 .../Model/__pycache__/human.cpython-37.pyc | Bin 5501 -> 0 bytes
 .../Model/__pycache__/metrics.cpython-37.pyc | Bin 3448 -> 0 bytes
 .../Model/__pycache__/pretrain.cpython-37.pyc | Bin 5846 -> 0 bytes
 .../Model/__pycache__/processor.cpython-37.pyc | Bin 5642 -> 0 bytes
 .../Model/__pycache__/train.cpython-37.pyc | Bin 17566 -> 0 bytes
 .../pifpaf/__pycache__/__init__.cpython-37.pyc | Bin 379 -> 0 bytes
 .../pifpaf/__pycache__/define.cpython-37.pyc | Bin 2763 -> 0 bytes
 .../pifpaf/__pycache__/eval.cpython-37.pyc | Bin 8207 -> 0 bytes
 .../pifpaf/__pycache__/model.cpython-37.pyc | Bin 9735 -> 0 bytes
 .../pifpaf/__pycache__/processor.cpython-37.pyc | Bin 17945 -> 0 bytes
 .../pifpaf/__pycache__/utils.cpython-37.pyc | Bin 12841 -> 0 bytes
 .../__pycache__/__init__.cpython-37.pyc | Bin 208 -> 0 bytes
 42 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/define.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/dmadapt_dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/multi_dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/format.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/dataset.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/define.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/generate.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/base_model.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/common.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/domainadapt.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/examine.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/human.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/metrics.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/pretrain.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/processor.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/train.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/__init__.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/define.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/eval.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/model.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/processor.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/pifpaf/__pycache__/utils.cpython-37.pyc
 delete mode 100644 TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/__pycache__/__init__.cpython-37.pyc
zF+&d@0sTu2Jp$-oG4$vW&=n0m2Izle==rijn+7ei(=KzDVCmeSLawP?yYg2LXfr{Y~a4 zr7v|a{cD?V@0Zd4(&qIS=@|C;&4tYy`!~^+pEhqXr6QLhRqy_d&3DA0DE&IUSL&Ab z-^KfT%+(wF@4rax4f!|rzaxfTr1yrqL;D|ehZ#p{|HCfIFbg*}Z!5~A7Ji-C8)5iS zz>juE0FNhdli)`*{5xWdsf~5V4pAHD*5hb>symL>r?~YPTB9t1+C)TcvO59z$q0TT zqIQa@O?9UZQJapao$gMfwUV_dP-FQ!G`2GlwX@wbfM+=b-VeJho6N}xrZ&@^IYjMT zMD2X{99o~}zRsYvlH!A$ybw{l*u4PwixK<<9@|G^@*Co90V-(9Jdd5KVflu?VHq{I zRks_4yS{1Fd}G`8Hw@EpVpSb!`nKCL>{i{CO@1zw4E&NYtec)C48Rj|=&onB+wly~ z@_oD2FzTIFjj(xTgK;GF0Mysf)bt3E?=qsOo#EDv$F8s(!)yuTj_HG(Z!}%eaV!8c zQVhxSWe39{^xlr=Tg`dZXSe0-sM3QSGHMt$2(DWgCQxnI9*wZ7`4e`)K=Cuvu|*jm z9gpdCJPjWKnRVZi%tzHc64|yjRbwW|E}6Wv&q9!B)!&^1S^7ISb9Ic5hypH9bfNK^Z-GfIHSR%TdkyTj%a9t}X6fIUaES@sm4u9|iff&lex8?4Da< z!4GG~ZrQ$#dEK?drMZuT;Z7Svw5o~&CC;k;PTP{x7;jK~(zaS{47M_)=Ai1f+ny|< zP7YE+=As;;WSEi>B$dfWc4NcewkVC-59{_0A*f70cGqpkYFYl;hTV#QMm11#-Dj}j zRghklE_z@(m6EP)w_8DB)vmYAdSxUmWTvhKLs!H{}(%pFSx`8o#W1!LB8+w;AuE33^dVas4p@s%D2 zp4na%50!}!tKvPC1%tv`?=%`vAi-HI_bPf)70@g-tJ-$Vn$@&geo#ppfw7fyqUUw`F>jkH%b<=9Pa;Msm?zX=XWLz)EwM{$?pfe*}M;p z=Ad?Pne{BxLNE-#-do8OA_N**21~=29O>q{H;$G+`8LtY_;vg_LAKHE1Vhx0xQR|x zM);+saaM-atD>$e>;08=T{%Qz5~1Oj9vbx;qT#n58uh9xhm|zw?u@Fp2+@ds*TA|u zLZz$2E)BDUhkY7VZ#`W4J;bG6d${zws{Z?wybI3#-`#}`w`ncV`lv$V%NrjoV6m;5 z^=cggy~d)y4AH-}ut9o)w8O#@>zIXVmDEeMI^W(2O1(|x5zu;nha@#UoW7jy|1&bB zbnIWIkna1JDW;3*souZrFxpS0Pvh5H1NM*B~|-!m8UXY^*$ghAC?swhvRIBlYGcEndR}XLcOk%V~Fy z!6~bRkX?T{vNBOKY#(+CZ}4^3!DNul!LvyZWHFGg>nMYUH%v~B-iJ-uxM|!rVhU}Z z$KuWNh7#?&i>+|Sg^gnB@zxyJG3e6<^p0=TrQ3WdB7J+C>{{Z7Kk}$M8*e9rlz^Yyim%C6rAEA*+Z;^i5+dbZ8?}4Wn+-#u!iaw5jOjD zFBChK>0~!rCe0sgC9wE*sY`A3&(#~F8oqa%rZvPl>sd~{{JacJ&vWwKWz+GjIim~@ z7xoRCqSdC^E@KDd-1A{1um$Fo)iY|mK6_<`6#~pF#Z{@M~rN zdD-77``cxIqwHJwk2&zR%1IoWyB$es$VjFxy|(HRjQ+4|u2fGRY%BHT*v)lr&&kV- z+n)KmGMABOpko4UhmmfWOUwHxV%4mYWr7%`omcG?1oiJI^GHvn&dCQzkEy?YgeIiF zV2V(7SaT`og}bk;4q6SgxDr5358jYPehUEz%d;i4;Y(3_YB?TDC8_P@fEqFltJP_e zlhR?T{HD}U9KLLr&rI7fNjJD{_R0EJvJr=#4{cFn)|EDl9MdtvqPad{?{Ai1@*1;F z&kja6rERu+1J;s9UMAQS;)Pl;o$KTekULpxca9JzZB6uch!8RjYgVnV z8A&$zF?Wgoq-N3fK&}jICj`ip0;d{M__YnQ)q+9mN`Ykr_13r2ZkHl1j+S0ZA*S?HKPRqB~fJT219?;j8jF`GR(%>L`p^6TaW2*)WaeAB}PGcla+&=|& zA(EvjxpGjJvMAs)n@v1~9uu8`{5B;rf18B|l@V$Ep2d5%nv33Zc+bb)^LQ`B-V1mi zh`kTsy;vO-MMQiD&F_PBD3KJwAVFmHs)RK%T@WOMtO$f@FkKzn+n|UjM@(&LP(v8v zE_+!Xw$&Pi@g%BUBU%VoQ9%T@Hy|u*<{M48bv6{gq6iW?FiT*xy{m|*W!5$n1W?qr zum`ZHk<*5d0Qbwo))K{*uhJWZhZcAsRib(oU6*h!5xt@KEO|4I_5z~{HA0HIHpwrI zt-u}#4T!cewF8k%9Qw^KB+%Dg0isAi4Mxsa#t~eCil=U881NR{pVW2zK-4r zML&}F6YMj)L$OZnmU>gYJG#&}L%SmjamM16V@8@Ec#X4Xx4583wK7-g^Kz)K@+zsE z3zW=Ka*+}`{grQ0Vo>r95-6u9^h|-OWN(PD-6GvI3Xs%vAw3@bldC8>=AL&!+ms&E z1EQvC23?7Hm2`Z9b0a#cR!Su_bvgtLTa8+%`ZBbGjn%H2N0UKjKDkObK2R!wgsva6 z+M_7a^RUxu)H{Y_uS>0h%v#Mt{FDqsMod(JG15MaR&9VT2a67%UdNG;CCgg7Av6J7 zle9;u-ul5AJN%}my^HZfN20UUE$$Q!1IV9x4a<~|CVIQ%D?K@eFTcW((}p3#&Oha8 zP!kua5?X$JP^)@6H5}D*ua32KlDOh5fCPb$Ody=NGm|WwJ*d@aC5Q#psa{qTCN?39^Kx^?EZ#ik|fr#OxYqKV1c8cBMle@C-C2KQUY_4 zxJ$o2j9hv$o>vAO$$@5^sEvfks?@!MdVvo)`XDwUZVbyG9-kY(3c0av`dH88k8%9< zNlpq|CX{-a}$-TX?cLJ|fRD^>)ss}AW2B@s$D zux%yn^-f$%g-WbtS;8ZAvn|cGjuX>qS`0swE6IvR7SSbZhnxg`yiN`S(7;@( zNSB^0J)!mUcyYB%j;`@|^}z!!SY5iWh;p}ZLhl)is}Eo#z*2zMVcGB)`cA1r%IaW8 z^XbsB4h^B_r-Vzmcgzu<9^wd!j&%sUmV9YP6^BN(FeaI=sA4mSxt!s!%SGo9WxM0%U z3FRh6g{qH9pyoQRj8kynv?iboot7{HiB@61xifveR_2IiR*W^hiw%(|Di(p`>TV4z zPm0Ju1&50Tf*I?J@Zi;T-33fIN?%W5v?r~Q!FtzSO&Pi!>a@iFN}-z9rzGr zWmemEZOgIxdd=R(AKdA16}?Wk!-3c$>R7nGs)YBpVb_gqit|G#H6f1b^RAV&X37do z2adx<5wZ~X!=htROQ=Ojo)(p6|AFOn8I+?G>7amU3$)TGe~i#pS%Lxf6KE+jtf?} zrGRTX)w*Lgf-~Wp(kpgTT^jO&>HZ3}=)GVfEcS3XYq9A*mz944`qHFiost7SKC?4y 
zN38f~8Wi0`OX(XUvWDkm|9%2B>JByrl=3rrKya(_atd{ofiIrk{TzoED&9cTiQ8Wo zA~u0=4#97M@`L?iejfn+7wQ`BhM;S>P+}o}&2AMRfAMJT!Rn*Cv3{Km9sRIl`s${K z4%QPoI7I?+u+6{r^{_!bBsriT<&jLB#>-)uh@>H>q+whjl#a7fI?+-RaXwyhP%2Kx zD`O&%kv7Ol3K}2e@)?%u7!|!3{*A@~Q%(zk+`>~buVa^Y`q$M+q`^s+!9*v4J~J@_ z)WrXTh%?*LvUwcB>{^?2l@!Ba)y;9j1Lg(mqC|i&=8Nqgr;*VXZF^;GJG2w%j=>1U zA=Fi0-GO0Gm5jJ3MBBTRj17%0L#wOEk;#eENj76$lQ(7FyrRauv*;~*-Z+70h1-7= zHXtQUKhsc$CFmZk0C?}p088LV@2ZdvwcR%6Ik2P)rP`yuZ%Cy$Wj12S}0QtDOS4AT68rZ_Jq5 z#$%3!#-k_uet20A`)|W+Q3Qj~;?T}j#s_;ZWLzj)4aOL~gq@+l55PUd0Ug_vAE~$B3_88C~^_+|Y5af6egu+8lA!a3-eA4dWQet>IAG28uuY-UL zc*MP~(tY(!GT&i=!%Vht-G>DZeaDGAIWRqBx);~IwpFv?xdKIG3Y8?E)kMOHbheSet;cOH0s=AL$l7yFHw8TOlpZW}eSx#j&7q=3;wu_a3NL!CR zbtU0or1D-oV@-Cn (~t!bV^3NjodfMyP#eTGYX%WC_IBYfRq+g5i8!)hgtboSKo z=Z#f_&QO2Y(0u;3s@aY;&3$dDS@^L9n zr{RIa+d>VYR^3z5K;$A-#rM0YgvWS%8*@~P0Yr&(1T=G&?MPN(UX#jO-f{}s2~|1#>U9| z3FJ9Q)(3bdqE7~C=e6l`m>Gjkh&D?KDf6*eX!yS75DTp;Zh*8wak#hosPDX#dXn9F2GGH9t>fVeiV9C*w7V~(8iy+rRs6x; zm4Avfna8iB$q^yY?7o5~!`+C{+&ekVL9^4KkdaF&?9UPivTdyg8Nt3ABl^>m6CK9A zQyr9Lx<5;x%RWWO+!*)=G17m2a?&HXlHru!hqYM7^@_L@-o2}fC3kSDq|{&9!(5? z^Hv$r-@K_xbtR*B8-o;=3Pl)%D*t44?ZM+9w`0yB(8q*w zh7w|_Qgx(zD&txbqjg`I3|B>LMOLQ5!uV3DoYJK|F;>32AR1Ll)+zZDN=!;NDBGtXC!`Cg=?3vGN`giq-C*ql#`d5_~$CJx|nERCa19OID`M&SqqM+4YT zWCM&heBN~>?NX993)bF})Tt!(CP~pqTJVyVpQM!`NibPJVq(MswTz0O_%Uv6;cMXU z(LEXOXGn_tF%*3;Rl-M7!^nrz^I5!Si}BB@a)k{3si2w4Glg97r>T7LLUFoyzIeL$ o2J&oiv^a+6OT}!dP|Ou`r98?eiWiHM#XSGxQmWy9*<$wp0TzcR&j0`b diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pifpaf.cpython-37.pyc deleted file mode 100644 index de74cc94af8a09fdf7261cf96b00abc7b8646a6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1406 zcmY+C&u<$=6vt=1>-E}RJ4w?fX_BTXzZw_nb%RtmK-L5u z*dab4aX}op0YYkz+_-S(%0IwikT_5-R!@iv;>4Tv56Z0OGxL2j^Jdtm!y;fIagyLV$o45LaW`)50_IRDVXaiOp~gD1aUmL(?_Gl3NNb*9yyS zIbSz|N?3JmjWGLj;?|(ZYP1d}asf)nMX-=dP)1&Y3UZm5{mVFBfhux^mHVqWu7Qob z3N=>iuY60O{+*6`J@>i+3v7dK!Xok-EFrhx67o7MBe&r)@&;Uim2LrNufQtuRais5 z#;pG9IKDCWdlmg|(3`p69rU|{erI>C}F;>|>8e_&u!;r)3ZDYXKj4UUJyG`+_XuK^eQ8J*whP=b1j&o_m35#UK!%HVY%R-z< z69tc0s($KJ#g(eKQWGI9hFJv6Vp>wN$kbdG^E}96L=!x3M}v^g@+S2jkEDK%FnaMo zNaI}5e8T<2^JQg{L2;3=**wpC>0tVx`eehI*ZJfuj+m_GA^LF|dP0`x`Ip5!fwFSu ziB6AZe8gleuN032OEH_?!GoqPbi7W*v8o6*6a00C3T~Ocah&XlYbA#HLxl3~m zgVYl|j^y$=$7E_2NLBl-m3~N#9phRQTs3rx%CJ$$zRr z#?sci4%Xv|=Tl59J!MfArym6I`L5H$!lYPK=CtF;=Uu0h4>ZZk*Gw*?^&S>s5U_h& v=s7z@Sejk1v^CAv&4Q`TyJl&o>V-uet?Db}pH|70rA_RLs;k}km+$@;C3CVr diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Config/__pycache__/config_pretrain.cpython-37.pyc deleted file mode 100644 index 0f5e69c1628a70b5cbd7d39801c1807484a311eb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 652 zcmZvYzi-qq6vv(1kEFSzJ$i)+p{}=F5EBC`6+b#bRSBjnmM?xuEPQcf`>wj1kXSl1 zF}7o8#LnNq=(Q|2c0-7PmqXVHNBQLIXX|~xFE5fLBDe-$Kj*`Mkehb1T|SCqe2qqM z!aYec54=nNid=TCh}YJ9*`4*WzDJ<{iDUuqKtMy@Megt(GU9#YfCtE!hsb2ZUA}{Q zk4MNMA0Y4X7`e|AThfB%R%v&C zYf0N_4OrSRTe9KuwdhifA0GNmn`3qWO;x=%b5#U5| z0=&c=;Ev)Y;AQ3lcNKSlS6CHzRq-;@2U`8w1t`(Hx-N}QGM^kHyL@yoc3@=N034Ju zjTV?ri>yEmR-`5~sKrcbGm8>t(-I@pVI^8-4s}_XR+tMrR3zDLwqEb{y^g>yUMMJ_ zRYbs?T$l(J0;Ej4AP6M{%%xYaN)iM~IN?DcogkQ`Y#yWS2En^|82_BH+k6zx;~6dY z(zj8}6FxiL`jtGUwbe{Jio+Raw6gLHPojiYS9CB5_*7N1xQ%Vkb 
z2A|*BzOEQP>Qcy%JGNUkB&03_*8d4h`N#z8bbnVoUOmDBZM$`>iD2xED3l}}9XYLNxm zn1-Wu#+KFwZgK-)6Gn!$Ux%&y`Jvv0g?|ne!nCB(ZVsDyQF?XzYJgU6(CWAP)ad!$ zuC$K^ox^r#Pm#fClb@Qr3 zcMv6GI07>fV$=GefM(_KlIQBX2m8H*kztWhpFmH(w%aiCyEZ)UpwE=n>mzS}?`SPh z^^8}yuBfDBut<)BY|P}+3QxzB_+cahuyR%2hn<@+GHk$Q76tnLOQ+fQhuz*kWlE#T z!ilsGdxI&ott<}q+qvQ%fRH^HzluX&-IY!JL>26{Wzhr4^v%r>eQ+4T)XPY)Kt^J zGx7I7x;xhm<6kt$pAtHc@Q?qBh8fKC4BO=9`_c#Feffi7rnHJ(VI@}fs(#I``E|SQ zH|z%HO042d_)WX1`&Dn!Z`m!~uX$5`+ivTA-JAAj>>1r}cvt*cdsg=+yg7f~p4a^* zWG~oP(Vt|kZw-5qO|drK*Vr_h!TUP9!e;UQf|)zU^4xboYixaIqFpX0)!kQ*H@1@M z=F9bGFOtgoTa! z8u~Mu=eFj#0-pEase?Y7@ifq%BOWzBGDhW5<*a;K9aY~iko(`7!4r-1>S5`vDIWpp z#08|^7f90{kb;*T6OJXB*8y5j_X=}4>+0{2ifwA z)7q#uGS1A?YVaq`bwhJ~53Wh#+BZ(?+Uhq?8=$q+k4B9%^UnsGdRM0THxJbBMOhTmKz2=v7b#fn-(6B zVl|N9bOL5cj(%V{|AUkjy?iUQd`BrRt>eC<^d@_uQlVc+P$v<$MBLGHkGD5sE9~ih zx);D*LhMLx4Fb^%rLPyASVYmgkq*7k^TJ~h>{}#kE!BcFITG=}@x+j?op+&B5GqFr zXqoS;Br>enl(deWBS2IZ$hQeh_ejVt6Y{p+lw`xOd6srbp|#vngh*?cDRoi#)JC}g zy>Tw4kl^IXT8am8D0|SZK$kfYwy^YSsC`l9k>mZ}5;+zRV4HE7VXr4#0iiUxW<^|* zA7HwaY~^bZP&%eWoMT;Srmomc<1AKk;F8^~NJQKd0Z;9Jz95fKQ1&>K9&_baD{h~= zhlnW-14DPn%%h|yJbt&pGZc{p;~)l?5o9nW1H1NNaE2gku3)v;K39C)F_ zh}4sa)k54c-;0IfmNP&I!!)jUavoq0Rv33f28RPqv1A-q2Uzx<05Pa}#)A*B<%Ux7 zI0^$s(Ywi&W4WFa$2>mAiF7>Tfj}OU&m$SSJdU-83RQl&Qsngc1|2JoJgAcfHwAU( zDJybhDy%n?mppKFJmsVFvWNA) z2!szGuuRQuM>;--9m+q%&tr5M_Mfd_(P0u1t_X*rbRr*+{ zzOE(|(`j`iK6;Ac*!ifIrIZWBDJ6~7mQEh-x-Mu0!-dxrm{xkO$s=Y-(xDFrqWLh2 z;;smyfi}+(mv8{+3z{Ad)a8k}C<;EoKTqUOuwRWtFLHXHSh1MRTwXMs%K+nGR=^$J zNQsmO_wqc*481h8(n+7rc9F;LxuL(OGo|3D?c(#|S;NP<#Mr`tP1<&x-#((3thK)`dW^2PTu`d?vX)GayMcn6XxF7;}P88qO^n;sWSU%z?Q+4o~K`Lbc%|NPHfkIzE#!l*^Mz zE%lk4Ck?J@4bq%A!~x$uLQdE-S)dk_q@MXhTC`vx51j&?C-8N>O>>jE=LCCWxAQ@| z=idzZC3t>#zS<9czN&bvx@3d z!)%q>rj}paZ|I&wl|=E*gPk_!dxYs|zX z&oE^N?Wgx4skF}l?n~)TB}o^pX8rR5;b?wKxqo*%eHS(raZ_r z{T2){<+y?9sAT_UNv?vjd?RV%)6ix7NVq&Hqn}hlq*l^&Vo-YyN^VjLV@e?34+B}o zDqV;)I=e+@pgbWYu0Do6rL(x8>8xyx*9EPE-M(0n(y}rmF5Sg-gxS-~zi<`T(I8s{ yY->d?O41 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/base_dataset.cpython-37.pyc deleted file mode 100644 index a2fe68b3a73d72dabe03ac1e807f4a3ee0734fa1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11659 zcmd5?&5s<%b?@%^+}YXLZ<0$%RFkGDjy;?umvTfgA}vuA6)EwC7HLx%nQ8Y<_3ZYr zr+Zl4v)mob0)Z*O(!qd|V;lv`Q;>6Rxfl)t_&@NW;X{C2tXqKGbjt5lf6vUC%fNxs zyHj0VAMdMP)qC}-Zq3bABs{bK@^|eYUX!H%q>t#yA@eqVE=R&7CR;D)ms>O9U1`lSh2^YjZ>}|mJ_^etUu(^ae8F1iEw&bA>3xY6 zS?N=Wm7XfCB_`jK>gBJ1SK9naMp~Es{Oxz{zk_$#a<(<5dph#Vx1FAD+B(zwo+xt- zuT8z8C+6YbQDT68CbbkMGX z&Es8W3v3bZ8Mefh@vg8Hwu<*GJHbxkU1g`(8s2m4GJA==4A?bxg}uVgAUDr8*sDxM zZh@U;=g?}AU1jIlw^6dhF0j{7vdk{BI^HYnJM6o7ud)Wag!c*7gj8PltG9I5h`A-{ z&_i$fy^EilM>3MoTHgF;lAXNAODGb_*B`F$I6Y(CGh9y_xCXy`ZT+sEP4 zhV451j^#YrSl`+28@%th#`;?n+&GBx$n9Daa`zmqRWma(U$PBdRHksI4T%HdyBBHmOsrezU~uV^)ElX}rZ?^xBU`0z04pUOYOA|M8K z`2Agzdjs84+m`OSYFcsC&cJSarej}McTE>ij!*R4$I=Y~+;%L7zbMs}r01Wf$Ud1-f~3vy>3;v%bf=aY!+SxYFK-(*mLAX0 z5-6J~KC;>~rSb6@x^rxXlC|=M`B{odjDh?x#m=i^!xHiyx6N=9Tj3%gc(853^i()G zBYx>e_ul*H_Ir1Z(B=gi^JrRUw%fjo{0GcLZ(-0d_27fF8uy zmEpJ$#@ikb|H{$$7YTnXQx_}lCn@x2CYaAOJs$Se=U~s+SM3jC&osT$rop9$3|$#N z`gVVcb#WViiuAekIedz)#9N+BMo7leO`h~ml!M$s-ph@0Pi3U}r}87kz2@bACVwW4 za&MsDXYweIx9p+*GwG@POZd=VDglIUf&VMup|32Vx6~~PC{nj1;QUPp2j!?MuSg+0 z)W_cfZqO5u;c0ctzm&cVAj+&jkOK}53I|12JSgqm@@75*RZo@M(!t8`}% ztE2KW`3sqsM}@;V%7e!uax;{B{JT+!mEMpJN{@f{KpGW3M;-CR$}v|nqY@~nd?|3E 
zW|na8b-fxZq77swFn)YcfkY~6(!uO#mR0et?p+SrjH+RqGY9bX4(Gjv!^Kf`G`A)_ zlb^|ydrSI4e$0WtN`2pwMzaJ0&AlgII9^rTl<)fs?z~LX12YMfEd))zN+=j_9M$^sP2w4u$a1KsLUWlDk{%~ zwN1;>nR_un(1h#O-@jO|pUa@~cF>b>B)p>};r)@Ze+f?efS5A*^@MQZccXblMT6_= zIqU+?sTsgjFFIZa7nbRQiw-sJJErZax()ABCy&$(2Ouc{DV!T_-?14+!a;ZJ+guEr zZbOW20&W6d1YXFFI14(47@}H&!Hqsp!0)ATp_ZTQHTE0355?#-)ZH7Gsnx)S<7#eM zhCn^AbwRYj8WWgzZ(PZC*c!x4qCEg^P<FND=t3|4d zNrlaeC&U#J5DZBM40>tN2JK?9v^0Vq^|47uatW518>-i7nog4&IyTLwWu!V!$}$|T z0BA#K$PM)G?Irr$#ppc58i-MhNcAOfc3A7-&{G@+Vp1A9^$ zl28ti&b~9?s`JDaTB3XElO4k*6|#+By__T`Beu!vfPYXW_W#oa7b3qAEuhnv$P5~i zH_cU7g}epePsDQfaH5r%bV(yIeT-Q{KQ4HFeQCH683naL)?0bVGqo0)|%+T6W~rS)D$BA%AhJLP zblx+2#`WQaAf`ruZO6W3?2)|`gA8*As~3#4D`Iz?q7%c@Ne|Fqx;r2aD~^Zo&+zn= zuBv5pyoNfQ#{`2}HfaeR*sM8xIb5R%w}|M)MjDLxIT-fgO3;L2Flvl!c*flsbUIL$ zZB@6dER*OO?_hEumx5^bXbW)~$u>3%50~I-%LfRJjua;SgYvh^<2>!@>WW_xd<*Bw zF9wW@AW1#vE6vNTauoIBR{(1)eDZbWEpj#5POpFSSA>sbRg{Js6T7XZzrk_Aim{-tG`#nK_^#B20Cap;k<@_?L0zwtOfxKT01ccx={HrlS`Bk*|HGUz5 zFN`f_9#JvE#$Tr-8;ul5{w+Z=d@~s`@}lFxY^Y(tfCh7NUXex@L)}`-4s_HJ+Sdjm z@sPl1z==io4iZnIogPx`daxf{@es7!Yj{e$?>YlzkRVW;#4zk_y!>Hdgm*{zF%AJ9 zs}Hb8gmUDDtM~7yw}S|EgxKNDVL>?7oAsifvkcp>z2jnXl1m}tmws+&_Wfeuhx?`W zUEw+V1qOfEg}xOLu|?S0FKL0Yp=c%0j!4s7jkLmF3QEJdD9QqXi^wkmcOs7B@ZS*M z6fKdI6?sJ-UOBGv#SjqutcJmK3YxgXa{_g=Wr=vt%*W08X+8&>kb$$!FCxdkN6DX1 za)Odml&n#5l9D%&_=@BDc@f?(ibcoui+aCr*vv0*-QG4tq}x~QzF*Ke*Z2J*is3`~ zrBJzj#p`g2M*F33Wc*qO>$Da#;}?USP!OwG41>uy79gRvYGmv+p#lUo9*B z)@odl9)Q+~U>yttJ#nAbN~S#Q;#a~_!W14(L|t0d^zizXpo?a*J+aXEd8~wPtI(&C zRy?(d2mXCxe=g`A?oOkH;08(hCM7~iL`*2tkYjR-FE*ANpQ{}LOe2scqsUTGKD6v2;k(x01E;xw6PBZEC>a$!a;uTZQA;W0hSF+ zbOc_!e7A5|92K6)+#4x}CCUMQ4!NQTuY5Slvm(MP`Ntn3ya9}(oWNO%ah6AUgjZ&Q z@JbkjpaahGLCGt3XSxXCj3`&dZw^6-DuNK@pf`dLHPA1DBO{srJ`O>Y5rU{;p_$kJ z3BriBQ_l%OP}pvO?Z-D6gt$KLEC~~Oj4(vx9L4U~Mi|;K*%2_|IQ+rD#stLF-syUb zhFX!AmYDiRkOg?Svq>n_NQGUF>B}Yq@<;P#Cz$%CyxN4fSd_yHDcc>bkwSujR>q!f zWWmoT9&Q3ow2JHs`MKYmN1E{?$ng?>#M@#an&L-XNcj<`vVO$qg*}LT%4U*%>2|+G zmHQne(wZ=ju`#S>jA3N#@?`8L&z8~<<5TF2t@}MN9M{&RY+U}iHqM%A<8Hym6@-nu zm9cTf*v6HFjVott+zhK6Y2#)mY#fddDA3OTBV*&}^lWZ(_^oLR2b}{&T9x+u)BED6 zNou|LKiHZ6KU@bAb*A|*j?7~H5B3O)ap4j3Uw!WU7Z8py|I;HCjYMWgP!FbmD>)60 zLH{pT1O8W-B3zt0rq_bi)XY!T!q#%CIF`oLRUxJe7c-(&feuUgPbf)Ng>*aIxRYGk z=16dG;WWe1B?tM1)`T>?cn1!+$vqpofrCRt=@6bT#V0ng-F_Km{63N+4xZPVU@HWD{5IUR6fI{n0-4r2qmJkq zj5Y1cNF4)JlmnSezet2TEfXC6X^MobIHoi{9tA=a(^rz(68v+z$WT8?SM6+sqczR1Xc}VG1KbkS{Hmrs#{IUShQErY{9BZKo01EZyhe$L zkJKqg7xj39l1r2{DWS>DTa-McP6 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/__pycache__/common.cpython-37.pyc deleted file mode 100644 index 4ba8236464d3dfcd2e6b0187aa3149c3400532ef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4204 zcmZu!&2JmW72nw}eu$*LCF;Wp;|6KiB2u6fh2aKqY%6xtI!fR=2(W0dU2&GwN=q(1 zJCr4MnM0Kv4Y;|s2FPR2{R?_8dhB6N0osdhJr=p7zc)+Cj?pDJ^XARlnK!@p=DoLf zs@1ZAXXdZpwEn$p82_TdP0RQx#Xt=>yYARt%<>Mp6%Is&hZ@GyPm82 zf>+di$t$5Z<8pV#o0+~-@v3^R=FRGUt~>87V9w%pyx3jxmb%N{a(BgB>8^UK(|77# zojct9%JA0s8ZYwFSIk@I>%7coKyB~}uVQYKZ}3?@2hKG<&lkYi;){F<{cC)gub^-6 zRbEH`I%mHznwu9mo3Y4VZyE}(x{ZRC=;m%$lgEW*o zt)$ycdfWXoN`!1i^y8nHdxG zN7lp~+tN95CycWV15hz;9-HE(bjF3uBp={T2T~fu;=}-K?tTXg8OFEC2VxGrAnXN0 zb3skBq^#D-t}3=p3ZnZ3R$wBsa@ z@9wG+aPOrAfU;Xj|Ey`La?}kvp-81}m}!CbmGgNNakizk+BV100(Hg7v8fX%I97hJ^QIXpd|FLQp#JFlWgCNbbZM zy8yH>VVE(;#SGpzF#%U$KSyaEOTU2X3c`!;!EcttDow9Zvp~%vnoGa{)-FH;AaT?e zD>UN_qMp2=3Q_loh$OsIT%)%(sM(~3-0me%=BI)$P`LqtREHalNsX;D@dkKTu0uEL 
z>PD?&kQrkG?!i{!I?RwPW9R5Oxdh#$a?@0f)kv2>S zzkO=%8PCiwwtj1l3iuaCrL2&#V@JIIsR93XwT~Ca@al0124uJg<%x-Q2IeiximZ-n zayBa;&GEvTp?iDHI5&B50}!0E(TtoQFJvWJ0cI~{ju!Pgvze7G>iy=j`D}5`z^e1vLQXw*_3E0> z3&+MYme%vRd?ffY_PCzeBt2RJCaYOhR`PYu87RHZIcuBQOt!=q_~KJ%RHb)5Hy;1} zb3@jT*7y=%e#%D6+46WD_E_Qe#2T$+E8`8ms$Hl)-pndGEMH2jTuIcgB&wRaHZEn_ zRzP}HTYf9UDx=kGl^faW_%*&ZVP6B4QC-%@jf_sap4HKAWDeTR=ddu`wmyEHU(?j} zb7alOZ$ekH(N;#5c`Zj>J9T4!xNs7sgCLHEp&#|y$+kR|ianwH)J=FO=t^VYq%sh4 z=nQ20?HhR@dQFrAD7O6_+wmM96aLWN?)N&%Lh2iCT}lQrf@73YJrscqUj#U@L@+s+Kz4I^*aE1k z^3lf+KfJx?>trEN1_4M!6CEIxBa%UnBbs1ECp`?1J_@ZqP(adlkV1GWC{*P!#EPMZ zgM88nmD`F$3qn>umdXV_0}-m3fO9_`9Q5NvsuGf|FCz&dB&_r>d7_+yGo)-)JU}Au z1zk)auJQ0x71H1F(slK3&2=9(%He5eRKP2f-nVtEcnK!ib4C_#2hy~>pD)1rcW z#dGQ9hB^Qm(C*<~gRQVFBngvUXKx@t+n|eV6Nz*IoF(=a zXyWan9;u-sDd(+OGb@nT)Ewbsofee`^iXPX@Yh>#5J;S;WT_?@>8j+&rbNR{MD%>@ zWYoJl#^)SF@;t^*C6I8r%L}~t)Y6<1#wDspX`E4YSs5-py4RSNRgHes596p8ZeLIV zfLmO=ETQN|bE-@_-~5rDzuf|-)D2F39>y}rGq>^Wtts)}Ta@@vyoCa3xsPi+oiL0k z@gI_?QS$arf_M=6(MmxfX`3wqW6pUi87aGAjW|VMv?x0f>$P=#jpTE%D3Kn-bMHss>}4rrx4r zb8JfM$kxf5aYNHKj7-LN{%lj2e!TfPst#Rt>d=Z(hr;f~RXNpU;vN_{jOJ4r(X_=o z=n$oY^jKL32e`3ua1h2JZ#v>vm=Yh+yc@t=Jw6RBh?hx&CV{7y)B&42X*t%C3WAJG$Rm{MtH=*zN%wu~!<%VLT%a9cEW`4EyCMS6h>9s11U zY#pC&=TuW#`*|g)6Oe^)7@VrvF3Kg;he->jN3v0+R)65P5HICRI%+fPI(dZ@MU=wv z1cH4S`z>_oAFyLe7t~;4g!an1#JkdqJdWyPlwP>9WJkKZSJj}PstpONfZ`6R2fARh z2cU6BY|5mtRudx1C4o@9q@dPK*Nqe;zlPAXGQ_72=>~JtN5_lqr!=~*$~~NxhbK`B zK_0aD1^B8QASCCNr1%mH@g6n0!v26LU3Hg0DVy?>phK#HpJxM*G(7IV^rBMH8Sw|~K}2*>5F7+WVL(D`(%l)_W=v9l46~;d z^y0;<2)jqm{x`XL@?Y@edmU$7(Sw1!yrk{--uLC}?bX!|fwA)SV|G0zciT-Z^@OEKYjU1k2nmU6tROFj`s6oo2y28&Ipj|;uUrEnrc zfW=^t+=6LOLr~D7Bts^tWN*nZkbw+g4P^@oNB-!hF2$%4QcPTfWR=Sh00*ui%sVh` z0%1mHgcC<4b!#{I%eZwhFK_s4{V3c*tq2)*WM*EobN%p z)?j&w2#kPN5io)9`s901dy_m{-yt)y2jKhk3Ha?r{1o&@-KWPN-%WlIOtw^|jBwiY zrmfoLmo^3|Yt-|RsxmDVSSJF)liH=!px+x&O&2nMX!-R;*v_?JT{_+Pmlo(uXTU&9 zb#4?I+-Yz2mM~DPt7`|vxP!Z}aG&wg+eiN4>)11w`w+e3+rK#ZwcO~EFF}0-ei~Ka z5P&kMyg|j%U`}TpnXLZNSwnF_w_$qtukJ4(WBBXA`rYstU8mEt|HXQcw$MVAC>7_v z!+EJ?U0~kj{6#H_`4h7W$mSFVH1UCe{g?>CS(uIHF^$9KB<=bR+!UAU^7E8O8!208%6)OLA=4twOgMr)*NnvHRh=uH(9jd+3DKOx$LMjf-KpfCL33 z=mqH79O_beCdt8v_L^o!I`-T@(O=NRUVHLi@Tt$cOMoAiYiFk5><4xi@4oxIpUXS7 zTE)Oq{^#F%|K2c+|IkbR*y!9wNq13kgR|J^uz($#C&saLVlX{scWgCR>NwowR_yk@ zj)ysu+i|&H=~S5U8-tg)^Tgmz;2l|=70y03n(i}DHQLV@>L!!bJKZ$!c{f9o;l2DY zN=Mx|8V5aEmp>Ldw^7m|ssQ|OqhoT$%_m03;@{ymFFj%4$l)$VO5EdRv<|QE6|^p| z@)}x?*ZCW0%Y2oeL0jQ#d>!oy-{5D_R{5L!9NHTHF5l!AK(Wp*g6Az+`)CwrQO;+N z*rA776Xn+^=?zp9c3>Q^iQzNVGto0AR9h3UyJ;NTCkEb1(>F)Gx!k}eW?LvsJZTdg z(ghMekA$pF(ebm#!$6+xCH>E$LBMln{5VQ8S>6jWf7lgSDoeVl&`2+d6Ol?sw@pjh zX%L6>J_w^dQ3lKZFvKdF=fBI(o`F=ErGT(oPkWsqED)>39&c<&pfZ58IKBHwlf|DMC z)NE-)GGuLWIiaec6=$eNU$;Jld^V`DNdTEVxm(cvGE|(dZ4#fOwj(a3AH+_= zo37djc-{1Icu9)6~A$PUd%ZEa{rY{$5N`&0AKI<&FMoLFBQ z+?v=2)`9)C@r_Mi;jKh2_$kA>(xC$-E&U8sV4@CM^TF09w1Y!yxy!vLMn-GQBXh?% zVux<#9(xn_gl5Y$J8{)GgZrMnnV_&j6wp?)@&x>XliD|y;sDe_6Bi=+-Ac4K_`WmAn%Bz?6A+IQnQYurODa$)=AT*vRE`=c<7iB30;8@UHwq~2<^ z3Vg|Vl~6uLNiU$hGyZ@fK(;S=IB(O=1#E4R3u!`;o;u#`q`8x}ZP_=6qV~W}Ni8ae51#oI-H~ zqCzf99f91NIzkmfMeN-)a^}SrrZlgnXrM2v8Z0>9sHab%8CFj4dsy*1lqr+ndbi^I z@Hn)a_&!yOCL(B)U6dS&OEd8;=tCn!j7s+derfDvN!E=U<9A;o+(tBL=p|DP!bi;t zLam4f2)+S=3aMP6BB~(sk~ReragMsyI2uYvbO$^^?3v;ttvDDoJ!Rx_Lx+xB&Z!RS zYkE{G#B^*WxBZAe5@$iZvrh5eU;MOlMhz}}SqO%Ny@Z)u&(AfdA9n~tk}pN8in+ng zvsLt}rpGoA%j@`2Z+z*MnpVh}41=^vIUb4uFlOa=;Ls7pksu+<)MkPiE3>9%ZZDa6 zsXhK63;M(OabuJ!L*EY?!NYDmf)JD)Ym3e>97ZC|W^$e4cS&Vih2Y=LzmAfD{{A$H z+v08Ttb{Dr3dCq%<&wn@F*{8Ob!ZQ1~G(j7h ze?S<2(D(##1Y20+v-j>cc61Ken8KNe*0?mv!XI64Hys72tp6%SVv|8K9}1BOK{=&# 
zfJL}(F1`@&QKejk&WDN(|AYyhl|jb= z_tj;%mlDC5-z-SbO(q#-*j8ofDOleoF3l_y6lkS8J*KQvr16Unr2|`p0_TY&Wh>%5 zRmAB6>&k(WKlvldR6(j{-MPTVZ@q%!|BHS&|4To}sFi-)M(0%hxICvH=NI*3@~t() zR+=GbN2Lzp$LK-!DpHcYNrx;i;z00yT6C4FLimDUK0uil`};3~SQr?xf>Wg~=+f78 zmEDS8&>kdhL8+klB~`aj$r6IZwX5?YmeqOExvXcLDh*Ys-u${f5L*=dDGA^F2hS_5 zZT%jJg-dJUgDE+A(FaqCa-vN1G(Ve!VbqJ@^N958`B|KO<~eH%1ag1r4!M7jCIji^ z9BX;ag!UmFp9*nL&T)KkQDh5r>2|b^b!c{|d4n#c6q9{lR(!vo@DbdyFRQ-)0RK4X z8L@_?`l7c2D zcvY_|XjQW+EAU%B4<1InKw9WaI~fK8S?S`BfFSJRk}ZCKm9mnl+jxlExKcV<{cP@E k6hY3PuK4|aGSFus`lqocy_*_`TNLI~I-`#7s1fJ?3*`}R*#H0l diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/imagenet_dataset/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index e1012322df406a1354aa0db6f5389a42877eb07e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 286 zcmZ8byGlec5S`qIu2&cQ0xO%_7_qVt5fQ;g78HbSLfFi81Gz~^a@G9}zhJkt_HWW| zwZCBH_-Z^bXU+_VIdeH4H$d2Zf1SP50PC;e4+V(}s(47GKrsVThHP1^U|Ft!fhtsK zs=l7qq}d&`)rOpKyJ5sF%lkLId_;>$D4Ar$)NbX*`avjs(lg8@>0CLFHa9ZTwv&TN zdHdHLN;>lfVTal`eLV2F>yamnNemIg@hP9^ZX!F;xghS*Mi;J)dp_awg~#AsM1J)X g&Go-uxDeXvB*c+l`%eL{-n&yPHIW*O9Thrl-1GU0w6lSM%1~T!X+h z^YahgFAov&8$L`f2PT(bsEZ&t;WQx~D(DW|B|FY8p{364xOUC!c=qmhYVQ)xxbrpP z&KBzgoPI=_?j7(!R=OxB7lX+q80r)V0lqouFitu9nsglQa2M___qY#tkJoqrcc0h6 zTf;2AH;h+BDs;qS9V?-`=nXHtlzH3 z;8UR0WCxN8>7fCK-i&C;a&@O7Okk7yPjBT<_2dun;asN>Q zR<)^pu1uyaD_pQ`@R{!!CEDK!_-x^TW)Ah+?k@cA#>=1V z#Z-s2GA6Hz34DWYeqT(W`IDGR`-OiQ(=VSuxd6l#S8~>B=YD&+rDK`CjIIMJHY6*U!nxTC9ufOts3EMk!`6ngvMBD3)RZ^cO&>o&Z7EA$o`|(PJCppEd3X=f z?{YH*JO+-(Mm|)(fmka8n_GL3wO7&@=CBF0QzK;t&;C346^=y zfxx>0h2Rdn9pB1Y0R;p;-NG%{T21Df6>N(ZtZ*-o+jJ+ubxLj%n1SvHv5;Q@$UEFG zZJ`k0AC!DczNI~8nKRh*bbZhm%nW7=oXx?Qhqe0mY|}4%Iw)ADcqtU z$rfE>>X4>i(e-7`Zd1@Oco|OsGm%YTvmJ-SYbY73$3BL3cO!%vX4oe3!&l!4E1nIp z4FUW!*~-1K?wx*ZrRm9sVJ|cPrUK5!;(8miX`H-2GQLQ=85hQZ3If^>`oq<-QPgo} z0T1e`K&C2X7Rr*Dm{(O`3!pv?;R8IUrE?4R{M{+AX2WQ@9WaCj=2$fzq~w*e&6?#1WAlODm1*qdL58^XaTL$u{&NQoV_BieZ76q{F-y;gQEGKMM@<|lOKmdM2oFmAA3T*e= zD*!Wx-S9p%VC!K3|JhFfXYY$#%*3Qyw(eK|@;ThQDtqjIca^<7fx72WoJ7$?fgjrv zo3F$VdU+Z}3q>15Rn_uERJ?>DM6p-MGy7til<`?KIfufQ@gkP-{@6c-rN3Uq#~+xB zm1Ye;jgh~i$TXs8nDH@uqC{pkif)YKq+F4+7}Db?&Y*Y|1%9vZ6Bw|>_Jf&J_|X&4 zzzddw$E~(mH;r^Oj<~ql??N+xxp6ZfOw))JXnXNE(UxH?^_qsZ-}*iH)-UsnuJ>(c XcehCjE|fuEL?|iL1DP?{oCfR6(n4`!-rT zYs5F#X%MF+FJ$!YA0F{)nG7-|7~*N9RTF&@Qieq3WWlMXjxP!Om&>*yy(_ szTFwLMmyvWr#`$jN}bDaA;zxa((J*f5Wp!3& zb!JvCg)Cceb4K=o0Of;VTN2Q%gCPUH*%w^|{RSTshG9Uu@Zgg##vE*aFET4@VH4u0 z3K{E*7cbrt5%0yv)6+!(zv93AZLNGm5PnaU!B0Wq9zOrqNSMGxThK&PJd%!uBl%bm z<2FT8cwa`#@I0$!d7jhqJTGWPCNbHbYL~PU#z;)Dr`u(%jBt~N2K1^wqqHN7|>Q*DlCFK8;;LTv*S5G#ZZrj`pOh3@OzUkfk_U2=& z{@AGNb=T8>U^>3*eb;vPZf`aZI;PjbLN*`79N27QiGwLN6TBOpgQ#RVRxs$;AR0;h zf|wV*S>!dU41T-^760#$m{1BPXc7}OnO*vcP#2lZloti4#buUdxfdc7BhLyDST+${ zE*2XUq|9c}USJhAi@eC@*gWznw!jvVm)H_Jg?ySVvlZlJw#rT;pJ8X%S>zRVg`Hxw z#vR+O8Mc2Hg?Plf&g#tp>%_d?JlZ~{_5~!>N|f=tP}V3vm@ZOCZqc+`uUj<>=2W+h zMpR0KsRvN)VXb2X%_s-k)|usLMZCCD23OwVb^FUp&w~t9v|OTuAmSKN5U~QClEuS zC2`FZNF!Sc>D0?Cc2Ai!##jT2bT3qiVSy1?uH&n2T@9M1%J;Q_X%vfU@*9pJpy4>R z<62JOtCqt@bFQm_t2Rx$19M`g=Ld$vFoLPIrdit_9m}y72w{qzsWFN@%eK|$rs^BJ zFxdqA8p%RW#Jx74xaNAE>33X*Sx!T3yUeszsDoJxT<_r8(C(ifB%O}YHme)y(kA8~ zuFY4m`Kxu$ZNJHxjUj7rhyWpU!%tD(NPlUQ;WcilAF2K!EA}0eGQ7|!6R3P&CMxfDr+@2<9OF`ak zHoo4IXDbq_c&m9fGTo`M#SLNJZN3#?dduo6glix%+9`iDGV9qlW*vnbo15gpIlGDi zU07P;q4+~Oe&`^g9NP-E6RG!K%-wTr*I?`BK15s#*14s7CLNK#FwZN 
z>-5N>Q>N?RueXlm$=Rj|}MGbWvhi|3ed=qAF2L&r)i9KPo#`~9K94}MTe zCk7|MTc;A8Q2fAp3q=IJgKuCI>KH}o6?bUJV^pOoLudXtRflx);HIm1Q$zwW#e>r^ zI5SVOQw|vbWcAM#qP%}mgi@p+KuU--QnSfjtg-I*I zvDnXsS@PFd<_mcR=Gu~2wvL{D4(+)U+Vg;s!(5nez^P~Xml9#RBZQey-p+Zwh5+Xz zgF4a`;Rtgci=wbod@2NRfm(2WS~-012VcsqLcahCi~GM03!w0*KNU_L6>)hj?f-RH zLd%c))121nKb=;rbea|UO3T1C6_%O2tzczv*ZOFgSPNzX?PVHsN7(u8Qz0yc#c--s zLhYZQ3Jsw@gRg>b7T??}asOXK`DprBcp$tIpZ#OPTfTPib{;d9*%T|i5W@KfaQ4x* z5YC4S;O{)}fa|NBbZ`9pY?k2;4O9O$3N$ocSmYk|$Y z6#FWZLzVpQFR)_Jxfq@Ybr-@5;E(i5^6n7^YR9Oh)~RqMJhv=_=NE)fU4RQgl0O#1 z3#WztTDTUT-p2a9KZI-j)8U!^Dvbs2RugJ|7oGxjt9Jmi^Sh_QelMlnNocP?Q#iGx zE#WM$ajBs;$7^#WL)7>x`xnED9Jl2CPdJ1A`S2pv&NW&ToCRtF?aN_>P186U`AXV3 zdP)@7(8U!t51w<$9g05}*aB)5URwNR<`B>rErv74;?XK}Wr@iR8QhuSt(A$^3U8fF zTIB}#(x2sRb7O6D2`A?$s__NFU`+W!B3p~O*}s(Z&xDobq%FJz{$57=VmJ#r!1MkU zw#@67*~+#Uw?QUX0CyF0o(3+&NTA!-P&Z-OHM|b#DLa?IJE3*HC5Bfi1_OH5*|~7NHKIwp_e#=x1HCtf zz2OZ|wHV$2UoHU8+OY__H^QsI#nz?PWpHAJm#`58QxqGelGk{A*ffGou7fn@3%fU`UZkQ7mua>t>BwS z-wHQ_Z-=+or9Mp-1t<46S{WbZ-6DHS~DCp)Zw6$<1)gWj@2=33lN}(M;&}3P#w8V zLP|B0brD1WNYI3jldfy%p(8MG8-am8%9&w!V6SNcB~cR-I>f3@EQxCYgsYCode`tQ zx9h6|7V!{^KjKDxEte?D^LUMJv8XV>=zDZQ+j>nGlnsyk6HMmA$F}^S=C(U`e*>ias+kpf8)H|^f%WJl*U?DlVCd`r!w-{X{HB4(V-ZSblTLL_ ztmPx%eNA`J*A$G?a!u1<&!6x2Dg!Mc``m@*^h$$J4#mNGIkjWDa4GzwDzDrze8~*kWs)b1T6mx0$&zo-7MvzU*e8+z7?rE8) z9w^u3PpyuoeBUumEpy-5w#`WS%ru-{p3-|aH*WQ&3$gCpc>Kxtn;(897JABuFvi|g z+|11-68EZq7D-Mg${3z!97H+C+#@kYQ!p9|RXO;gtkLP14)cfy6W(2vf#MmyRtj7a zk*||Tq6}AH?;C)R6ex`!x;6m35k5ZNwo}|U>qnQ2KCrSP&mP7VQoGXvGK%P*V zc@)d#3D2^RxO}wilSEE!NGOtAKT>!cTWKM{7b&!FATr)jPC&nHWpO9-+n%0EbG+mo%1p?4%F0DWVYi`#G;v$|6EjLOG zEatil?N3`s-V>YH$TJAZ_e)Uk{}zcbr{D=8FP6n+{FnIS!JM=t#;wJqy)4e5uOyZw z1wHsnb5dDcAZTe0eR;HHMQT$xhB9F3VL}nF@RZ7BK$iJGkJ^H)=GTC830M|H6?2!x zRg7QeST9Kxz~d8_a~&-<;T6efu6v9&Q{A8S!xGAfc_2d$=bBb@-5Y zs~x@gAe^&nI#uIo`z*Ku{|+Egy-`VQWxlb)QeGq;9!Bsvk}7<-jU*C~0A z5*~@QYdiv>glM6NR!UYU8`nBGRj^H!=s{sr#Z83;5y}*)F9V( zZ+lHDWnfR81MeY~b9%?B(J_^)J&p^d=OYjJ9w`7l+O^xR!+8;9ffaWzzDXxQ^+zemY~APUn;>=D)^8} HdFB5B<-f50 diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/define.cpython-37.pyc deleted file mode 100644 index cf6dbd2eb69f2c2f4bbd8eaad58ea0df38542111..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4153 zcmdT{&2JmW6`z^?;4Z&Jk&-3Z`EXP<0Fl&6eQ1o9W!z;O@lAzbY5{bt^q@Auv; zKP;8<48Qze|J3~39Ap2a!pUz2ScPIaPd_ng2noOj(I z_Ysqpw4X6)N6u3#@T3DP*0vzsvCNGnG^o#!1*AKc?2L3#PvzWLI!2;}k!WEgEFjUl zk?7q>8kg!xQhSoRj7* z`@>$7N+&-H+jc7R<9a?-^~y z6imr7MxDZlm7p5cu;S`7_LK)RvW6MVXz$ZOr?cKrse&i4BLjjMP2k8Ta0pz29DzsR z6Oex$6$pw1C4w@+6rf36(T^HY#cjMvh5U`fw^)zwbIww+Wo3NJZV504%rVRba}CRZ z^Jb-mn5#!LTA&(XbL&$*?k5*{~_FDZ?sY6~m^%rdy(2-WP4QZ{f9f zA7$bW`)%+WvG44=nY&fZ*jDXe=8$dY)IT!oU{+d(n15!2*~A`yL$zc@)(dRw(iuM2 z5)*4@Wc`q3R)%%s6x=mMdkjDDZpY1-PNOc&c^J6&pgyl>dr1?zP&U#=5~bH<)avvj zRYN;PN2TqRs38M;Elwg})ZEto#{JEB*p-pe?#A8jqxcErL)A%A?XRAyR>!Kj! 
z=?t{9@ku|5VE;jXyBleH)l{yIDpyC9&Q7B{jDq~5Snh=bOgho_cC?phw=oz*eX04b z=6m&=b~cB-M&Ha;^E4<9Fvsw^40~|Bz~AX4okyK+x~F-lZCd1Cn0gCK`tGyk&A1mW zr%{rI!z5Dg+*n@kwALG~uobKDLDWxTb*~#gxxT!)H;5EK6i_*{j~kUSIk-Jj1#VLQag|z;F43@G3 zA#L11E^fP%yGNYfKJD@$-xnFfUBhJgxwU}&f>Lo(vQZL~l7s%N%#o#Se#m7B)~=jJ zo^Wq6NSh=|0`)nRNP0AbESGt0WS9f{kY%_jWc4}q$)1arpcWFA)aM_0vV>CkT**CG z^3Rm)bM5SN?QD6roh>Kr)HON1@8iw)S4Ja`Mx3L0ppB8Ro?~+A3fuQHGYSy1g3k17 zGtkKW%v_`Bif4Vm83dh}!m7dmRbhpl|9U0#kCsrrfY#@^UTbDI1Nf+1!m+j2| z>-hNS-^T#F$8|@GK9c2Ek%|-TG?Z%WY3Ipiq$2I7s?kpzYv&aVse^)H=C4O1VaP=!#PC|gfl{V3Y!0$kCJ(An= zL$)t=yQ!dXcuJS$mvC)3=eDahQyey&6V9F43v7IeI+Aa*WF>P+sxNSG79P88y^9_n z?_@l4pu>lqDbHWf&p(7~k1k7dFqFUBc+GE2zWw_NdB1My=`;?_c1$)x{Rr0D!NIg6 z+)EygA?(t9XpatLbV_uEd4|#<_(R4j3Ta3UE!n-xsP0? zwx-Bi#vfVgUE=?fOrUO{tGc6pPCfpD;3mN@3El%xg!3$$|hB~_#x?iKo_l52Q=<8LTN1hF3w9{!u+5*>hj1OO( zZzQN~HHKYemI(js6#b*p4%lFCU-9Y zs@0Ofuk^>y+y8n&h<{LH^0QI7fiLO-D4}FuG-W6ctV3~N9}1cEIZcPh+@{N=*DP@9 zH+_^=-`^@Wi&FepC|fy?gmU(+W(h4oVVPm&u^eAEqUXOY z9?SdBC5~~Fy(|D7hAu;IS*$xBJBgckNg?r#YYK}(6)uRRXguTR^^z$VcS#uEcuT?( zNja&+)q|QTnvyA-O2;{r2UBsiR|n-)>{&dRHq}EZX*YjaBsKJv4yG}uZfdGHCk}BY z9xX8yNarPOLbM*=4LaO>;UTnHj$s&!`G|3;WjF;{Ml@S?p6?7V)f^ z-IvSaV>_8k=92|8W9H1fS?JiWW7cE2{o04%Rj2zKZhd6T0ZsjoPh#*g+M)?v2zP4WnS36}uYuCsJo~6i1D@)EY*qGit>f zssErAg@e{snA+VzoVwkC3dgD6-D-8hpsUhSR%2Xu5Mar))EW%1a=#nJX=Nj9sRs|n zS*yepWc6}89K@l{2YN%Sp4F|vD0N#}w{}yn6~()wFfFY2hpqVASJ5#Vg#(qkx;5y4 zq$t*n#ng$y{<@yW0W`^EGpC8Ec8D15(HP09_6ZaLQa>jEr^#!(sr)3B8)+dYT9*mA zLZC`BY@ANlsOEuBbb2{XC1ySRgV3w!|Id4?8^f(|H4dXV*oi{@@*Asny6bmZ>%sa^ z2lv83G}O2H!%wcRZs1%R>xHXtw_>n3UfmjXyMf9ZK;LS`S4O+39|YY&Hx7c!*dTfp zKv)&IB)wDrx^+ob@Sl+@vSRsi!J3f^s5$6akaKcxdZNBpChN87#_2~aaq+Pp$EZL? z<+vAXkxIeVu04a)k@l zyM0)rW+f{F-XL|TIC~RxPaopTC1Ik6BnuAfU=7+fVb;^y#`lNm%rv^GSRBZhbxg^9 z_zmdZYtRocjC}+Ase$)QNCVviFD}GB{8I_7vat>=W7C{WkJt-X@B+F~Nu6Fa9Hb5m zr%Ih6tY&s#2YM9gYuJ}L!cwHCsL77QA+2EkG#uttTBLO{1v#>?XIb(C%vbm#G9>~g zhg4#3>4*tu?&8Suki;Gd&?Pk3@~q^7QBQF81Dw8>ov@Ny3c1#>Pn=*5^U(1Uie`1< zm)Mo)AENIJt?BeJF+A-mTGkS{Z z#dD}8%x6n7S=^lD%=(_kitUqqJ9 zm!+!j9sf0r-fB~zOg}oxQEu~J^k!L60$-|}N3h0-eH_Yn3};@yvEam9Rah33FL2J_ z8LwxlV%|dBgBG#{9`lXV4`Sp-##jC(+5F<5mbc4U8<7&`CZ)&L*q9P>AnU$P%BGA} zDkpZSmG8`7J@+ps zGjQ2;GqZ2$FJsTt5&g{^?}r?2+RR)N+h4*K15efWt;bRu!c+BQJY+PMnN`y~`T^FN z#rX8`IIW|w4r9$`Yvfq7XRziNtFGpOW$C~1PV>h&$sE?0Qw#a%9QIkrMw5ADkQFn} z=d_SbS!O{k@?KxW1+xIm#p8AMWwOW^M6;zmP(v7}Xik-et6`^QL|S2AQOp}rSWJ;OLSZ-BXpO>#)hr&5z7Iu{2x*fUqVkaDgx)p~3MJ`va^@m7%qw7~rOlo5i|0wt%HRsd(0u`t= zt2(JUJ*lWdr+|QL&PXUn1?uk`2rXEHw55v#Nfob&@ugT$5Y5T_LSiG3MktLdz$ln$ z1N#OTnWgCGrJ`r@%np+#{`nWqHJNoB`DXn$e*4!a|HH2#^$S33I4R{gLuO=?Q*Y*3 z#Hr6PVC2jYCJLY!$Iou`-7gLekXg87$|N!9uu#^dXO$px&*NcZ1`#-(Fyu;Jr&$$x z$~gK;=pj|76#=b_-1hOvp|`MOL*8ms)6!tH6R7ZEw~Z@AUEjnI62u6d>#d!BoaH=k zQLj7L*&6NUw;r{-sFP4+sS$+*yzYq;b8x|y$iJ2)^VgpTT14{Uv*u(E0~M>bT6^>V DLTnYg diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/generate.cpython-37.pyc deleted file mode 100644 index d7a40d5b4c57f8a910634a581597d4bfda6b6bdb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1567 zcmZWpOOGQp5N^Al={zPgyC5MUlof(iE(ACr4x<%<1!)c&MOq}J7O3Tk-AQZuh1(e> zqt1mLj$Ak+ksP^j;otbmDQ9k+sB&i>ENo@7 z5I%!mXE1P!lgx^UAX~b#w)V~44Pa?7ffdO=jCKs=3=5mt>RTJJy-}8r#V}QI7@4#d#Hnds|`^@XYg$;_H|lmT2$I)QnIyn z<|Tmdv8oaUANQhTC0SauJJxZ<)KU*y6Dx|cwMb;TK1z#f)t)uKgk*H*uC~!2%@@YG zZcpt%M6DPFccF9@lNg_g+rbr4aGR*MVc9+9q>m>)y5&pUMucD@Tj~)E79EfPKkAUj 
zWI#snIfNd*0hLc+%^YFI=0Qh78#cWU!hhIgH@0Du12eJ-Sk<_uiWI77(jVIfe|ZAF z`uM&GDL46yy=W*dS@saf=*U=J!%@BsJ4Ag13`o&dG3ugv*y?o@PVP`J+aU<2utklx zW&n`H4GDR@9$ksk@i8&0R!AK|kw!uvcr5Oj1nBrS{4lGUT4RHHPn zGLefy3H}7Q{AgOLvwPE*QkHT|qoH{g3?o~%L`TN*Anm)Cq%G@xLS`XGU?Mf)6Zswl z4A0Q`M2A4Qn2XDFF0>7@c1mEa4p|M>gsn2A-C~tjYxySl=unxLD1nzqQ5+Wcy OYPbk|9MY3FLjPa1@S`>W diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mpii_dataset/__pycache__/prepare.cpython-37.pyc deleted file mode 100644 index e3ee8b5c9dd396745056c36b12cdbb0d3fa7a48b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1791 zcmZ`(O>ZML7`EptlgVU~t*QbQLbGsyNGNH$hefLuTES|Ms}&%DMxbFfcA9DD3)^Y8 z(d0t;0js?tq~r!%x$q145x#Pp$ruRYTe74XQO_j#VzAFut|pY;14g5v%5YWjVE z(4Pui7Xyq(Pg9gq17{IC>*qlffbEb@Hpqw35Nwxpe?ZY4f6w9=_!VT&l(}J=@ z(D;Ox#ZHp$IF0MSt{bQ>k;=CT)9?^awELmhi_ali&pl|wn525AWb>fpyho?L82g}`mcr}-bg+Gw^ zeEGqD#n(EkP-kh8@;N1UrIY5#G3BE`8YP!jm58~tPRg{9PJ2!5ilv2MUASGKz@AA< zT~}$96)j|!7K>b8N@<CV=R+aOA0FG(^(3_hGN%(kxri5>vV)H%&(hhmWERg#7JoqtUa}`yd44pR zUsRN-e4adR4s@dPwcU>iq)U}BI<79{pta*x8>!dq2{Z@?M$lBek6r8<77nlv){fy~ z3%)(YDi6bhb_jS6d)lLET{FP!Js4@UBfX+p#Dt!uQz}ic*&guHOE{1^``7>owyW5l z(2-=x1!d9@8B?#ad}E6pt1aT=g-A2bOx4j+je3oxGcEJHELcYa*cdowoke;ndLy@* eca@j>FfYj>qaVKmi0?q-VjqH3pO1H;`o`b6t`Rl> diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 7ea9c191bad6ebeec7b4aa728c0c4b6fb443d882..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 317 zcmX|+zfQw25QoqC*Ak^X0(%!+F)<*7C}M+FDlA#7D88tXoH()_P+x3 zaUKsIktcaV&Q>su_p}9%dq$ixpVe*cge+WP(NTZ;u$nLCi~q^Ur)}2@i7$1DJ|y>% z?y&V~VQE?wy>fo^w{Jqa)I@|2FozEe_v*Z#p!cw#e^(6Y<8Dy41%wT!rLnPSmfPq`K7EvNqG+vSp?q{b%kya>UHEqyZzDak5HDWbha$-I|U=?sJ(ARj>)VTMch%6 z!YCNK!=$q1MkC8}$If?hIKAvHo-$bLV(h3q^O20mL}4k zyem%#b(yek+|LPmG`u{S}!5Ze)_7FxgPq)vu(!%v7d5sem5~tcd$A5D%d&vI?uBEU_h4 zLs`!Fp^|cgQDw_)1^r8Gm9Cw=!-M;HE9n{i^8q*2a^dS$oT;@dj zC~z_A>$mRTje2$=!<$M%v zV>~I}^eq1&v);Xf)_;+tapy4Gc1=_8k=ecJg|_8I@1l~fdDe6JS?s%TGCzmD52*h< z9eVN8$PEU1di3=A^eu-+ZWz4XIh7Qm5ja07PoYUPVuGZWv4R;xFp}o9l`WhN53P73 zDM786%;iZlRRXR*h)gk(byz`=b<{~A%*I7?Hrf!9IvY;g`8X$ikQAV=;)vI&4#9^W zijt-q3`eo~Xc(LJRt$Vv;V3@b)0pDWcRiY+LiKbCi559tpRW?ZeoWb`lwCuHYps~3 zkOb45-7LR=vA2+-7m-PFORi@BnoR#9{^jgX^`bmJbHc^jr03{IQc^||I`3svCepri z0Pc-taHb-pMj~n1)R@ear$un73I^1yHEfaVbRC2WV=KnZMta!S;|)g_7c#x~YE{pE zvo$nnwoV_0ZV*Sh8;I4eZw;_VsBbvl5Tee&H?b8ktYEsm;nSzVpgVCq3NB1_R4? zI^7(#h4W{)MLOO&z0X7cC}p}c(%_B5hTxDW-}1Hmmv>ow@VfrCem98oE+Xx%0LaD8 zz~LyFYvW;z^o?*^_pM+jrA4G~yYU8r;PN@@rO*vP{s9pjw3-odhFpHw$4)%r!4VMi z@m?4>dBZ5Cj~Q4xmusS05B$G8aN@bQ8!s9!fdly!%3h(YgRGS_$H?&w*fM~NOm zYG=SNwno`i z%1HV0mnb8B$$y0Gc>WMu^f8=rMq_+w;i47{;Ai8HODCR}wCLuiZw-ZYH!wa&8+yIOV|D^XX!;J)R94oG0CmKqKLi&U zKmsl@8xO;^N6!+Bbd5JLHg`+ui+Y>@iO_m>oI(zJ^7CGn-@Pkm{-HRnwVD?^W;fg%&VvK;$_yF)UfCBx1fnEp}i8X zOp1Fl`c|_(ZBhk}jY(sFby5Qjn{j*6m^4|NoqDS7wkECpQ{v3l{^_hdBSJ!_p(xH+ zpW$*+r{ALm95tp#!o?cykcS+Hq1UAfEzJCAJgIQ8gqM8GzBA)4 z<+aSJg$c@CTiEgUZrw*2q#pPjK-1Sge_o%_RG6U1jq9(!*`*yj`k|AzH(YxITLm$KND-n7 z4vgm*t^!BGDO?@$B;dRVAmF&t`5Ags`CJC{{)YL83Oe zOFfuG<^yc6S_~hJYwL4RC-V0A`ua)SXy%bWbINOUmE%`a&vV9!bL^H* z%$!-6)Zfcj#_gYHnu4?kyk|J#o<6Q-K3})nO)9x0bW{U@3p@NUPRl#7QF6o4ComR! 
zowJEX03;gO`iT|}ogg)I$;L=34!kcBa7~KV5M!9)0qaBwhDi~@xGzTzMO7_!7eSb` zb_>xL;U=L%bW=hM_|_xH7(yo+8~MYeV0#FD^4I7L)pLTRM0(L7@s#_acbhd)Y8u!vBtECinp;aeH&RZE}&gX z+aktU?v39#Di#FBYs}dqO?`rF@DBkDJYN=MT(2S#+nU^ld^}hQ{)tJ;J>8oGs=EV z88Ng`O?NRz+=I8&rT#CFCFLp2`9tcnDO)gyXY4?8|AiUiql6?b8`q7?Kly5DNk{GV z$O(7{OHnvfv?<(50T|Ovs-_77jy$T@O!G^8p2}|>zWgEzndKS^N9SCI>)g+7o**Z-kPg8sipsgbvSs4u9 zXqVe%75|pVsh0h#Ju$m;HL1dXF~K*`{xLWdC_7=o-i_HTP~Dc$e7(T9CJ7r zH9(1UVNR5fPNj0rfjm0b{D=GqTytVo4mqb>C4cV$8j({>NmcQ|K;e76JahxEzwb5h z-rU@bz)}9kUpFo-3h^(hJUE&tyn`$Lw-iDNB?lsqS{_)3;=n!>lG?0+v+V{hm*v0< zO79D0E9ah2j&}F$z*jE3M9+rx3R@~HdeB}X4_L3T;+XZ&PW4h@UADM`E$(0o4_J)O z7NfJJd8wT&-A@+d_3A+xC0~`*jG9#yHK(d-UM;Aas;h=-sztS=merZR)q)xLHPE+$ z-dXhcXs@8RhI^~HuZ|I(fZwcoQvK|n2rB9+^)&3dGwNB4yP$rq&cfFE zBB<{^s~bw_ruO&czI8=>Q4JRLqOR-3i{dZx_Q5RyuciOtwN!Zh^OjK0sdE@}Sv{{_ zfIXw+P0@Psh&Zs;l9_XLxU;=MwFd{dkh9_j3L1=1BCwPUY-I(GvIAE+0ahj`DG#e( zLbQEEvWzIrAVRZP%%y!FDiZ<&6apq^W&{Xu_} zIqQUO`@^BunY+%$Yri|(8fbX3{Z5#3cvgo0-dx>`w)JYF<0RaPwR!cz>WzN)MyDHg zBNKkChjC=C4WhfRt#0m(v>D;KSFd!EPOOvF?RW#h3{_E8y4xS>^P@dOv3w3!d=}tK z=Mg9SAKi!V(I*6d2N;UDppcmkf3hvlhwY@UxNph=Io+B!Bi&&YHCC=+ZDIq9J1TgG)6 zS9~3~G~*CgQ+K?8H$H@{os|M|p8N16g?v>eb=pJ=$yE+LwoNY^BJx zlu5p^SH##xtv0DSs9BSmi>Gi?=yMDADF@!3YNmGTT@ZbZX7+9cY%O$2)yAmjox`?5SQt9;g_kMk9rXRW3* zzk|nwoy2vD9<%dLDD=cw?tYqxg!l4&iQVJc*h*xI6(Em`Kdc9Ihz`v~*JlpO>DMq#QTbPHBp znrmpA1nGo?msr9FGJ$AG#`H-8!CHkVr-fUCWF>qa=~YNpLY5?e8cUW0JU^+Cfa${^ z?{gp=hZlh)E3rJTs;5@!LNIlR6&5ubi-aQ!$v$kEpGdX}$$GeB+V^40N#7D?mZ_JP zShDV+82hPD8+5+3|CjbS1D_(mmS=u}H8HCM#1r#Ng4YOs1!$Gc>s0AO&N4MhbSJSxUXG2~w}HGIdcdQm%4vw;ea3YnRR<5jTrOTm6qkiYP<1~Dmr>j{}!r70d|PIu7hJ%UZk#W2%^`ZGoE41o03 z#6{Z{W#lT#l%LHX(brnZX0Y*vO5f>k-~mz2oM@zn*-R%!YqzsANHRy`-^9?rLFS@_ z&sxqNHZ_##FXD1Eh)mY#nrM4!w=tLC#JeQ#0$H7XPKxLQav|*-;(D-{yG%vLd`J#$ zK<1C|(?YwG`2)2P8~A0835_MkxyZkm|9D)Y%{)F2Kp3nFg0axJATP*s_Dj}FmS6WP PZ0G$7g{x}i=+FEBfm~K^ diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/format.cpython-37.pyc deleted file mode 100644 index fc5617c25ed47a3269c96b342ff75c099d68cb64..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5030 zcmb_g&2t<_6`!7&ot>TikhJ<%f*Bw{w#rB*Ibc#DgxG`%6=YW|aB71YM%$y+NV_wu z?vcGxX1T~5Q)M`&IINCvN!D@-ANaYXEL=)-&2HVq^2%7+dECEBfrVt;b4j2Vb*S z?z?SQk6FD+-)no!_?f|NUV3Qo(wW(=qUG>1T4l6qoZT^6?qlp=Y`2)QZ``_Z3*S|s_w+G=yo*<^0YqSh8*P)bw#7|uJv6|E4N@iL-bg#?FQP0wUGd+G{9_q$KTiDH zsf<2I_R>wi~@r8Sk|_1wN`+`Be%b9d})LbsJAC7M38jX#=hO44^_9l!$Bs482Xi>PJ7wdM2a~=E3+|W6Z&UEtfT*{ zJ6ro{KibM7nFTl{aqarn?Rf8YxEJiDBG`!%nTq#&>4P`6_D=?p7~lZ6ZiZPXqim}$ zJ2>TlPnuZJ53|j|iE;yQ5obZr!WQyX0E0RD*IP7sJ!u!R=rDX>9(Ho$lBN;XDs~1JIqEp(dEz*!>%!MhOeRDfNO1x%DJ7F&lqQLtDW2#SM#!VqMf^j zcF4k&%RFbLsh6%s8V9}yI5luah4=HLxM zv52NhW#TQ9`_OW)7@?+E!lITdmhlnaBJdi4Zv&{t#8891xR*sje3LqBMMFCf*ds14 zn-JO7kMK?n*(yxgVKwH$e95S4_>mz`&vRzT9f}k5(0R5G{=U=Sd=R0ghud|180mxbKt@&a9Zc)sHFQU zdFh@kI`7-4I{!@~D3F1+2q3n15snFIzZha+g;g8ngnQ>GA4k z=4kE_8*XHa`J9%F-K&k}b1$EVGz<9xQF_G0>qINBjh8fc%lX1N(XKHAgM+@eAgIvbN?0PS^syuNbQa)>69(J1?-s=}6ZcH6iKZ@Z-D+NpFvqAjwL; z#FzNO@5v(=UxpmZy0&|*2k6X6$Ek8aG5j=yIIfI5dm#+ z<^NlM*MNnrjjOOa(50vgOEL~#Jx2sa#kK@LUq8M3Q516j=XY*x`-o4`RXccd<0c4pp2p_`)J9lUY{fHKg% z#t7Mm8|y~Z_=9;wg(7OrP(($S5Rslr z4x4ZH(oWcu?*N%DRF!@tx)F+*j5a-t6RyMPV_lg202E_lSWsksOdC9=!2Ot_ny!e$ zVJy3E>gPW{z91$gT`eY6#9GXG5_62?$Jx0#5!BSXY4s!4Enq4F_;q}g1@&GEapElu z`~~k+)U8|hBN6s`+5|ow`N*L$FNy>$xAgZ!+Q+GA`@rFRO?-!FS70(9=tELgFG^HN 
zgh@A2ZZ8cv`lWcCX4yLN+Eje%l&PEu(q>YX1_Je{a*D!EQ~ODh;K&OKz@_*;D2O)+ zP>vN%B3KSZmo%+x9mFXm>uj`>e)Qo_RW%w8V9rSVeJM!6IuYx#Onif;TJpHB({^h^ z=ka!t-%;6e*>)iZDuJ#RLM^f!3rK7e6+C@Gre5?eBmiq6|8Ka$HolXYBI;heAWO)-+5Hur)w|rpP6}+3(%``Cb_e|8nJE7 zKg?JcS#*mi1fmqHs$^S5sV4u54sF5)J6)RX_SA&gPGknOf!^sgAk*qWSDN#>eqgyt z^(b;`4KbVQA-3JJw8`l5M_-B1L0qXSOaup~?^5~@F6mq?P=!zgjmfmXV_i{BKx;MJ zO-?svJU=t}<#JB7)|7L7ozs3pwsV>7#uH9x&Oe^vdG$Fw>nKPENzY4B^*jho7#4&- zSH(5r6?aX22>-;ai`rej!0B|}%T+9S_EfPIx>UT_Aan^loAR$`*u7W)&nCWHej88t z6@rbs* ze-4DNF{%=TS&XFClyGusUs;#Vl||Y)I;CcSg_DA2tr7=^z_UhuLU4 zf=*b6buX-$&3eqeAk#4$F^}~jt+4?cURcw0M!vIx@jKjV?FU5B-50wr-Vtblt=!!r zF23P1Qtm-f@x6Rj#Cl18n(MKPQBjcE!a-#JcMwI+L>E(r{Qdo8QDl4~c_qVT#l@q? zlY?Y-5Y56_A;LqRSA}?<7N^^j#d*m^iJKV73n5#nbDnb%NxoH{EB8qn zWk)R9`2;upg<%N~B-Xy@Z-n7u7za}b5-!$+k=kbVTiVc?UXm-T>3}23O&1)IZhGJp z+?`vEceW)vb+_)-?i=Ts_0zo{ou=Omnqlo-cI$pUsE0b{ztJl~te>C$VAY-4o7o6N zXa5iZ^iC7G2(zd* zWYL_5X;Miw)H*h`+DL19_nk!?(`wG^q*UFgEP2j?ky?`?N^+bvufj5ti|dIf&)W%s zq_ospl9$W&iueScDyltKW9&2As`hXXK3t2p4!Ad$4`d{?bH-_^jm20W@4(_VIfOlYgjZ-@s zaZuoGTMgCN*Byfw-G6=FQqHF#s!wn=jbX-&_*s&zVVBLMcLQ+KWivp>=$&}9yIPtsQ%B)C=guI$D|L7)Ik zia$V#)7n{u?Da44QV5>Q^xR)oNj~@Gg8RDAKV$sFUuC?HGB=a={bc5!7fb&%%B3$0 zGwBy69Lv~ux+v0i_x7XDzr5c4(2o~UKF0z~5ZOXTGAVLjXRu$U9Btyh(2YoWCa`&w z@xWD`QY5()Iy%Y&aC_N&k5$S|i-+rl_?4M&qyM$+!IlD_8u@ijNU|K)ZWEr8J{cqR$N0q; zm`8sNHdG%48p4kx=E{MrXn_(}{iwq1ELx^gY{6LdrRl9R^j2ldnd*gQ5+B2*cwAi!6$?+A^;)0qX*>ERsUE(zXC}rI~y)Zrpi`Y4fIt-6vVWmMP!42is~B#3jSk NLxyDC-rVSS{{fJ6@dp3^ diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Dataset/mscoco_dataset/__pycache__/prepare.cpython-37.pyc deleted file mode 100644 index 23edcf632897c11513c57daaea7f0aaa770df19f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3456 zcmb7HOK;mo5GE-~qGb7zG*9QX2nsj?6e$VPst6Jz$g8I&LE;mOmZHU#&6wn}yDKLa zluO%VQ3U-3@vXh|(tCeL!D~;s20i7}ndL)ttP~AlQoHlb&TD68*LM~dn;JZgKYkhh zeoE8+#!3EEU~n5=@hyBj&C>(T)~Wv5*w z1DY(3Sm06a&=J|khG)S#Sx~|I{mRQ~bQ6#TKz=SEErnbJ zWD$^GOURN!P}?$UE07h?QOk7vRZtt=5#X;!*E1b&OUO}$tfo4^-c||w1hHa%3}jp0 z@t2M0ZR-6igC8x>6Ep8X%0@wNcqf&uw^QAK#U<>N!agmu|5U>Ogn{5<+u%o^SWKpRtYTAkLW^{F*&jB1dn#Qms!b2n`0v)Cp3m=ad9>>-P2u|JsG4-UjElTxxPEm^29tjeO0 z3G7vtrtkFvMj+a0E0{^?5S}IrLH(s1y8prrV<`W^q&Y>ozP_I31O!Fxd?%acZmDiF z1Iv5SArAbJTj;5Jpb+s_238=;dvI@vZfNl?UusJ4Pjc>@ml z5}`l&GVdHiHzF(w_Q)nZ*s?p&RB0~%Io%XKbZTr9s)>43x_%(Kc0<*WQi3;%WMQ_W zVsatvF3t|!iy8E7;S}86XK)!w>RF4KoXiG?IIO(QOitya_jx9*akLXbmnGQa>^f|& zir2YA+A~Dyo*XY!9w48XsVUQK3lkgVKAid7{|^uQsg}czWxyPFQY3evKlzXYnS}Qp z$jFBr$i(Ij^e1Q2cI!tY=Ac)XJ-Q2?rF`ug$7^I_BAggZBqsLd#C!%jm^ALc7*cgA zw3|r-lSq|YQqNAH#Kb+|D=6Ve+Kkf{t=Q}{DXEkr>W-U&%MkChsfm>DWZ&fS^KubSnw2XfN7X$7E diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index d08b998f72d35ea107f0d8c25cd4b68e152ebc17..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18299 zcmds9OKcp;d7c-CGsEGCA}MM0Zc5q}M>{)|w5zq2e#Cn1!>i>oT1j@safZ$59`+$SL3dSM_6t zLy6iL@!H&->gvC${;K|~{`w#Fmu^o_ms0r4|M-tvKmMIm>Obiv{L3Kob^JUdok|%g zBWF2qmatqShq8=4(aAURfMt!GJ=rNV3Y}u3*eNwiovFrDr`#xW z*@QjasWd8_&m(`NafI`e$j>xpIA1{iXyYj7i^$J5W;tI%ey%ac`6=Y*8}pnmBY&)M zjPujTA8#Dzd;YsR8c#j|c)Ffg0&-8YS7;qj>O;ilyzOHM>3A9ZxwP8Q8P+1T(c 
zXEzC}0Cw%oeUP`3kfRSw55vB8a}4mt)gk*bbMD-v%#$SRPV_0=8kD?Z2DesjA2f<-!!b2AAx5gu&PK1-|Ofb=BjX8 zrsuh$MRP)b)A-HccLzW3K`xaVq_$FfX(K&IZ|B7&KW$__NvBf-u*@L!qx4?pvz&9u z$nJqfsK#e$@nDeN%I@U`IX|~Gv7HesXfr`=K1(|ing_J)Pwr(=lBW{q=Ulr0_;>vx z!zUU3?+h;i{-^#F!wU@mCx({+|2u!0;YEi3CBrL#|FeID;U$LuA;V_?|EYhJ;ZqFv z89od6fBSO`FEjkN89u+A!S~Gg$GBvgOKx+?aW1*ypWu=Tmn?G0OI&i-KglIWxTNBr zGG+$3@27qM+Wj6VxpjJP0(3pIb#^b0=ee!(dy}BW(ZQsw1Nu(*+SUT$^8TFj7ZUl4 z+{eEdv&Ni#sq^w)0jxZ0%p?EGUJ-2z15kC4ZeAV~Rc_G{jHQZ}8Ij?ZR}w9+Jfr1J zZuu&=T;`UwM9bR2EotR!y|!24XMJyKP%^UHS@ge-Z#_no8I*XGQwrV({5Zp>0AGw~ zkP-ieeoxp-ovVA;atCEl@Unl6Qu@xVMb0mB{yN@Y$L|gNZs7OkAUl}E^Gn7{pO&#& z(~c3*Gb^qD?@8dj`3t~XX1p{vS@AvKJq5fM|F3wRe`KsQZ&~qEU_EWW)u}!g>oUIh z%lLf-zpvu=HT-@9zpqEsIP+;`FXMcRvD4h<#3HbtwQqG^eJ*yUuW?S%_57zrV5<1H z4Q((Hu5rxd!eCrpx zYeRF_x6GDbDrxuw$n!<7o$;D_ zYg5B+>qVo`MQhXY(J@eHo4SWy4708GY##%ZLZ^lSbZxz5YL>6^g<(fd}o2I}l8CqMo9qpjzTFcfwug23TKO|lk+R#MPs#22}^~nQ?CcUn; zbVpk^Iaw`1OG%laBo_=->R)`s7`9J> z>MX1T71<(Oko~i=Rk%=xN50TM^O5O8aoNyVw;9%;7W!w0N?E~*6{&^(g?+`N`qV=I z{JuIvYSltt+gD*&yISa<3~M|dQL+~LClW;oU2CKuR=F1Xr@|KdG_QSS2?Y!YA358O zyQ5e*ntQE(;hv+f+abq>bf=wz^)%3SSzt3uk-|o7LprV(zSvf0-Rqx=XLe*#x$-m0 zxjm2-KJ1##Dt4M`Ihf!^!GzQ7x?V8BRf0*UDKllK*J-w}$vCF%1zD)LL3Rg8Zqx1g zL2ifALiiQIRN^y&DMR=5ret39Id`9Q%5Sn}8FilyjzwBBO*9o?7^IDtX?)Np@v7U^ ziZ~CVH1h8^J*#hu1%A2DFBpqsS{MtZlXk~w3a=|OmfH&o1ZlahkU45DbM%2?5r{;U zk}F8iZGP2iclGwqGe9mP~)ionwm zn8+*21gk_aW*tE)dN8TDMO5f*0&mye6i1@`TY{AMpb)ZwIGT99aaGLH>)eX#zei$+ zj0)4ZCj_Xj_9UW0kfS9LlqvPFwi#9k*G!N%nKi@&qYjsXS~$|u2!W!8I*p2}mP@_o zTec_804UBM}>Qt~q$yG|OQF5J<*C}}eNsyOA3d#?9j&F5L zNyeN5`5cr;8uEwu)#)HFMUnUtnl*~pL8Oq7i<;NbJ;M49CA>Ui0^KLr2b9pF`|Ff= zjK}}nUD|Xz=8|uEev<^p)tgIThgH2zY}|avvh{7KAlH{RF`a@wZRrEDD3_W|%dz}s zv)!G)pzkr7v*J_^I*xE9qI_qZ${|GnoqB@@ercj6Jet$)<$`D{Wa~B|qh- z2XOj)N?K?JJ_+#%&?+njQh zXdm{}Cu!J^6Kz=Gf0W+Kqa|C>{uGx?4klGe0VQll`(-XE4vMOzgc7!-{b??l8celQ zMv+HYv0zjESq7NPsKxfPe`Jswqy~9dmZd@2nEEuoH$9jJ4a@q+ATVk2EDg1;)irJC zM~77D4{F3Js?@A`UB;?>ax1E_!WODNmZwP{%IHwA@JXmi3qw_TXZ4Y6tyDY6ROsra zJ=zvb73sf8<&Fo=qgt#=g`9?r^#SVy9gTdK>coT#$0kh?HJ~warA7X}n)JVLS&b1&=X+;Fj4(^Ok+q~=|@X>(mwB{|lSA?{eVz0CByR>zi#?xndU z^Q~=IBbK8XmVkZaie0U~1Ro90m{C)%~YF zV|l&)#PWK&4Lv}KV(2h1n>5U;Ps;kkFYZFTYwmKvo+|F1)#sW0P zN{C-IdwF%Fzp)Mb$0O(K5W@b+Rrmx#H|lWFsa4nW$Cm|#kbn9o9*65&3^lJ#uyhKF zdg%2H%Fw+ce5|U(=z~(kC!#vTI))&1BA8%$1SNj(o+U^q24zVuNd?gWNRa#wDj_{o z$+rnOO#(0;$c&PQ@nDWJB>BBTg+TG3f=R`1ENx*Skn9c0a{Ex45$iwefHx{~xS>F4 z6eIGB?{GVieKjPzz%dWTNaT)TO@ftT;)*o@giZ-5PYLPHh8y?yV-$GHNK$85VaZ6v zhtdL+6DTDk>G5>qsRa(=bv&{sb>d8GwlOp$u$iyjM7vi*n>Le}ff+ zd}nem1LruLcc@DW!F`4o6L-*KH=pQs4`(JXD#@SG?nQ(-)m5m-DFzRhuv%K%BtM$R&L+3rwp!q3 zRR-zUmQE;2M1b2xx_4wBN@c6EcK+>o&P+>(m+^Xqk` zB4PV!L-TOp-z4deeUkQ9&3!`tOOR6e$)fQoVn16}{;WL>W%~DOHQ6rC*d=E{fIkg49YHe|+wlQcRhYa>wOId)SL-BQd$%1U( zh*3=ahVny$FO7wst3iiVrVKYy!splD!W-HiLeX<*1Ml!sB}Zg^rbH=Fjn5@cKE|g0 zxx@)K91$l4vSw%~XW&+pqU7{gQ6iF}gj|p$x7Z~}?p&$=8>#K0n3wsFQa;>~Mhfn< z^a`XJd541;<=Ku*E^!Rt@!m;cqUbJ-DvwNxZ54(M3AAVw-$k9s3L)2I#zUQ+Pm44(r03B$`0PPPWegyr;Y z42M`tCq|B}NO^1SuY3>7pHg~%&_ABoT#_OS)k!1HFy1h8R+u*h#exDEdJ&E{WhNt$=Wq7VM z!tfhtXhjS^KZfC{#uu33KU54KIdG#*Tgm5x#POI0FT&fg-19drh}lRA@8eJHkb)Ro zyuYnH%7WBSGHiU5H|8S}OdFRn5iWxV<(X7FX4@!S&SPDp-Lpx|^3rDgfHYcClhx43 zIi@?hz3W-O0&E!*AmrH>lER%)t~{!^GQukkW=U%NT$bcYBP_XzhJO&T zCe<=UVu&<4@w-U=mWD%LlY$G3AwwTbgW>`G92pb`rLX=!{PpJ_>%wBI0P$sfmgzdn{nwy4_};QsWZhwl#X0eyR1MZ$W4ET=LolFJ z(E2hwFs#lIQlqu+Tkr_dP7OziSubjIX#}}t?c0lt9;%RGKGm+(wB+mcY7Oq)@AMzH zT-z1ia*c+q0Lu1-47jF2*EH&CxQDB?tF`MlZb)R>qT8j2W5f~m-$9%se`ygP zvrh6L`HgZYzzA(7iBL8ADuTWixp}q!whED49*>AgG6oOB84u^G{bj`E5glP7B0iD{ 
z7AbFhysa_+??d(an>YJ!ArzC^?pOwVSxPEOE%LcdDS&ghU)fMeo=r#|D2w~LedEpk zmniC!yTy}wJ#53-4>=GY%RgRse<6*$Wlaz5ydfo%W?$A&@!+0}cY)rFfNZnF#%)Ni zD0ZdBk}&jO*EK}S?9vS;)kVEjOvI>D7i)J$1J&0e^`u_XZ(@_%n2_>CO^Ur`c0_m}99i=!#GS{-UDXr4N)4Fob=lPFt4R9< zzk+EIt_h0aFpoS563LiwUS+%yHp7IWV0(3~q`wT}DQ7B?FZ8Q!AU+%l7wST^J+bHl zfb?@Dl$gj1%alTRkc>gvM=N%vk&`5>vBnEvjo_pY#o$oAI25(+s*oq9mwHKj;}FFh zP(Rw?Mk$82h zLs2@hE~B`NgD6ew{6I=S2T@Gxk3s9C!z?0q68K^iXIeAS!6YDw(Zj(vZ2&&*&%n2+ zw$xIQDBfYFC-Xnt9b{x1t$y&EaMb3ong~ldcoxEC{epYI5b^`If@dylN$xN*x_$t@ znCkb5>PbsECVLV(hVGaa+QG?LAte)>5FnLwrx1~2MMVj5E@vT2RFRu%v}_}S$#p&y z>60My3+$)EC=MXzvz9J>#3(&Ml+`dODZvjhK}q|Mv?D>(jNcAVMJ24lrgC3kS?0yQ zT&8DiUB*NWo0)1ih~9DS0#Z%@9+DdZfj85~afq7qI=$e+O|(CN(uDB9U1AKW$6}1> z{I4)%l3F`%?XlLbtzQ+zVsb3J+_ab*Zt|pR8 zjs_E|DhOrVbP-k0K;7POaW`!f*V14))EG9be3Xje)f-uADEVmmt-753bvk!z<8~oV zFNLO$-2Q_h;DS)s<582u)C!3R9yi zh}cp*HdCVW3+O$360O)_Bsdtn_jpnZI2POk#QXKumMbs~ zUDx4IHOX_#_>1%{Fzv7k^>qZ6A%303LU;=axRNcIny?z6$VOXI2{&gx?%~i5C|an_ zUEtfOZdeIOW4#ImbWmp|1NbE&noplTl`N0;*Z|}bmo%cwxfmxX-~awC!$1fz8Kj6s zWXHUR%|p9ZUkyWt4Oq0oTJJ^2f8^fd8^Z7RoQ-ymh;_Yw7yBzL55Ca;FE1IX+yOh=$`Yh}I<-1RGY*v1m;J zHTuIn*0u0{V6$xr&tKyn)v~MizdVdjE>-pKMld=GTqZ%Tt%d*41mp#;**ct16I}%3 z`|MasxSt{qnRp1l(%)kxd-st^@r9p4mZEC$AEh!!Z&~_fviS`Ea(L2Dy%#c>$$V6b znrX&dy&9AdV%{{&CsxZ0vd9NHQg?!qj#z(rKTqtU=b*@I#dUEHjhtR|Ig9Rab8NJ9 zKGJErn52X*^)#Zr+v7WZe2tGU?eQ%=zLm$9@%R=VU%%r^cYM`Oyh~{4f{~yDKn}F# zIA%W9Z%mTpgQ_o%Q{f3p$RQXMc_!)K=a4SPZ&3+dIpQE=4q6iwZosGge1a(GR6&p* zlbwxn9*E}SCL<_(1=9-w_cgjA=Y0(c&M7bh<@6c+4*G9CUCtMtVt#HHE*BOH`NH{v JhF`Xj`(MSfu-gCt diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/augmentor.cpython-37.pyc deleted file mode 100644 index c9b789ad27e6ab23680a998d6d18ca1315d69220..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2874 zcmZuzOK%)S5bmDG?(FQtYsax;f!2E$(+D3b8gTuV>9r~*b4klnXrWYfC>A) z6|@Eak`0_A*kT()!n(O2-b2q{0>oHDu)q=>(GblCJg`Mqc%lW$5x!_YV1X-KSnBEC z2T`65-{0FB$CDzHM}Iwj{P^e!Z2W_c_UG+?e*THt(d~_)SIHP{*FygsJ--Q1T(OWT z3&@oXw3Gw1l?!xK1L&$I5ZlADd}Gkm_I5ha_B}u=nrw~YaD0l4_O*AAWn&|pQ;@Zi zQ92DrXBD4;fIdyO2|5H_fOaCel?U!F4ZkqihwR^v*SE8Ayk5k4 z5$@%&eEF^QFVf_TC<&8HhF`^#Jd>Y{vU{(rZ_lQ&oMw5vekT+0Xg#WZb#pq?UKpm6 zvq<~RdL#I7_h@RiTaZ!AS-L*@%Y?RG{?^rvw;KF*y zOV4nQu!RGzrMyE|wI1=(H}Zz^;b(;Pp@d+@V>pc6#1=h#dq>f6XWv_*omgw^m|Hl*!S<=SK5>b2*D1SYuUshms;hby zQ=QrpY}vO?^9zRWs)bc{Mswku=7Oj-d#b;RgMDnXdrq@^tZ8hX)9#N(KY^s?#db6TDq#1)gsnIOyI{ALt_r~MYVj186GhKzp7T|Jfd=o{a}6T zea(s$b0*x&jLwB>TxR#J@{(Gde`giVOUtlxB7v=%Gx1eVm@l3AiE|R?OUg7jo669G z#f^cLQpS=F{+3WIP?E!>EV{JZNub zQp7SG<3B&P099uyvrUsqV8zMPgIID{KcYpCC=b%g`F1pokBpph{$N!;jX_(*NH?Zf zMP_;>N|JOEhcYXoBF!e@I4Weiub1ZXD&bW!TV#d4Qs8TIBqZcPBM!49$>Sm%B3Z{$ zFV9~*Q5$~uO7PVGq`UEKnx&H>H=78SYd(Z|fh0>=RbM`l#OTq-wmA2&HY?A z%}lsm6ZbR^wRfj-SF9U%LUNGy?}Wo_ya`7N-9OoR<)EN@B7xDML6*aMoXz=r^Km=y z5s+vUl9rK$sn9mP*aA@T!Mhb1?H`kn&Q7GWpd)LCA*!alRIo_(^SZXUhO27}>IiB}1DT?#Kn>JxTUn;Im?~syi&xi{9Z&Hs zp)s!s4*k8T_|0>Fs>D5sJ^4SC%VtVm#2{CQ=8}r!pl$vr`3%Xg5+g=LudQxK2Pt)m#h)EpzfDML@nv@EXDE1aA<$N$?iI+XQq!I2FL9xmUw%GMa_v zj0VuiNsZa3V|k4qJp1H`oO*ZQO3Du=V@bLMNQ%$~-H94>otQ-9I1IHPQX1OBwJ6k` zF#L8e8dWoLiQajK;1oTT`Z4{|c?alhr?FtR~M`;+NjDB|zZVXSS?+R3Kz zMEg+=X%g*?3T-2aXm_$Vp3WrQCbeIf>rjI05b{}~+Z8{GX0fcoK?^?VOSE^Z+Qqwc QrOfF{=JAf@ar0RJ0axte4gdfE diff --git a/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc b/TensorFlow2/contrib/cv/PifPaf_for_TensorFlow2/hyperpose/Model/__pycache__/backbones.cpython-37.pyc deleted 
file mode 100644 index 07ea66658cc35caabe9519c5584b3539adcc05d1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27451 zcmd^oYj7Odb>2LCUcg}Rei8&p@Bxxb5&$1dMcx;8sg<+~ue`Kf9!EPIpaRK+mHbFj zE>}6g^zzRk@yB5{7(*^ zC-Aj@C7DQ=2{XBps3sf9yQzDLyXkw0qzcPavqCefIr+_33;0c~c z9pcZdbgq`G<<+ig*J`C&S?#WNul7`XR(q?xt9{kJ)&6S#>Ogg1b+9^^OuR2)W=-Su zglRN7UQ1Vp@SHRAc+Si7aHFt1vOH=Q%;I*kI%by44zr^%))+H8&GKT->@q8_C#wgv z@baQzcAGs29cQS-EiY!xUb7Ej2a$ijIe`2JB>xG?|ByK-`5%_2Bj%7fjM65}5pz`X z$SogTKIWA;=9TEtP=cbeHV4dclyMwo95g3T#)On{0?&uc!+1XYTB3NAP^gJdNkm@_fcjzL=PLWQRREH>R5NDYEZ{e54wx0Q8|`VJ zcU14?K9A*MB z#PqgLlnXgA5;H9ZP22sn-_4x$2HJ%-{|jbi9_V#<=B$CCX5&VrM z$7BACWM2?!B=hhOB*&7Q2jbZ78NXc@IClBrFNgLOd~Nm_CIVRPR`MRDRdNnP{wzY> z9A4s9-7@ifj?c+gtQq_&XO_P6aNduvEmg;GlFTv7A~a zdrz6k?Ud9cg_=-g&O=(@cWnv72OX1g7TuE7FgF$&H4aTT)oQsXY)C>3HG6H*S*^cP zTd>y9M~#+at*zg#-D)~FYF-9u^)gw>*Ue#6nN1AF=$pBD3_PT;?dMQG|MlYA*Ny4IaWdj0uOTv1T4u`dyu^KBIQ+a9q*lMr^q;$6!PVBaRk+%D8M6?AqCo{=1hW=!7 z^I+`A)2F|^9zHGjkcYj%oTT+X0S{W=*fON`wYCphUbQOeX$(}o<+F&Rw>;zIWH=ip z5eRw^F>>ge{I(%|qjHJ%ALA6X{xi^j#X0nbc?-P}*Kw-9tfx_TM0f^2)>S%f9i)xI zDs!8eGGaR+=u#O0Gw84PF*mlC>h5T-TF;WeKF8Q1lZee!gai3Rdv891uI)rx{aNCf}4o zMFwy_Sw5Ig=hMZUK#v842Ol;xKQ>DVP%HS>vlvt_3kU50+1qOzG1FZ&#{Ab`N7Jfm}901Pt-)G!Q zGvZMW1T{G5QAT%BCOpcRP!0i4`?($VCsp&+*-9%XzNWzwS@6v|N`Zofs3d6WtB zsP{g6htFe79EWnp;D``ljf;yNWa<<#66AJh-(t8q|HanGut^( zCY__^S@YaBQT#DM@zZnGhcIE(&<A+6_uTXr4z`bctKHi5XUaJ=8R~FV@ zhE7Z2xF@&WAtypHf#6rt-;lDRoL7)^q4COk&8j=iwNRw)kg`L*>%sEbnvdLPYcpQh z`5^4P7B*{*gvl-hQ7!~gF4TOif1!4M=Xx0VVi5Ua5cy(lcIR#w zcyGcWpME)vdbu_$ZCdr3=yL5mv_KK{(vZ@mY)xOviP#fSN44fUzV_2_+SHoVP;w-N zKf1}lg%7HY9s)lVoFU+Xk%)p5O0oMWI2@SZ8Nkl443R&=IO_S5*5~n@x(gJ2FL?(% zWI3bxvzp(~{5j2^*Zc*|U)201&EKK3QeH9zxyFY+GH!Ui?}P~fLhn4Sw6>eR5(ie(D@AbM7Tp%2qnqjQ~3oz6TRq8iJj z)1b3RXNk@YI!!vubY7;jLT8muiw*~=wMJ*1&PU*Yugd7Vz<>Z_DR4!7^r!H(=}BP3 zbnNlh(-xlho^=gY8)4sx25VhJRjf;JFmFU1y37FTMAlzns8UF+_cGwS=(N2Q@!#k0 zjnP82D*9Sz1Vd7M@MDun1{ks^7?KuUT6UF1HzU^DEV{b1c{D`cS^@1`&pLoS-_(WN z_ane9`{pm0#4H$Qvcg#db(abkA#fUB`?HvuA-ojRu5ae0fj2|dx0Z$KOJgm;0Yv^X zo|e;nn9{YpNwKd?mYCAkOQhf{ev>(nmNhe={V%8Jl<<@{nLZ;h0y!?{pl}&bxN_Tu zLkOCOQrEd$UM9@+>3Pr#+gXQG5k_dlff63cp@d_>9XyPD*vc5NxtXjO$ zG+~vjnax$0GvITY&TX;(DJ3_<=rT=ZhW=GP5+aC6Y`2mdQ$)NV{71;c=FE-?LIVa2 z111auR=*UOP+xKa>hH013hspWym{tnr<-A2xIeUuobUf(H$p-^xn^)BcC%b;#;-Xk2A=B`s{+Bq%}SN;-%r ztYc-jl3qF^HXU;e(mDCNg{{1FN^TA;CfXlX+gr13_13pOih$U**JQKLQrof)+mgAW z)uppIjjb&@oHrUXe3p;G48xxC#!!^;$JN=2=_I$)yrr$tdFxVrpd(C)1)aE=7cAKMP;Rdd|$=$vDQ{oRi0bKM@=k_H)}Am>Llx z&xP$Yo{FYIU_Q0zEh{T($$RptC6kB?#!5+>YU?y2(Ew(&?s_GF- zWzI_-_YkvI6d~L>CyTqHcSXcRH<}Cqz4%cM+Y!1&hwr|p7tX?|XPL+fIvI>sUl%*HcO86a<1J3e@PC^) z=7&+?9ejNpGh#8sgBGtpzJv$)a9`2Q`pQNlT;hnZ%>YU~jp!QNOmLlwD~$jWX((FR z07{T;V$y(ZavjnLU_^E+_gWN0v@xg{ekn*|>D0#t{_uDIkKhJnKNazH6%|t4lA{dG z6rzTLhZ<_P+`v1pB$Ggecm^ruv$yvL2-qMO0)z@i*-$dlVgWX|8Er8g8$^W+umKd- zB-$#F@3K@9XpB9s6zfX&K_kn$AgAVqf;g5zEtoz9-5_W(A-1W>gU&Y{2 zy5%T3cjqtgc~B{TtYM_9~5b0zM_opM+1}L?t{?Sk%*uz^ekK%+;@&*R!IH8!#r)WD-SV8FvyIpO2NMz#g1`3?+ zocdSrwZ8-hVv?BC0}HZ9kWzc4#GlsunZTdAODm(Ho<%2zr7@tw_NVy^n!gzMnFGu- z*yfX1E(M{9H7&fAEzTS z_mlMfB%Pn4vrXr()A<{8-k|f-bUs0cL(nzQ;~1FMU&DiM%)USZMBt95J+ zd0@e_h}!HYc+k6PE^Jw6L_5kt5cE3({??Ig>Xd^b82aWy znPB}G`n~x@s5tMwU=SS-8zHl@7gouNp}l1VXQ5KiPSY0^5^C)G^2F4KkJd{KIa0XJ=O3%^sIWA_E@g7v zG6OJQ-p+alAN15+-r<$-07{g~BPKkRwO`&nd*t0K2UH9*Jc6R<-HWYx%sV`S5=RY> zpm@~CTMh0}@{pJJaC^O_J+XC8N)C^jP1_oxWZ(UbR+{YFLF7|-%d2c?L>aknVsKa%BR+ZEj!9$#5N_08S6*VjO;I?Z#J@aEcKV@@;aiM zh{^L5^AU+EQc*0R9SkFLXqU2NToGHScf+|J|z!71j!J;w)oT2@^b_^SLA@a4<0pY1s~kgxtKnP)ka 
zEe|+Xd>ktH_!d*U>QTFo{w^*Z0Zuz@RZ_@tv{Re1DBv#u>ZlT4W!A^z2mx zhhq>uFQSy}TZLv=KtZXTdzKOu5VxWt-T(9Kpd#|304>8bHIdvL-PO?4fz@H8hdp%j zKo;oFuo93(sKn;buEO8-%}T$ZRd4qo^fxM@i`s-PmK>hPJGHO!w?SB$gv2sx`#o0x zL!{%+s#%793eGLoh2t3N+~imetMUw8z&H2=FoOjJFr&8(X5iQ}j=#b}huC z2&l+m4EMWb^u+YV2ZI!32x)xdPrANcNb#yB$^b&RBn7(}tclAdf6qO@Mn;{4^}?|% z#;s2P7Zi>e!U5kEdH{$z*t^mTy;E`bvl$zL=FJ}#WP{1Ab6xOgC5`JmzGNP5Q zmqVx!R}Mi!TsYx@{$AtEcS)T{&IR>h9j<$ID62EAP8ScVd%cE3>VO9fIpl>L^+Jvz zjEB@gU)l>WoS3sdgTbc(23aT6jhouqwk3dJA8V%w&0T=uZ=r;UsWS#3Sf6G!exA+* zo$GY!bp8&VzY9m%P3QT@St}b!L!5BFZBJ=PiNOU40r772B@1R;WL)=wecg99OexB%kpceYX>66>K5x)=q-5kX0j`c<|7K@vxp zU`I`soe2R=jM&bF%gxp=u#A5IM{5vNEKaV6GF(LZK7yRg>P^cbSwir6 zM+N!ihZN-RO+UD8fP0O&q;6)G()W^Pb`(~AZy&P(TkbsQ6vhP#ZG3Y}nR~P>6k3N7 z3J`)guWNC*beQ+!I5#J5GvFh(UjYUkvZ zyO(`JllyNnytPF@Lj@N@=<$Yv^n;*;>2VcAB>hN`el!wq zEJ!~Jojgo`V2|+eJ;D#Rl{XQTcPNtY;UGN(t5)6-hI^m^LKl6(_s*JV$2U?o*JvoP ze^YD8X!^-L@;%yCuVY%h^zx7Ik^aOU;Zu8rpKL4d6w6!63m6LD%XlMgomTJS^yA>& zBd}Hsm}kths`v4>Jd#;9&zaNcMN-bRQD!{KENE;U75_X#Jy6o+@Ze*YSTTmgig97i z`W&Y6(()IaPY(V04?MAS{f~d|@K>il{KV3?KlIDvKla~WdSd4V_O{6}VAfZ{Sjg6}Lm%A8fl7$J*fXAvO}Wc*n?!+L9U?V&20dpQS^w zpw>M2Ug3=%Z_ftvHA3svmj;gZW_k!XrnVo{%*JAUW5tQ!G7c2tA^R|#7+gkEm6)rr6^?5LxQiry zPr4v08SczZ+ak=#g5~f4p2_h2&M?|&q}OO#nbvykfic<_SYSAyiR-a(0nox-@a4l5dR$w5h7alma{LgQKuj03e!hiHjrf;3mDWSf#g zV#IVAf0f$oli0k*ph7$tQ2ISek5C3cF=RC;4HTu)@EU^(3?lgd)D&q#)V$3-J%$1V05bj|Z7gR7gSM7$3h<5gaZjAPX{}J@MI(SYIbd#sN{qqo_ z(ueCGBIRTjJyb8H;p?HlqCKnV-_$)z5BR2(SgcaBOHo6 z&ghQ>L>iT_NP8pziabWBL_Hc6*higc&!aJFYz=7(+-aq*LlltUrWO}45=2slYXb7h z(gL#b!=CZY$|6Z;+1e-I93nfWG>rmwkGX^|>^l^k(zfI@g(D@Rb z-=Xunbp9=!-=iZP@cZ=r0iAzG=ikG*AH4Z4_RIt#CW1X=`blsj2;Cgs-3vbYVRHYI z@jTJ(#@JGTD>7ko_O>F(8bNR*rnot}J9C7nsqm&|Ji01Mja#sMVq_0Gp^6BuZ^9D_ zE)h^E*=&Hl`PPJ)ypwC;Cd@lI2Zx^^%6QaC9Yt36f~9e=H-kgUhM9%BhTXdmJOq@A z>fKBwDU)EsxCAty@kS~yWh9L{_MG*OHq&I0w!Y?f~KQDc2>& zbF3FQFUFDH=UADfk;>rUF_)Lel2lyi-<;fM;O^#99GsjWSMasTBDa#e_sm+fU2(T8 z?)JMQXH#*&0Z)SoIdcacCKswN>h=C3vT9pekL)i^W4^>GHzwb*h{S4>6KDJ8k^SDt zoAEGhei@0>Y_$FZyb#fQS}tDzJuKaCZxIgv8l>-LCDw~YTxpq)3^TTjc@$;FaFb1zxX7&+Z z(f%`3KjdtYja4*!diVXx6%a6)BP1ul9D5MpSC>3sYZcB{WbPC+c+T?`Ih?OBaA+C^ z_Zm(uz~y-g1;Yu}aalHEadTB^o4Em!I=)5m1jy~bh zd23i4{g6kW*n@u9qr)T{oBoJLC-Y(c!)^7S^yo)~4#lRe{zpCfcTfdf<8xw*!=-CS8~sf#3^VRVr+qVKdN(id$dtbKX^lJ+UWqjWo91