From 26007b884fc9bbc0da109487eb8b4e8da659244a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 03:20:28 +0000
Subject: [PATCH 01/54] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20keras=5Fsample?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 TensorFlow2/built-in/keras_sample/.keep
diff --git a/TensorFlow2/built-in/keras_sample/.keep b/TensorFlow2/built-in/keras_sample/.keep
new file mode 100644
index 000000000..e69de29bb
--
Gitee
From 6faca651dd8e176f9c54d8a516798c7e19bc2122 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 03:21:27 +0000
Subject: [PATCH 02/54] =?UTF-8?q?subclassing=5Fconv=5Flayers=5FID2615=5Ffo?=
=?UTF-8?q?r=5FTensorFlow2.X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../LICENSE | 201 ++++++++++++
.../README.md | 203 ++++++++++++
.../README_BAK.md | 193 +++++++++++
.../modelzoo_level.txt | 3 +
.../npu_convert_dropout.py | 54 +++
.../npu_ops.py | 256 +++++++++++++++
.../requirements.txt | 13 +
.../run_1p.sh | 3 +
.../subclassing_conv_layers.py | 310 ++++++++++++++++++
.../test/train_full_1p.sh | 167 ++++++++++
.../test/train_performance_1p.sh | 169 ++++++++++
.../test/train_performance_1p_static.sh | 169 ++++++++++
12 files changed, 1741 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..2f83b0d67
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md
@@ -0,0 +1,203 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+
+
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Image Classification**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2022.04.08**
+
+**大小(Size):458K**
+
+**框架(Framework):TensorFlow_2.6.2**
+
+**模型格式(Model Format):h5**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Research**
+
+**描述(Description):基于TensorFlow2.X框架的图像分类训练代码**
+
+
+概述
+
+## 简述
+
+ subclassing_conv_layers网络展示了如何使用Conv.convolution_op()的API实现自定义卷积层,可以重用大部分基础卷积层,只需通过该方法自定义卷积操作本身。而使用 API 的“StandardizedConvolution”实现非常简洁,仅包含四行代码。
+
+ - 参考论文:
+
+ [https://arxiv.org/abs/1903.10520](https://arxiv.org/abs/1903.10520)
+
+ - 参考实现:
+ [https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py)
+
+
+ - 适配昇腾 AI 处理器的实现:
+ skip
+
+    - 通过Git获取对应commit\_id的代码方法如下:
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
+
+
+
+## 默认配置
+
+
+- 网络结构
+ - 使用Conv.convolution_op()自定义卷积网络
+ - 训练参数个数:34,826
+
+
+- 训练超参(单卡):
+ - Batch size: 256
+ - num_classes:10
+ - input_shape: [28,28,1]
+ - Train epoch: 5
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+config_proto = tf.ConfigProto(allow_soft_placement=True)
+ custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = 'NpuOptimizer'
+ custom_op.parameter_map["use_off_line"].b = True
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+ config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+ session_config = npu_config_proto(config_proto=config_proto)
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+
+快速上手
+
+## 数据集准备
+
+1. 模型训练使用mnist数据集,数据集请用户自行获取。
+2. 数据集下载完毕后,请用户使用keras.datasets.mnist.load_data()直接读取数据。
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+ 2. 单卡训练
+
+ 2.1 设置单卡训练参数(脚本位于subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+
+ ```
+ batch_size=256
+ #训练step
+ train_epochs=5
+ #学习率
+ learning_rate=0.001
+ ```
+
+
+
+ 2.2 单卡训练指令(subclassing_conv_layers_ID2615_for_TensorFlow2.X/test)
+
+ ```
+ 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
+ bash train_full_1p.sh --data_path=xx
+ 数据集应为npz类型(数据切分可能不同),配置data_path时需指定为data这一层,例:--data_path=/home/data
+ ├─data
+ ├─mnist_npz
+
+ ```
+
+迁移学习指导
+
+- 数据集准备。
+
+ 1. 获取数据。
+ 请参见“快速上手”中的数据集准备
+
+- 模型训练
+
+ 请参考“快速上手”章节
+
+高级参考
+
+## 脚本和示例代码
+
+ ├── README.md //说明文档
+ ├── requirements.txt //依赖
+ ├── modelzoo_level.txt //状态文件
+ ├── subclassing_conv_layers.py //网络结构定义脚本
+ ├── test
+ | |—— train_full_1p.sh //单卡训练脚本
+ | |—— train_performance_1p.sh //单卡训练脚本
+
+## 脚本参数
+
+```
+batch_size 训练batch_size
+learning_rate 初始学习率
+train_epochs 总训练epoch数
+precision_mode default="allow_mix_precision", type=str,help='the path to save over dump data'
+over_dump type=ast.literal_eval,help='if or not over detection, default is False'
+data_dump_flag type=ast.literal_eval,help='data dump flag, default is False'
+data_dump_step data dump step, default is 10
+profiling type=ast.literal_eval help='if or not profiling for performance debug, default is False'
+profiling_dump_path type=str, help='the path to save profiling data'
+over_dump_path type=str, help='the path to save over dump data'
+data_dump_path type=str, help='the path to save dump data'
+use_mixlist type=ast.literal_eval,help='use_mixlist flag, default is False'
+fusion_off_flag type=ast.literal_eval,help='fusion_off flag, default is False'
+mixlist_file type=str,help='mixlist file name, default is ops_info.json'
+fusion_off_file type=str,help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune help='auto_tune flag, default is False'
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..b0e12437f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,193 @@
+# Customizing the convolution operation of a Conv2D layer
+
+**Author:** [lukewood](https://lukewood.xyz)
+**Date created:** 11/03/2021
+**Last modified:** 11/03/2021
+**Description:** This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API.
+
+
+
+[**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/subclassing_conv_layers.ipynb) •
+[**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py)
+
+
+
+---
+## Introduction
+
+You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`.
+Keras enables you do this without implementing the entire layer from scratch: you can reuse
+most of the base convolution layer and just customize the convolution op itself via the
+`convolution_op()` method.
+
+This method was introduced in Keras 2.7. So before using the
+`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater.
+
+
+```python
+import tensorflow.keras as keras
+
+print(keras.__version__)
+```
+
+
+```
+2.7.0
+
+```
+
+---
+## A Simple `StandardizedConv2D` implementation
+
+There are two ways to use the `Conv.convolution_op()` API. The first way
+is to override the `convolution_op()` method on a convolution layer subclass.
+Using this approach, we can quickly implement a
+[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below.
+
+
+```python
+import tensorflow as tf
+import tensorflow.keras as keras
+import keras.layers as layers
+import numpy as np
+
+
+class StandardizedConv2DWithOverride(layers.Conv2D):
+ def convolution_op(self, inputs, kernel):
+ mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
+ return tf.nn.conv2d(
+ inputs,
+ (kernel - mean) / tf.sqrt(var + 1e-10),
+ padding="VALID",
+ strides=list(self.strides),
+ name=self.__class__.__name__,
+ )
+
+```
+
+The other way to use the `Conv.convolution_op()` API is to directly call the
+`convolution_op()` method from the `call()` method of a convolution layer subclass.
+A comparable class implemented using this approach is shown below.
+
+
+```python
+
+class StandardizedConv2DWithCall(layers.Conv2D):
+ def call(self, inputs):
+ mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
+ result = self.convolution_op(
+ inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10)
+ )
+ if self.use_bias:
+ result = result + self.bias
+ return result
+
+```
+
+---
+## Example Usage
+
+Both of these layers work as drop-in replacements for `Conv2D`. The following
+demonstration performs classification on the MNIST dataset.
+
+
+```python
+# Model / data parameters
+num_classes = 10
+input_shape = (28, 28, 1)
+
+# the data, split between train and test sets
+(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
+
+# Scale images to the [0, 1] range
+x_train = x_train.astype("float32") / 255
+x_test = x_test.astype("float32") / 255
+# Make sure images have shape (28, 28, 1)
+x_train = np.expand_dims(x_train, -1)
+x_test = np.expand_dims(x_test, -1)
+print("x_train shape:", x_train.shape)
+print(x_train.shape[0], "train samples")
+print(x_test.shape[0], "test samples")
+
+# convert class vectors to binary class matrices
+y_train = keras.utils.to_categorical(y_train, num_classes)
+y_test = keras.utils.to_categorical(y_test, num_classes)
+
+model = keras.Sequential(
+ [
+ keras.layers.InputLayer(input_shape=input_shape),
+ StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"),
+ layers.MaxPooling2D(pool_size=(2, 2)),
+ StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"),
+ layers.MaxPooling2D(pool_size=(2, 2)),
+ layers.Flatten(),
+ layers.Dropout(0.5),
+ layers.Dense(num_classes, activation="softmax"),
+ ]
+)
+
+model.summary()
+```
+
+```python
+batch_size = 128
+epochs = 5
+
+model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+
+model.fit(x_train, y_train, batch_size=batch_size, epochs=5, validation_split=0.1)
+```
+
+```
+x_train shape: (60000, 28, 28, 1)
+60000 train samples
+10000 test samples
+Model: "sequential"
+_________________________________________________________________
+ Layer (type) Output Shape Param #
+=================================================================
+ standardized_conv2d_with_ca (None, 26, 26, 32) 320
+ ll (StandardizedConv2DWithC
+ all)
+
+ max_pooling2d (MaxPooling2D (None, 13, 13, 32) 0
+ )
+
+ standardized_conv2d_with_ov (None, 11, 11, 64) 18496
+ erride (StandardizedConv2DW
+ ithOverride)
+
+ max_pooling2d_1 (MaxPooling (None, 5, 5, 64) 0
+ 2D)
+
+ flatten (Flatten) (None, 1600) 0
+
+ dropout (Dropout) (None, 1600) 0
+
+ dense (Dense) (None, 10) 16010
+
+=================================================================
+Total params: 34,826
+Trainable params: 34,826
+Non-trainable params: 0
+_________________________________________________________________
+
+Epoch 1/5
+422/422 [==============================] - 7s 15ms/step - loss: 1.8435 - accuracy: 0.8415 - val_loss: 0.1177 - val_accuracy: 0.9660
+Epoch 2/5
+422/422 [==============================] - 6s 14ms/step - loss: 0.2460 - accuracy: 0.9338 - val_loss: 0.0727 - val_accuracy: 0.9772
+Epoch 3/5
+422/422 [==============================] - 6s 14ms/step - loss: 0.1600 - accuracy: 0.9541 - val_loss: 0.0537 - val_accuracy: 0.9862
+Epoch 4/5
+422/422 [==============================] - 6s 14ms/step - loss: 0.1264 - accuracy: 0.9633 - val_loss: 0.0509 - val_accuracy: 0.9845
+Epoch 5/5
+422/422 [==============================] - 6s 14ms/step - loss: 0.1090 - accuracy: 0.9679 - val_loss: 0.0457 - val_accuracy: 0.9872
+
+
+
+```
+
+---
+## Conclusion
+
+The `Conv.convolution_op()` API provides an easy and readable way to implement custom
+convolution layers. A `StandardizedConvolution` implementation using the API is quite
+terse, consisting of only four lines of code.
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..a829ab59b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:NOK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py
new file mode 100644
index 000000000..95f8689ce
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from keras import backend
+from keras.utils import control_flow_util
+from keras.layers.core import Dropout
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn
+import npu_ops
+
+def dropout_call(self, inputs, training=None):
+ """Make Keras Dropout to execute NPU dropout"""
+ if training is None:
+ training = backend.learning_phase()
+
+ def dropped_inputs():
+ return npu_ops.dropout(
+ inputs,
+ noise_shape=self._get_noise_shape(inputs),
+ seed=self.seed,
+ keep_prob=1 - self.rate)
+
+ output = control_flow_util.smart_cond(training,
+ dropped_inputs,
+ lambda : array_ops.identity(inputs))
+
+ return output
+
+Dropout.call = dropout_call
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py
new file mode 100644
index 000000000..fa6f8f211
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py
@@ -0,0 +1,256 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Custom NPU ops for this sample: dropout, LSTM/GRU/RNN gradients, and
+fused optimizer helpers (LARS, Adam, LAMB), backed by gen_npu_ops kernels."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import numbers
+from tensorflow.python.ops import array_ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import ops
+from tensorflow.python.eager import context
+
+from npu_device import gen_npu_ops
+
+
+# Fallback graph-level seed used by get_seed() when only an op seed is given.
+DEFAULT_GRAPH_SEED = 87654321
+_MAXINT32 = 2**31 - 1
+def LARSV2(input_weight,
+           input_grad,
+           weight_decay,
+           learning_rate,
+           hyperpara=0.001,
+           epsilon=0.00001,
+           use_clip=False,
+           name=None):
+    """LARS (Layer-wise Adaptive Rate Scaling) update via the NPU kernel.
+
+    Graph mode only: raises RuntimeError under eager execution.
+    """
+    if context.executing_eagerly():
+        raise RuntimeError("tf.LARSV2() is not compatible with "
+                           "eager execution.")
+
+    return gen_npu_ops.lars_v2(input_weight=input_weight,
+                               input_grad=input_grad,
+                               weight_decay=weight_decay,
+                               learning_rate=learning_rate,
+                               hyperpara=hyperpara,
+                               epsilon=epsilon,
+                               use_clip=use_clip,
+                               name=name)
+
+
+def _truncate_seed(seed):
+    """Reduce `seed` modulo 2**31 - 1 so it fits a 32-bit integer."""
+    return seed % _MAXINT32  # Truncate to fit into 32-bit integer
+
+def get_seed(op_seed):
+    """Combine the graph-level seed with `op_seed` into a (seed, seed2) pair.
+
+    Mirrors TensorFlow's seeding contract: (None, None) means nondeterministic;
+    otherwise both values are truncated to 32 bits.
+    """
+    global_seed = ops.get_default_graph().seed
+
+    if global_seed is not None:
+        if op_seed is None:
+            # Deterministic per-op fallback derived from graph state.
+            # pylint: disable=protected-access
+            op_seed = ops.get_default_graph()._last_id
+
+        seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
+    else:
+        if op_seed is not None:
+            seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
+        else:
+            seeds = None, None
+    # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
+    # be unexpected since Python docs say nondeterminism is (None, None).
+    if seeds == (0, 0):
+        return (0, _MAXINT32)
+    return seeds
+
+def _get_noise_shape(x, noise_shape):
+    """Resolve `noise_shape` against `x`, filling unknown dims from x's shape."""
+    # If noise_shape is none return immediately.
+    if noise_shape is None:
+        return array_ops.shape(x)
+
+    try:
+        # Best effort to figure out the intended shape.
+        # If not possible, let the op to handle it.
+        # In eager mode exception will show up.
+        noise_shape_ = tensor_shape.as_shape(noise_shape)
+    except (TypeError, ValueError):
+        return noise_shape
+
+    if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
+        new_dims = []
+        for i, dim in enumerate(x.shape.dims):
+            if noise_shape_.dims[i].value is None and dim.value is not None:
+                # Borrow x's known static dim for an unspecified noise dim.
+                new_dims.append(dim.value)
+            else:
+                new_dims.append(noise_shape_.dims[i].value)
+        return tensor_shape.TensorShape(new_dims)
+
+    return noise_shape
+
+def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
+    """NPU dropout: keep each element of `x` with probability `keep_prob`.
+
+    Args:
+        x: A tensor with type is float.
+        keep_prob: A tensor, float, rate of every element reserved.
+        noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random
+            generated.
+        seed: Random seed.
+        name: Layer name.
+
+    Returns:
+        A tensor.
+
+    Raises:
+        RuntimeError: if called under eager execution (graph mode only).
+        ValueError: if `x` is not floating point, or `keep_prob` is out of (0, 1].
+    """
+    if context.executing_eagerly():
+        raise RuntimeError("tf.dropout() is not compatible with "
+                           "eager execution.")
+    x = ops.convert_to_tensor(x, name="x")
+    if not x.dtype.is_floating:
+        raise ValueError("x has to be a floating point tensor since it's going to"
+                         " be scaled. Got a %s tensor instead." % x.dtype)
+    if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
+        raise ValueError("keep_prob must be a scalar tensor or a float in the "
+                         "range (0, 1], got %g" % keep_prob)
+    if isinstance(keep_prob, float) and keep_prob == 1:
+        # keep_prob == 1 is a no-op; skip mask generation entirely.
+        return x
+    seed, seed2 = get_seed(seed)
+    noise_shape = _get_noise_shape(x, noise_shape)
+    # Generate the random keep-mask on device, then apply it to x.
+    gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name)
+    result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name)
+    return result
+
+@ops.RegisterGradient("DropOutDoMask")
+def _DropOutDoMaskGrad(op, grad):
+    """Gradient of DropOutDoMask: re-apply the same mask/keep_prob to `grad`."""
+    result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2])
+    # Only the data input receives a gradient; mask and keep_prob do not.
+    return [result, None, None]
+
+def basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,
+                    activation, name=None):
+    """Single BasicLSTMCell step via the fused NPU kernel; graph mode only."""
+    if context.executing_eagerly():
+        raise RuntimeError("tf.basic_lstm_cell() is not compatible with "
+                           "eager execution.")
+    x = ops.convert_to_tensor(x, name="x")
+    h = ops.convert_to_tensor(h, name="h")
+    c = ops.convert_to_tensor(c, name="c")
+    w = ops.convert_to_tensor(w, name="w")
+    b = ops.convert_to_tensor(b, name="b")
+    result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,
+                                         activation, name)
+    return result
+
+@ops.RegisterGradient("BasicLSTMCell")
+def basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct):
+    """Gradient for BasicLSTMCell: chains c-state, weight and input grad kernels."""
+    # Gate/cell-state gradients from the forward activations cached in outputs.
+    dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3], op.outputs[4], op.outputs[5], op.outputs[6], forget_bias=op.get_attr("forget_bias"), activation=op.get_attr("activation"))
+    dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate)
+    dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr("keep_prob"))
+
+    # Order matches the forward op's inputs (x, h, c, w, b).
+    return [dxt, dht, dct_1, dw, db]
+
+def adam_apply_one_assign(input0, input1, input2, input3, input4,
+                          mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None):
+    """Fused Adam update step (assign variant); graph mode only."""
+    if context.executing_eagerly():
+        raise RuntimeError("tf.adam_apply_one_assign() is not compatible with "
+                           "eager execution.")
+    result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4,
+                                               mul0_x, mul1_x, mul2_x, mul3_x, add2_y,name)
+    return result
+
+def adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,
+                                     mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None):
+    """Fused Adam-with-weight-decay update step (assign variant); graph mode only."""
+    if context.executing_eagerly():
+        raise RuntimeError("tf.adam_apply_one_with_decay_assign() is not compatible with "
+                           "eager execution.")
+    result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,
+                                                          mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name)
+    return result
+
+@ops.RegisterGradient("DynamicGruV2")
+def dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new):
+    """Gradient for DynamicGruV2, delegating to the fused NPU grad kernel."""
+    (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs
+    (y, output_h, update, reset, new, hidden_new) = op.outputs
+    (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input, weight_hidden, y, init_h, output_h, dy, doutput_h, update, reset, new, hidden_new, direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), gate_order=op.get_attr("gate_order"), reset_after=op.get_attr("reset_after"))
+
+    # NOTE(review): seq_length is passed through in the gradient slot for the
+    # seq_length input (no real gradient) — confirm this matches the op schema.
+    return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev)
+
+@ops.RegisterGradient("DynamicRnn")
+def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc):
+    """Gradient for DynamicRnn, delegating to the fused NPU grad kernel."""
+    (x, w, b, seq_length, init_h, init_c) = op.inputs
+    (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs
+    # Only the last timestep of the initial/output state grads is consumed.
+    (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, cell_type=op.get_attr("cell_type"), direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), use_peephole=op.get_attr("use_peephole"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias"))
+
+    # NOTE(review): seq_length is passed through in its gradient slot — confirm.
+    return (dx, dw, db, seq_length, dh_prev, dc_prev)
+
+def lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x,
+                                mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name=None):
+    """Fused LAMB optimizer update; returns (update, next_v, next_m). Graph mode only."""
+    if context.executing_eagerly():
+        raise RuntimeError("tf.lamb_apply_optimizer_assign() is not compatible with eager execution")
+    update,nextv,nextm=gen_npu_ops.lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x,
+                                                               mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name)
+    return update,nextv,nextm
+
+def lamb_apply_weight_assign(input0,input1,input2,input3,input4,name=None):
+    """Fused LAMB weight-assign step; graph mode only."""
+    if context.executing_eagerly():
+        raise RuntimeError("tf.lamb_apply_weight_assign() is not compatible with eager execution")
+    result = gen_npu_ops.lamb_apply_weight_assign(input0,input1,input2,input3,input4,name)
+    return result
+
+def dropout_v3(x, keep_prob, noise_shape=None, seed=None, name=None):
+    """NPU dropout using the v3 mask kernels.
+
+    Args:
+        x: A tensor with type is float.
+        keep_prob: A tensor, float, rate of every element reserved.
+        noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random generated.
+        seed: Random seed.
+        name: Layer name.
+
+    Returns:
+        A tensor.
+
+    NOTE(review): unlike `dropout`, this variant has no eager-mode guard — confirm intended.
+    """
+    x = ops.convert_to_tensor(x,name="x")
+    if not x.dtype.is_floating:
+        raise ValueError("x has to be a floating point tensor since it's going to be scaled. Got a %s tensor instead." % x.dtype)
+
+    if isinstance(keep_prob,numbers.Real) and not 0 < keep_prob <=1:
+        raise ValueError("Keep_prob must be a scalar tensor or a float in the range (0,1], got %g" % keep_prob)
+
+    if isinstance(keep_prob,float) and keep_prob==1:
+        # keep_prob == 1 is a no-op; skip mask generation entirely.
+        return x
+
+    seed, seed2 = get_seed(seed)
+    noise_shape = _get_noise_shape(x,noise_shape)
+    # Generate the random keep-mask on device, then apply it to x.
+    gen_out = gen_npu_ops.drop_out_gen_mask_v3(noise_shape,keep_prob,seed,seed2,name)
+    result = gen_npu_ops.drop_out_do_mask_v3(x, gen_out, keep_prob, name)
+    return result
+
+@ops.RegisterGradient("DropOutDoMaskV3")
+def _DropOutDoMaskV3Grad(op,grad):
+    """Gradient of DropOutDoMaskV3: re-apply the same mask/keep_prob to `grad`."""
+    result = gen_npu_ops.drop_out_do_mask_v3(grad, op.inputs[1], op.inputs[2])
+    # Only the data input receives a gradient; mask and keep_prob do not.
+    return [result, None, None]
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..037077e65
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,13 @@
+pygments>=2.7.4
+jinja2
+markdown
+requests
+mdx_truly_sane_lists
+sphinx~=3.0.3
+black==19.10b0
+pathlib
+tensorflow
+PyYAML
+jupyter
+keras
+pandas
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..21876811d
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+data_path=""
+nohup python3 subclassing_conv_layers.py --epochs=2 --batch_size=256 --data_path=$data_path >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py
new file mode 100644
index 000000000..93d344533
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py
@@ -0,0 +1,310 @@
+"""
+Title: Customizing the convolution operation of a Conv2D layer
+Author: [lukewood](https://lukewood.xyz)
+Date created: 11/03/2021
+Last modified: 11/03/2021
+Description: This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API.
+"""
+"""
+## Introduction
+
+You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`.
+Keras enables you to do this without implementing the entire layer from scratch: you can reuse
+most of the base convolution layer and just customize the convolution op itself via the
+`convolution_op()` method.
+
+This method was introduced in Keras 2.7. So before using the
+`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater.
+"""
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow.keras as keras
+
+print(keras.__version__)
+"""
+## A Simple `StandardizedConv2D` implementation
+
+There are two ways to use the `Conv.convolution_op()` API. The first way
+is to override the `convolution_op()` method on a convolution layer subclass.
+Using this approach, we can quickly implement a
+[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below.
+"""
+import tensorflow as tf
+import tensorflow.keras as keras
+import keras.layers as layers
+import numpy as np
+from time import time
+import npu_device
+import os
+import time
+from absl import flags, app
+import npu_convert_dropout
+
+# npu_device.open().as_default()
+
+flags.DEFINE_string(name='data_path', default='/home/hzh/involution/cifar-10-batches-py',
+ help='dataset path(local)')
+flags.DEFINE_integer(name='epochs', default=5, help='training epochs')
+flags.DEFINE_integer(name='batch_size', default=128, help='training batch_size')
+flags.DEFINE_boolean(name='save_h5', default=True, help='whether save h5 file after training')
+flags.DEFINE_integer(name='log_steps', default=234, help='training epochs')
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+ help='allow_fp32_to_fp16/force_fp16/ '
+ 'must_keep_origin_dtype/allow_mix_precision.')
+flags.DEFINE_boolean(name='over_dump', default=False,
+ help='if or not over detection, default is False')
+flags.DEFINE_boolean(name='data_dump_flag', default=False,
+ help='data dump flag, default is False')
+flags.DEFINE_string(name='data_dump_step', default="10",
+ help='data dump step, default is 10')
+flags.DEFINE_boolean(name='profiling', default=False,
+ help='if or not profiling for performance debug, default is False')
+flags.DEFINE_string(name='profiling_dump_path', default="/home/data",
+ help='the path to save profiling data')
+flags.DEFINE_string(name='over_dump_path', default="/home/data",
+ help='the path to save over dump data')
+flags.DEFINE_string(name='data_dump_path', default="/home/data",
+ help='the path to save dump data')
+flags.DEFINE_boolean(name='use_mixlist', default=False,
+ help='whether to enable mixlist, default is True')
+flags.DEFINE_boolean(name='fusion_off_flag', default=False,
+ help='whether to enable mixlist, default is True')
+flags.DEFINE_string(name='mixlist_file', default='ops_info.json',
+ help='mixlist file name, default is ops_info.json')
+flags.DEFINE_string(name='fusion_off_file', default='fusion_switch.cfg',
+ help='fusion_off file name, default is fusion_switch.cfg')
+flags.DEFINE_boolean(name='auto_tune', default=False,
+ help='auto_tune flag, default is False')
+flags.DEFINE_integer(name='static', default=0,
+ help='static, default is 0')
+FLAGS = flags.FLAGS
+
+def npu_config():
+
+
+ npu_config = {}
+
+ if FLAGS.data_dump_flag:
+ npu_device.global_options().dump_config.enable_dump = True
+ npu_device.global_options().dump_config.dump_path = FLAGS.data_dump_path
+ npu_device.global_options().dump_config.dump_step = FLAGS.data_dump_step
+ npu_device.global_options().dump_config.dump_mode = "all"
+
+ if FLAGS.over_dump:
+ npu_device.global_options().dump_config.enable_dump_debug = True
+ npu_device.global_options().dump_config.dump_path = FLAGS.over_dump_path
+ npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+ if FLAGS.profiling:
+ npu_device.global_options().profiling_config.enable_profiling = True
+ profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
+ "training_trace":"on", \
+ "task_trace":"on", \
+ "aicpu":"on", \
+ "aic_metrics":"PipeUtilization",\
+ "fp_point":"", \
+ "bp_point":""}'
+ npu_device.global_options().profiling_config.profiling_options = profiling_options
+ npu_device.global_options().precision_mode=FLAGS.precision_mode
+ if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
+ npu_device.global_options().modify_mixlist=FLAGS.mixlist_file
+ if FLAGS.fusion_off_flag:
+ npu_device.global_options().fusion_switch_file=FLAGS.fusion_off_file
+ if FLAGS.auto_tune:
+ npu_device.global_options().auto_tune_mode="RL,GA"
+ npu_device.open().as_default()
+#===============================NPU Migration=========================================
+
+class TimeHistory(tf.keras.callbacks.Callback):
+    """Keras callback that prints examples/second every `log_steps` batches."""
+
+    def __init__(self, batch_size, log_steps, initial_step=0):
+        self.batch_size = batch_size
+        super(TimeHistory, self).__init__()
+        # Steps completed in previous epochs; keeps global_steps monotonic.
+        self.steps_before_epoch = initial_step
+        self.last_log_step = initial_step
+        self.log_steps = log_steps
+        self.steps_in_epoch = 0
+        self.start_time = None
+
+    @property
+    def global_steps(self):
+        """The current 1-indexed global step."""
+        return self.steps_before_epoch + self.steps_in_epoch
+
+    def on_epoch_begin(self, epoch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+        self.epoch_start = time.time()
+
+    def on_batch_begin(self, batch, logs=None):
+        # Restart the timing window if it was reset by a previous log line.
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs=None):
+        """Emit a 'TimeHistory:' line once `log_steps` steps have elapsed."""
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            print(
+                'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps),flush=True)
+            self.last_log_step = self.global_steps
+            # Reset so the next window starts timing at the next batch begin.
+            self.start_time = None
+
+    def on_epoch_end(self, epoch, logs=None):
+        # NOTE(review): epoch_run_time is computed but never logged or used.
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+
+def task(_):
+    """absl.app entry point: build, train and optionally save the MNIST model.
+
+    The unused argument is argv as passed by app.run().
+    """
+    class StandardizedConv2DWithOverride(layers.Conv2D):
+        # Variant 1: customize the conv by overriding convolution_op() only.
+        def convolution_op(self, inputs, kernel):
+            # Weight standardization: normalize the kernel before convolving.
+            mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
+            return tf.nn.conv2d(
+                inputs,
+                (kernel - mean) / tf.sqrt(var + 1e-10),
+                padding="VALID",
+                strides=list(self.strides),
+                name=self.__class__.__name__,
+            )
+
+    """
+    The other way to use the `Conv.convolution_op()` API is to directly call the
+    `convolution_op()` method from the `call()` method of a convolution layer subclass.
+    A comparable class implemented using this approach is shown below.
+    """
+
+    class StandardizedConv2DWithCall(layers.Conv2D):
+        # Variant 2: call convolution_op() explicitly from call().
+        def convolution_op(self, inputs, kernel):
+            mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
+            return tf.nn.conv2d(
+                inputs,
+                (kernel - mean) / tf.sqrt(var + 1e-10),
+                padding="VALID",
+                strides=list(self.strides),
+                name=self.__class__.__name__,
+            )
+
+        def call(self, inputs):
+            # NOTE(review): the kernel is standardized here AND again inside
+            # convolution_op above, so standardization is applied twice in
+            # this class — confirm this is intended.
+            mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
+            result = self.convolution_op(
+                inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10)
+            )
+            if self.use_bias:
+                result = result + self.bias
+            return result
+
+    """
+    ## Example Usage
+
+    Both of these layers work as drop-in replacements for `Conv2D`. The following
+    demonstration performs classification on the MNIST dataset.
+    """
+
+    # Configure and open the NPU before any model/data work.
+    npu_config()
+
+    # Model / data parameters
+    num_classes = 10
+    input_shape = (28, 28, 1)
+    batch_size = FLAGS.batch_size
+    epochs = FLAGS.epochs
+    # the data, split between train and test sets
+    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(os.path.join(FLAGS.data_path, 'mnist.npz'))
+
+    # Scale images to the [0, 1] range
+    x_train = x_train.astype("float32") / 255
+    x_test = x_test.astype("float32") / 255
+    # Make sure images have shape (28, 28, 1)
+    x_train = np.expand_dims(x_train, -1)
+    x_test = np.expand_dims(x_test, -1)
+    print("x_train shape:", x_train.shape)
+    print(x_train.shape[0], "train samples")
+    print(x_test.shape[0], "test samples")
+
+    # convert class vectors to binary class matrices
+    y_train = keras.utils.to_categorical(y_train, num_classes)
+    y_test = keras.utils.to_categorical(y_test, num_classes)
+    # static==1 drops the remainder batch so every batch has a fixed shape.
+    if FLAGS.static==1:
+        train_ds = (
+            tf.data.Dataset.from_tensor_slices((x_train, y_train))
+            .batch(batch_size, drop_remainder=True))
+    else:
+        train_ds = (
+            tf.data.Dataset.from_tensor_slices((x_train, y_train))
+            .batch(batch_size, drop_remainder=False))
+    train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
+    model = keras.Sequential(
+        [
+            keras.layers.InputLayer(input_shape=input_shape),
+            StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"),
+            layers.MaxPooling2D(pool_size=(2, 2)),
+            StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"),
+            layers.MaxPooling2D(pool_size=(2, 2)),
+            layers.Flatten(),
+            layers.Dropout(0.5),
+            layers.Dense(num_classes, activation="softmax"),
+        ]
+    )
+
+    model.summary()
+    """
+
+    """
+
+    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+    callbacks = [TimeHistory(batch_size,FLAGS.log_steps)]
+    #start_time = time()
+    #model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1, verbose=2)
+    model.fit(train_ds, batch_size=batch_size, epochs=epochs, verbose=2, callbacks=callbacks)
+    #end_time = time()
+    #time_s = end_time - start_time
+    #print("TrainingTime: ", time_s)
+
+    if FLAGS.save_h5:
+        model.save("model.h5")
+    """
+    ## Conclusion
+
+    The `Conv.convolution_op()` API provides an easy and readable way to implement custom
+    convolution layers. A `StandardizedConvolution` implementation using the API is quite
+    terse, consisting of only four lines of code.
+    """
+
+
+if __name__ == '__main__':
+ app.run(task)
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..278486c1a
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# Full-accuracy single-NPU training launcher for this model.
+cur_path=`pwd`/../
+
+# Default log level; no need to modify.
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters; review/modify per model.
+# Batch size
+batch_size=256
+# Network name, same as the directory name
+Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=5
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no need to modify below.
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump  # cur_path is the code root here
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ diagnostic parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+# Parse --key=value command-line overrides.
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        # NOTE(review): duplicate of the first branch above; never reached.
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## run training ##########
+cd $cur_path
+
+# Recreate the per-device output directory.
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 subclassing_conv_layers.py \
+    --data_path=$data_path \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --auto_tune=${auto_tune} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path}>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no need to modify.
+echo "------------------ Final result ------------------"
+# Performance FPS; review/modify per model.
+#FPS=`awk 'BEGIN{printf "%.2f\n",'211'*'${batch_size}'/'${TrainingTime}'}'`
+TrainingTime=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1`
+wait
+
+# Print; no need to modify.
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review/modify per model.
+train_accuracy=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'`
+# Print; no need to modify.
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-watch result summary
+# Training case info; no need to modify.
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Performance data; no need to modify.
+# Throughput
+ActualFPS=${FPS}
+# Per-iteration training time
+TrainingTime=${TrainingTime}
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model.
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no need to modify.
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Key info written to ${CaseName}.log; no need to modify.
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..5a8035e4f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=256
+#网络名称,同目录名称
+Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=2
+#训练step
+#train_steps=50000
+#学习率
+# learning_rate=0.001
+# weight_decay=0.0001
+#参数配置
+data_path=""
+
+############维测参数##############
+precision_mode="allow_mix_precision"
+#维持参数,以下不需要修改
+over_dump=False
+if [[ $over_dump == True ]];then
+ over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录
+ mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############维测参数##############
+
+if [[ $1 == --help || $1 == --h ]];then
+ echo "usage: ./train_performance_1p.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --use_mixlist* ]];then
+ use_mixlist=`echo ${para#*=}`
+ elif [[ $para == --mixlist_file* ]];then
+ mixlist_file=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_flag* ]];then
+ fusion_off_flag=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_file* ]];then
+ fusion_off_file=`echo ${para#*=}`
+ elif [[ $para == --auto_tune* ]];then
+ auto_tune=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 subclassing_conv_layers.py \
+ --data_path=$data_path \
+ --epochs=${train_epochs} \
+ --batch_size=${batch_size} \
+ --precision_mode=${precision_mode} \
+ --over_dump=${over_dump} \
+ --over_dump_path=${over_dump_path} \
+ --data_dump_flag=${data_dump_flag} \
+ --data_dump_step=${data_dump_step} \
+ --data_dump_path=${data_dump_path} \
+ --profiling=${profiling} \
+ --use_mixlist=${use_mixlist} \
+ --fusion_off_flag=${fusion_off_flag} \
+ --mixlist_file=${mixlist_file} \
+ --auto_tune=${auto_tune} \
+ --fusion_off_file=${fusion_off_file} \
+ --profiling_dump_path=${profiling_dump_path} \
+ --static=0 \
+ --log_steps=235 >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep 235/235 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+#FPS=`awk 'BEGIN{printf "%.2f\n",'235'*'${batch_size}'/'${TrainingTime}'}'`
+FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1`
+wait
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+train_accuracy=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+TrainingTime=${TrainingTime}
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh
new file mode 100644
index 000000000..25a5b597b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=256
+#网络名称,同目录名称
+Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=3
+#训练step
+#train_steps=50000
+#学习率
+# learning_rate=0.001
+# weight_decay=0.0001
+#参数配置
+data_path=""
+
+############维测参数##############
+precision_mode="allow_mix_precision"
+#维持参数,以下不需要修改
+over_dump=False
+if [[ $over_dump == True ]];then
+ over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录
+ mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############维测参数##############
+
+if [[ $1 == --help || $1 == --h ]];then
+  echo "usage: ./train_performance_1p_static.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --use_mixlist* ]];then
+ use_mixlist=`echo ${para#*=}`
+ elif [[ $para == --mixlist_file* ]];then
+ mixlist_file=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_flag* ]];then
+ fusion_off_flag=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_file* ]];then
+ fusion_off_file=`echo ${para#*=}`
+ elif [[ $para == --auto_tune* ]];then
+ auto_tune=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 subclassing_conv_layers.py \
+ --data_path=$data_path \
+ --epochs=${train_epochs} \
+ --batch_size=${batch_size} \
+ --precision_mode=${precision_mode} \
+ --over_dump=${over_dump} \
+ --over_dump_path=${over_dump_path} \
+ --data_dump_flag=${data_dump_flag} \
+ --data_dump_step=${data_dump_step} \
+ --data_dump_path=${data_dump_path} \
+ --profiling=${profiling} \
+ --use_mixlist=${use_mixlist} \
+ --fusion_off_flag=${fusion_off_flag} \
+ --mixlist_file=${mixlist_file} \
+ --auto_tune=${auto_tune} \
+ --fusion_off_file=${fusion_off_file} \
+ --profiling_dump_path=${profiling_dump_path} \
+ --static=1 \
+ --log_steps=234>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep 234/ $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+#FPS=`awk 'BEGIN{printf "%.2f\n",'234'*'${batch_size}'/'${TrainingTime}'}'`
+FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1`
+wait
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+TrainingTime=${TrainingTime}
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
--
Gitee
From 742d749d99f38343d5d463cd7153e749f30f28eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 03:21:38 +0000
Subject: [PATCH 03/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/.keep?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/.keep
diff --git a/TensorFlow2/built-in/keras_sample/.keep b/TensorFlow2/built-in/keras_sample/.keep
deleted file mode 100644
index e69de29bb..000000000
--
Gitee
From 5b9a7ed71a8460b787b774d4998e7ef7148a0f0d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:15:07 +0000
Subject: [PATCH 04/54] =?UTF-8?q?zero=5Fdce=5FID2548=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../zero_dce_ID2548_for_TensorFlow2.X/LICENSE | 21 +
.../README.md | 197 ++++++
.../modelzoo_level.txt | 3 +
.../requirements.txt | 0
.../test/train_full_1p.sh | 231 +++++++
.../test/train_performance_1p.sh | 230 +++++++
.../zero_dce.py | 609 ++++++++++++++++++
7 files changed, 1291 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..51d555a15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..350ffb972
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md
@@ -0,0 +1,197 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Image Classification**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2022.4.8**
+
+**大小(Size):324KB**
+
+**框架(Framework):TensorFlow_2.4.1**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow框架的计算机视觉和模式识别网络训练代码**
+
+概述
+
+## 简述
+
+- 参考论文:
+
+    https://arxiv.org/abs/2001.06826
+
+- 参考实现:
+
+    https://github.com/Li-Chongyi/Zero-DCE
+
+
+- 适配昇腾 AI 处理器的实现:
+
+ skip
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+
+
+## 默认配置
+- 网络结构
+ - 24-layer, 1024-hidden, 16-heads, 340M parameters
+
+- 训练超参(单卡):
+ - Batch size: 16
+ - Train epoch: 100
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+拉起脚本中,传入--precision_mode='allow_mix_precision'
+
+```
+ ./train_full_1p.sh --help
+
+parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+```
+
+相关代码示例:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+ help='allow_fp32_to_fp16/force_fp16/ '
+ 'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+快速上手
+
+## 数据集准备
+
+1、用户自行准备好数据集。使用的数据集是lol_dataset
+
+数据集目录参考如下:
+
+```
+├── lol_dataset
+│ ├──eval15
+│ │ ├──high
+ ├──......
+│ │ ├──low
+ ├──......
+│ ├──our485
+│ │ ├──high
+ ├──......
+│ │ ├──low
+ ├──......
+```
+
+
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+ 2. 单卡训练
+
+ 2.1 单卡训练指令(脚本位于zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh),其中“--data_path”修改为数据集的的路径。
+
+
+高级参考
+
+## 脚本和示例代码
+
+```
+|--test #训练脚本目录
+| |--train_full_1p.sh
+| |--train_performance_1p.sh
+| |--......
+|--zero_dce.py
+|--......
+```
+
+## 脚本参数
+
+```
+ --batch_size Total batch size for training,default:16
+ --epochs epochs ,default:100
+ --learning_rate learning_rate,default:1e-4
+ --data_path data_path,default:./lol_dataset
+ --log_steps steps per log,default:1e-4
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision),default:allow_mix_precision
+ --over_dump if or not over detection,default:False
+ --data_dump_flag data dump flag, default:False
+ --data_dump_step data dump step, default:10
+ --profiling profiling,default:False
+ --profiling_dump_path profiling_dump_path,default:/home/data
+ --over_dump_path over_dump_path,default:/home/data
+ --data_dump_path data_dump_path,default:/home/data
+ --use_mixlist use_mixlist flag,default:False
+ --fusion_off_flag fusion_off flag,default:False
+ --mixlist_file mixlist file name,default:ops_info.json
+    --fusion_off_file        fusion_off_file,default:fusion_switch.cfg
+ --auto_tune auto_tune flag, default:False
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
+
+
+
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..9f9b36084
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:PERFECT
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..80ef963c3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,231 @@
+#!/bin/bash
+
+#当前路径,不需要修改
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+#集合通信参数,不需要修改
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# 数据集路径,保持为空,不需要修改
+data_path=""
+
+#基础参数,需要模型审视修改
+#网络名称,同目录名称
+Network="zero_dce_ID2548_for_TensorFlow2.X"
+#训练epoch
+train_epochs=100
+#训练batch_size
+batch_size=16
+
+# #维测参数,precision_mode需要模型审视修改
+# precision_mode="allow_mix_precision"
+# #维持参数,以下不需要修改
+# over_dump=False
+# data_dump_flag=False
+# data_dump_step="10"
+# profiling=False
+# autotune=False
+
+############维测参数##############
+precision_mode="allow_mix_precision"
+#维持参数,以下不需要修改
+over_dump=False
+if [[ $over_dump == True ]];then
+ over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录
+ mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="ops_info.json"
+fusion_off_flag=False
+fusion_off_file="fusion_switch.cfg"
+auto_tune=False
+############维测参数##############
+
+# 帮助信息,不需要修改
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ --auto_tune if or not auto_tune, default is False
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# #参数校验,不需要修改
+# for para in $*
+# do
+# if [[ $para == --precision_mode* ]];then
+# precision_mode=`echo ${para#*=}`
+# elif [[ $para == --over_dump* ]];then
+# over_dump=`echo ${para#*=}`
+# over_dump_path=${cur_path}/output/overflow_dump
+# mkdir -p ${over_dump_path}
+# elif [[ $para == --data_dump_flag* ]];then
+# data_dump_flag=`echo ${para#*=}`
+# data_dump_path=${cur_path}/output/data_dump
+# mkdir -p ${data_dump_path}
+# elif [[ $para == --data_dump_step* ]];then
+# data_dump_step=`echo ${para#*=}`
+# elif [[ $para == --profiling* ]];then
+# profiling=`echo ${para#*=}`
+# profiling_dump_path=${cur_path}/output/profiling
+# mkdir -p ${profiling_dump_path}
+# elif [[ $para == --data_path* ]];then
+# data_path=`echo ${para#*=}`
+# fi
+# done
+
+############维测参数##############
+for para in $*
+do
+ if [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --use_mixlist* ]];then
+ use_mixlist=`echo ${para#*=}`
+ elif [[ $para == --mixlist_file* ]];then
+ mixlist_file=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_flag* ]];then
+ fusion_off_flag=`echo ${para#*=}`
+ elif [[ $para == --fusion_off_file* ]];then
+ fusion_off_file=`echo ${para#*=}`
+ elif [[ $para == --auto_tune* ]];then
+ auto_tune=`echo ${para#*=}`
+ fi
+done
+############维测参数##############
+
+#校验是否传入data_path,不需要修改
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+#训练开始时间,不需要修改
+start_time=$(date +%s)
+
+#进入训练脚本目录,需要模型审视修改
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+ #设置环境变量,不需要修改
+ echo "Device ID: $ASCEND_DEVICE_ID"
+ export RANK_ID=$RANK_ID
+
+
+
+ #创建DeviceID输出目录,不需要修改
+ if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+ rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ else
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ fi
+
+ #执行训练脚本,以下传参不需要修改,其他需要模型审视修改
+ python3 zero_dce.py \
+ --data_path=$data_path/lol_dataset/ \
+ --epochs=$train_epochs \
+ --batch_size=$batch_size \
+ --precision_mode=${precision_mode} \
+ --over_dump=${over_dump} \
+ --over_dump_path=${over_dump_path} \
+ --data_dump_flag=${data_dump_flag} \
+ --data_dump_step=${data_dump_step} \
+ --data_dump_path=${data_dump_path} \
+ --profiling=${profiling} \
+ --use_mixlist=${use_mixlist} \
+ --fusion_off_flag=${fusion_off_flag} \
+ --mixlist_file=${mixlist_file} \
+ --auto_tune=${auto_tune} \
+ --fusion_off_file=${fusion_off_file} \
+ --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+#conda deactivate
+#训练结束时间,不需要修改
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 1 | awk -F'-' '{print $2}' | awk -F' ' '{print $2}' | awk -F'ms' '{print $1}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'`
+single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+#train_accuracy=null
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+#稳定性精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+##获取性能数据
+#吞吐量,不需要修改
+ActualFPS=${FPS}
+#单迭代训练时长,不需要修改
+# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'`
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}'>> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep total_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+#最后一个迭代loss值,不需要修改
+# ActualLoss=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}' | tail -n 1`
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+train_accuracy=${ActualLoss}
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..458bef301
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,230 @@
+#!/bin/bash
+
+# Current working directory; no modification needed.
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+# Collective-communication parameters; no modification needed.
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# Dataset path; keep empty here (supplied at run time via --data_path).
+data_path=""
+
+# Basic parameters; review and adjust per model.
+# Network name, identical to the directory name.
+Network="zero_dce_ID2548_for_TensorFlow2.X"
+# Number of training epochs.
+train_epochs=50
+# Training batch size.
+batch_size=16
+
+# # Older debug-parameter block (superseded by the section below).
+# precision_mode="allow_mix_precision"
+# # Maintenance parameters; no modification needed.
+# over_dump=False
+# data_dump_flag=False
+# data_dump_step="10"
+# profiling=False
+# autotune=False
+
+############ Debug/maintenance parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below.
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="ops_info.json"
+fusion_off_flag=False
+fusion_off_file="fusion_switch.cfg"
+auto_tune=False
+############ Debug/maintenance parameters ##############
+
+# Help message; no modification needed.
+if [[ $1 == --help || $1 == -h ]];then
+    # Fixed: the original read `echo"usage..."` (no space after `echo`), so
+    # bash tried to execute a command literally named `echo"usage..."` and
+    # the usage line was never printed.
+    echo "usage:./train_performance_1p.sh "
+    echo " "
+    echo "parameter explain:
+    --precision_mode           precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump		           if or not over detection, default is False
+    --data_dump_flag		   data dump flag, default is False
+    --data_dump_step		   data dump step, default is 10
+    --profiling		           if or not profiling for performance debug, default is False
+    --data_path		           source data of training
+    --auto_tune                if or not auto_tune, default is False
+    -h/--help		           show help message
+    "
+    exit 1
+fi
+
+# # Older argument-parsing loop (superseded by the loop below); kept for reference.
+# for para in $*
+# do
+#     if [[ $para == --precision_mode* ]];then
+#         precision_mode=`echo ${para#*=}`
+#     elif [[ $para == --over_dump* ]];then
+#         over_dump=`echo ${para#*=}`
+#         over_dump_path=${cur_path}/output/overflow_dump
+#         mkdir -p ${over_dump_path}
+#     elif [[ $para == --data_dump_flag* ]];then
+#         data_dump_flag=`echo ${para#*=}`
+#         data_dump_path=${cur_path}/output/data_dump
+#         mkdir -p ${data_dump_path}
+#     elif [[ $para == --data_dump_step* ]];then
+#         data_dump_step=`echo ${para#*=}`
+#     elif [[ $para == --profiling* ]];then
+#         profiling=`echo ${para#*=}`
+#         profiling_dump_path=${cur_path}/output/profiling
+#         mkdir -p ${profiling_dump_path}
+#     elif [[ $para == --data_path* ]];then
+#         data_path=`echo ${para#*=}`
+#     fi
+# done
+
+############ Debug/maintenance parameters ##############
+# Parse command-line overrides (--key=value) for data path and the
+# dump / profiling / mixlist / fusion / auto-tune switches.
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Debug/maintenance parameters ##############
+
+# Verify that data_path was provided; no modification needed.
+if [[ $data_path == "" ]];then
+    # Fixed: error message typo "confing" -> "configured".
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+# Training start time; no modification needed.
+start_time=$(date +%s)
+
+# Enter the training-script directory; review per model.
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no modification needed.
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+
+
+    # Create the per-device output directory; no modification needed.
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Launch training; the pass-through flags below need no change, the rest
+    # should be reviewed per model.
+    # NOTE(review): *_dump_path / profiling_dump_path are only set when the
+    # corresponding --flag was passed; otherwise empty values are forwarded.
+    python3 zero_dce.py \
+        --data_path=$data_path/lol_dataset/ \
+        --epochs=$train_epochs \
+        --batch_size=$batch_size \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --auto_tune=${auto_tune} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+#conda deactivate
+#训练结束时间,不需要修改
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 1 | awk -F'-' '{print $2}' | awk -F' ' '{print $2}' | awk -F'ms' '{print $1}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'`
+single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+#train_accuracy=null
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+#稳定性精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取性能数据
+#吞吐量,不需要修改
+ActualFPS=${FPS}
+#单迭代训练时长,不需要修改
+# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'`
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}'>> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep total_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+#最后一个迭代loss值,不需要修改
+# ActualLoss=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}' | tail -n 1`
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+train_accuracy=${ActualLoss}
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py
new file mode 100644
index 000000000..367f3982f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py
@@ -0,0 +1,609 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Zero-DCE for low-light image enhancement
+Author: [Soumik Rakshit](http://github.com/soumik12345)
+Date created: 2021/09/18
+Last modified: 2021/09/19
+Description: Implementing Zero-Reference Deep Curve Estimation for low-light image enhancement.
+"""
+"""
+## Introduction
+
+**Zero-Reference Deep Curve Estimation** or **Zero-DCE** formulates low-light image
+enhancement as the task of estimating an image-specific
+[*tonal curve*](https://en.wikipedia.org/wiki/Curve_(tonality)) with a deep neural network.
+In this example, we train a lightweight deep network, **DCE-Net**, to estimate
+pixel-wise and high-order tonal curves for dynamic range adjustment of a given image.
+
+Zero-DCE takes a low-light image as input and produces high-order tonal curves as its output.
+These curves are then used for pixel-wise adjustment on the dynamic range of the input to
+obtain an enhanced image. The curve estimation process is done in such a way that it maintains
+the range of the enhanced image and preserves the contrast of neighboring pixels. This
+curve estimation is inspired by curves adjustment used in photo editing software such as
+Adobe Photoshop where users can adjust points throughout an image’s tonal range.
+
+Zero-DCE is appealing because of its relaxed assumptions with regard to reference images:
+it does not require any input/output image pairs during training.
+This is achieved through a set of carefully formulated non-reference loss functions,
+which implicitly measure the enhancement quality and guide the training of the network.
+
+### References
+
+- [Zero-Reference Deep Curve Estimation for Low-Light Image Enhancement](https://arxiv.org/pdf/2001.06826.pdf)
+- [Curves adjustment in Adobe Photoshop](https://helpx.adobe.com/photoshop/using/curves-adjustment.html)
+"""
+
+"""
+## Downloading LOLDataset
+
+The **LoL Dataset** has been created for low-light image enhancement. It provides 485
+images for training and 15 for testing. Each image pair in the dataset consists of a
+low-light input image and its corresponding well-exposed reference image.
+"""
+
+import npu_device
+import argparse
+import ast
+#===============================NPU Migration=========================================
+parser = argparse.ArgumentParser()
+parser.add_argument('--batch_size', type=int, default=16, help='batch_size')
+parser.add_argument('--epochs', type=int, default=100, help='epochs')
+parser.add_argument('--learning_rate', type=int, default=1e-4, help='learning_rate')
+parser.add_argument('--data_path', type=str, default='./lol_dataset', help='data path')
+parser.add_argument('--log_steps', type=int, default=25, help='steps per log')
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+ help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+ help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+ help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+ help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+ help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,
+ help='auto_tune flag, default is False')
+args = parser.parse_args()
+
+def npu_config():
+    """Configure the Ascend NPU runtime from the parsed CLI arguments.
+
+    Enables data dump, overflow (debug) dump and profiling when requested,
+    sets the precision mode / mixed-precision op list / fusion-switch file,
+    optionally turns on auto-tune, and finally opens the NPU device as the
+    default device for subsequent TensorFlow ops.
+    """
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        # JSON option string consumed by the NPU profiler; fp_point/bp_point
+        # are left empty (presumably auto-detected -- TODO confirm against
+        # the npu_device documentation).
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode = args.precision_mode
+    if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+        # Mixlist / fusion-switch files are resolved relative to the parent dir.
+        npu_device.global_options().modify_mixlist="../configs/"+args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file="../configs/"+args.fusion_off_file
+    if args.auto_tune:
+        npu_device.global_options().auto_tune_mode="RL,GA"
+    # Open the NPU and make it the default device.
+    npu_device.open().as_default()
+#===============================NPU Migration=========================================
+npu_config()
+
+import os
+import time
+import random
+import numpy as np
+from glob import glob
+from PIL import Image, ImageOps
+import matplotlib.pyplot as plt
+
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+
+# def init_arg():
+# parser = argparse.ArgumentParser()
+# parser.add_argument('--batch_size', type=int, default=16, help='batch_size')
+# parser.add_argument('--epochs', type=int, default=100, help='epochs')
+# parser.add_argument('--learning_rate', type=int, default=1e-4, help='learning_rate')
+# parser.add_argument('--data_path', type=str, default='./lol_dataset', help='data path')
+# return parser.parse_args()
+
+
+# args = init_arg()
+
+"""shell
+gdown https://drive.google.com/uc?id=1DdGIJ4PZPlF2ikl8mNM9V-PdVxVLbQi6
+unzip -q lol_dataset.zip
+"""
+
+"""
+## Creating a TensorFlow Dataset
+
+We use 400 low-light images from the LoL Dataset training set for training, and we use
+the remaining 85 low-light images for validation. We resize the images to size `256 x
+256` to be used for both training and validation. Note that in order to train the DCE-Net,
+we will not require the corresponding enhanced images.
+"""
+
+# Image side length used for both training and validation.
+IMAGE_SIZE = 256
+BATCH_SIZE = args.batch_size
+# Number of low-light images used for training; the rest go to validation.
+MAX_TRAIN_IMAGES = 400
+
+
+def load_data(image_path):
+    """Read a PNG from `image_path`, resize to IMAGE_SIZE x IMAGE_SIZE, scale to [0, 1]."""
+    image = tf.io.read_file(image_path)
+    image = tf.image.decode_png(image, channels=3)
+    image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
+    image = image / 255.0
+    return image
+
+
+def data_generator(low_light_images):
+    """Build a batched tf.data pipeline over the given image paths (no labels needed)."""
+    dataset = tf.data.Dataset.from_tensor_slices((low_light_images))
+    dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
+    # drop_remainder keeps every batch at exactly BATCH_SIZE (static shapes).
+    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
+    return dataset
+
+
+# Split the LoL "our485" low-light images: the first MAX_TRAIN_IMAGES are used
+# for training, the remainder for validation; eval15 is reserved for testing.
+train_low_light_images = sorted(glob(args.data_path + "/our485/low/*"))[:MAX_TRAIN_IMAGES]
+val_low_light_images = sorted(glob(args.data_path + "/our485/low/*"))[MAX_TRAIN_IMAGES:]
+test_low_light_images = sorted(glob(args.data_path + "/eval15/low/*"))
+
+
+train_dataset = data_generator(train_low_light_images)
+val_dataset = data_generator(val_low_light_images)
+
+print("Train Dataset:", train_dataset)
+print("Validation Dataset:", val_dataset)
+
+"""
+## The Zero-DCE Framework
+
+The goal of DCE-Net is to estimate a set of best-fitting light-enhancement curves
+(LE-curves) given an input image. The framework then maps all pixels of the input’s RGB
+channels by applying the curves iteratively to obtain the final enhanced image.
+
+### Understanding light-enhancement curves
+
+A light-enhancement curve is a kind of curve that can map a low-light image
+to its enhanced version automatically,
+where the self-adaptive curve parameters are solely dependent on the input image.
+When designing such a curve, three objectives should be taken into account:
+
+- Each pixel value of the enhanced image should be in the normalized range `[0,1]`, in order to
+avoid information loss induced by overflow truncation.
+- It should be monotonous, to preserve the contrast between neighboring pixels.
+- The shape of this curve should be as simple as possible,
+and the curve should be differentiable to allow backpropagation.
+
+The light-enhancement curve is separately applied to three RGB channels instead of solely on the
+illumination channel. The three-channel adjustment can better preserve the inherent color and reduce
+the risk of over-saturation.
+
+
+
+### DCE-Net
+
+The DCE-Net is a lightweight deep neural network that learns the mapping between an input
+image and its best-fitting curve parameter maps. The input to the DCE-Net is a low-light
+image while the outputs are a set of pixel-wise curve parameter maps for corresponding
+higher-order curves. It is a plain CNN of seven convolutional layers with symmetrical
+concatenation. Each layer consists of 32 convolutional kernels of size 3×3 and stride 1
+followed by the ReLU activation function. The last convolutional layer is followed by the
+Tanh activation function, which produces 24 parameter maps for 8 iterations, where each
+iteration requires three curve parameter maps for the three channels.
+
+
+"""
+
+
+def build_dce_net():
+    """Build DCE-Net: a 7-layer CNN with symmetric skip concatenations.
+
+    Accepts an RGB image of any spatial size and outputs 24 curve parameter
+    maps (8 enhancement iterations x 3 channels), squashed to [-1, 1] by tanh.
+    """
+    input_img = keras.Input(shape=[None, None, 3])
+    conv1 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(input_img)
+    conv2 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv1)
+    conv3 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv2)
+    conv4 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv3)
+    # Symmetric skip connections: later layers see earlier feature maps.
+    int_con1 = layers.Concatenate(axis=-1)([conv4, conv3])
+    conv5 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(int_con1)
+    int_con2 = layers.Concatenate(axis=-1)([conv5, conv2])
+    conv6 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(int_con2)
+    int_con3 = layers.Concatenate(axis=-1)([conv6, conv1])
+    x_r = layers.Conv2D(24, (3, 3), strides=(1, 1), activation="tanh", padding="same")(
+        int_con3
+    )
+    return keras.Model(inputs=input_img, outputs=x_r)
+
+
+"""
+## Loss functions
+
+To enable zero-reference learning in DCE-Net, we use a set of differentiable
+zero-reference losses that allow us to evaluate the quality of enhanced images.
+"""
+
+"""
+### Color constancy loss
+
+The *color constancy loss* is used to correct the potential color deviations in the
+enhanced image.
+"""
+
+
+def color_constancy_loss(x):
+    """Penalize deviation between the mean R/G/B intensities of the image.
+
+    NOTE(review): the pairwise channel differences are squared twice (once per
+    pair, once more inside the final sqrt); this mirrors the reference
+    Zero-DCE implementation -- confirm upstream before changing.
+    """
+    mean_rgb = tf.reduce_mean(x, axis=(1, 2), keepdims=True)
+    mr, mg, mb = mean_rgb[:, :, :, 0], mean_rgb[:, :, :, 1], mean_rgb[:, :, :, 2]
+    d_rg = tf.square(mr - mg)
+    d_rb = tf.square(mr - mb)
+    d_gb = tf.square(mb - mg)
+    return tf.sqrt(tf.square(d_rg) + tf.square(d_rb) + tf.square(d_gb))
+
+
+"""
+### Exposure loss
+
+To restrain under-/over-exposed regions, we use the *exposure control loss*.
+It measures the distance between the average intensity value of a local region
+and a preset well-exposedness level (set to `0.6`).
+"""
+
+
+def exposure_loss(x, mean_val=0.6):
+    """Distance of the local (16x16) average intensity from the well-exposedness level `mean_val`."""
+    # Collapse channels to a single intensity map, then pool 16x16 regions.
+    x = tf.reduce_mean(x, axis=3, keepdims=True)
+    mean = tf.nn.avg_pool2d(x, ksize=16, strides=16, padding="VALID")
+    return tf.reduce_mean(tf.square(mean - mean_val))
+
+
+"""
+### Illumination smoothness loss
+
+To preserve the monotonicity relations between neighboring pixels, the
+*illumination smoothness loss* is added to each curve parameter map.
+"""
+
+
+def illumination_smoothness_loss(x):
+    """Total-variation-style smoothness penalty on the curve parameter maps.
+
+    NOTE(review): count_h is derived from shape[2]/shape[3] rather than
+    shape[1]; this matches the reference Zero-DCE implementation but looks
+    like it may have been intended as (h-1)*w -- confirm upstream.
+    """
+    batch_size = tf.shape(x)[0]
+    h_x = tf.shape(x)[1]
+    w_x = tf.shape(x)[2]
+    count_h = (tf.shape(x)[2] - 1) * tf.shape(x)[3]
+    count_w = tf.shape(x)[2] * (tf.shape(x)[3] - 1)
+    # Sum of squared differences between vertically / horizontally adjacent pixels.
+    h_tv = tf.reduce_sum(tf.square((x[:, 1:, :, :] - x[:, : h_x - 1, :, :])))
+    w_tv = tf.reduce_sum(tf.square((x[:, :, 1:, :] - x[:, :, : w_x - 1, :])))
+    batch_size = tf.cast(batch_size, dtype=tf.float32)
+    count_h = tf.cast(count_h, dtype=tf.float32)
+    count_w = tf.cast(count_w, dtype=tf.float32)
+    return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
+
+
+"""
+### Spatial consistency loss
+
+The *spatial consistency loss* encourages spatial coherence of the enhanced image by
+preserving the contrast between neighboring regions across the input image and its enhanced version.
+"""
+
+
+class SpatialConsistencyLoss(keras.losses.Loss):
+    """Spatial consistency loss.
+
+    Encourages the enhanced image to preserve the contrast between
+    neighbouring 4x4 regions of the input: directional differences
+    (left/right/up/down) of the average-pooled luminance are compared
+    between original and enhanced images.
+    """
+
+    def __init__(self, **kwargs):
+        # Reduction is forced to "none"; any extra kwargs are ignored.
+        super(SpatialConsistencyLoss, self).__init__(reduction="none")
+
+        # 3x3 difference kernels, one per neighbour direction.
+        self.left_kernel = tf.constant(
+            [[[[0, 0, 0]], [[-1, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
+        )
+        self.right_kernel = tf.constant(
+            [[[[0, 0, 0]], [[0, 1, -1]], [[0, 0, 0]]]], dtype=tf.float32
+        )
+        self.up_kernel = tf.constant(
+            [[[[0, -1, 0]], [[0, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
+        )
+        self.down_kernel = tf.constant(
+            [[[[0, 0, 0]], [[0, 1, 0]], [[0, -1, 0]]]], dtype=tf.float32
+        )
+
+    def call(self, y_true, y_pred):
+        """Return per-position squared differences of directional gradients.
+
+        `y_true` is the original image, `y_pred` the enhanced image.
+        """
+        # Collapse channels, then average-pool 4x4 regions.
+        original_mean = tf.reduce_mean(y_true, 3, keepdims=True)
+        enhanced_mean = tf.reduce_mean(y_pred, 3, keepdims=True)
+        original_pool = tf.nn.avg_pool2d(
+            original_mean, ksize=4, strides=4, padding="VALID"
+        )
+        enhanced_pool = tf.nn.avg_pool2d(
+            enhanced_mean, ksize=4, strides=4, padding="VALID"
+        )
+
+        d_original_left = tf.nn.conv2d(
+            original_pool, self.left_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_original_right = tf.nn.conv2d(
+            original_pool, self.right_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_original_up = tf.nn.conv2d(
+            original_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_original_down = tf.nn.conv2d(
+            original_pool, self.down_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+
+        d_enhanced_left = tf.nn.conv2d(
+            enhanced_pool, self.left_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_enhanced_right = tf.nn.conv2d(
+            enhanced_pool, self.right_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_enhanced_up = tf.nn.conv2d(
+            enhanced_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+        d_enhanced_down = tf.nn.conv2d(
+            enhanced_pool, self.down_kernel, strides=[1, 1, 1, 1], padding="SAME"
+        )
+
+        d_left = tf.square(d_original_left - d_enhanced_left)
+        d_right = tf.square(d_original_right - d_enhanced_right)
+        d_up = tf.square(d_original_up - d_enhanced_up)
+        d_down = tf.square(d_original_down - d_enhanced_down)
+        return d_left + d_right + d_up + d_down
+
+
+"""
+### Deep curve estimation model
+
+We implement the Zero-DCE framework as a Keras subclassed model.
+"""
+
+
+class ZeroDCE(keras.Model):
+    """Zero-DCE wrapper model: trains DCE-Net with the four zero-reference losses."""
+
+    def __init__(self, **kwargs):
+        super(ZeroDCE, self).__init__(**kwargs)
+        self.dce_model = build_dce_net()
+
+    def compile(self, learning_rate, **kwargs):
+        """Attach an Adam optimizer and the spatial-consistency loss.
+
+        Note: this deliberately replaces the stock
+        `compile(optimizer=..., loss=...)` signature.
+        """
+        super(ZeroDCE, self).compile(**kwargs)
+        self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
+        self.spatial_constancy_loss = SpatialConsistencyLoss(reduction="none")
+
+    def get_enhanced_image(self, data, output):
+        """Apply the 8 estimated LE-curves iteratively to enhance `data`.
+
+        `output` carries 24 channels = 8 curve maps of 3 channels each; each
+        iteration applies x <- x + r * (x^2 - x).
+        """
+        r1 = output[:, :, :, :3]
+        r2 = output[:, :, :, 3:6]
+        r3 = output[:, :, :, 6:9]
+        r4 = output[:, :, :, 9:12]
+        r5 = output[:, :, :, 12:15]
+        r6 = output[:, :, :, 15:18]
+        r7 = output[:, :, :, 18:21]
+        r8 = output[:, :, :, 21:24]
+        x = data + r1 * (tf.square(data) - data)
+        x = x + r2 * (tf.square(x) - x)
+        x = x + r3 * (tf.square(x) - x)
+        enhanced_image = x + r4 * (tf.square(x) - x)
+        x = enhanced_image + r5 * (tf.square(enhanced_image) - enhanced_image)
+        x = x + r6 * (tf.square(x) - x)
+        x = x + r7 * (tf.square(x) - x)
+        enhanced_image = x + r8 * (tf.square(x) - x)
+        return enhanced_image
+
+    def call(self, data):
+        """Forward pass: run DCE-Net, then apply the curves to the input."""
+        dce_net_output = self.dce_model(data)
+        return self.get_enhanced_image(data, dce_net_output)
+
+    def compute_losses(self, data, output):
+        """Compute the four weighted zero-reference losses and their sum."""
+        enhanced_image = self.get_enhanced_image(data, output)
+        loss_illumination = 200 * illumination_smoothness_loss(output)
+        loss_spatial_constancy = tf.reduce_mean(
+            self.spatial_constancy_loss(enhanced_image, data)
+        )
+        loss_color_constancy = 5 * tf.reduce_mean(color_constancy_loss(enhanced_image))
+        loss_exposure = 10 * tf.reduce_mean(exposure_loss(enhanced_image))
+        total_loss = (
+            loss_illumination
+            + loss_spatial_constancy
+            + loss_color_constancy
+            + loss_exposure
+        )
+        return {
+            "total_loss": total_loss,
+            "illumination_smoothness_loss": loss_illumination,
+            "spatial_constancy_loss": loss_spatial_constancy,
+            "color_constancy_loss": loss_color_constancy,
+            "exposure_loss": loss_exposure,
+        }
+
+    def train_step(self, data):
+        """Custom training step: gradient of total_loss w.r.t. DCE-Net weights only."""
+        with tf.GradientTape() as tape:
+            output = self.dce_model(data)
+            losses = self.compute_losses(data, output)
+        gradients = tape.gradient(
+            losses["total_loss"], self.dce_model.trainable_weights
+        )
+        self.optimizer.apply_gradients(zip(gradients, self.dce_model.trainable_weights))
+        return losses
+
+    def test_step(self, data):
+        """Validation step: compute the losses without a gradient update."""
+        output = self.dce_model(data)
+        return self.compute_losses(data, output)
+
+    def save_weights(self, filepath, overwrite=True, save_format=None, options=None):
+        """While saving the weights, we simply save the weights of the DCE-Net"""
+        self.dce_model.save_weights(
+            filepath, overwrite=overwrite, save_format=save_format, options=options
+        )
+
+    def load_weights(self, filepath, by_name=False, skip_mismatch=False, options=None):
+        """While loading the weights, we simply load the weights of the DCE-Net"""
+        self.dce_model.load_weights(
+            filepath=filepath,
+            by_name=by_name,
+            skip_mismatch=skip_mismatch,
+            options=options,
+        )
+
+"""
+## Add time history callbacks
+"""
+
+class TimeHistory(tf.keras.callbacks.Callback):
+    """Keras callback that logs throughput (examples/second) every `log_steps` batches."""
+
+    def __init__(self, batch_size, log_steps, initial_step=0):
+        self.batch_size = batch_size
+        super(TimeHistory, self).__init__()
+        # Global-step bookkeeping across epochs.
+        self.steps_before_epoch = initial_step
+        self.last_log_step = initial_step
+        self.log_steps = log_steps
+        self.steps_in_epoch = 0
+        self.start_time = None
+
+    @property
+    def global_steps(self):
+        """The current 1-indexed global step."""
+        return self.steps_before_epoch + self.steps_in_epoch
+
+    def on_epoch_begin(self, epoch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+        self.epoch_start = time.time()
+
+    def on_batch_begin(self, batch, logs=None):
+        # (Re)start the timing window after a log line reset it.
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs=None):
+        """Every `log_steps` batches, print elapsed time and examples/second."""
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            print(
+                'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps),flush=True)
+            self.last_log_step = self.global_steps
+            # Cleared so the next batch restarts the timing window.
+            self.start_time = None
+
+    def on_epoch_end(self, epoch, logs=None):
+        # NOTE(review): epoch_run_time is computed but never reported.
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+"""
+## Training
+"""
+
+# Training entry point: build and compile the Zero-DCE model, fit it with the
+# throughput-logging callback, then persist only the DCE-Net weights.
+ckpt_path = './ckpt/checkpoint'
+zero_dce_model = ZeroDCE()
+zero_dce_model.compile(learning_rate=args.learning_rate)
+history = zero_dce_model.fit(train_dataset, validation_data=val_dataset, epochs=args.epochs, verbose=2, callbacks=[TimeHistory(args.batch_size,args.log_steps)],)
+zero_dce_model.save_weights(ckpt_path)
+
+
+#def plot_result(item):
+# plt.plot(history.history[item], label=item)
+# plt.plot(history.history["val_" + item], label="val_" + item)
+# plt.xlabel("Epochs")
+# plt.ylabel(item)
+# plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
+# plt.legend()
+# plt.grid()
+# plt.show()
+
+
+#plot_result("total_loss")
+#plot_result("illumination_smoothness_loss")
+#plot_result("spatial_constancy_loss")
+#plot_result("color_constancy_loss")
+#plot_result("exposure_loss")
+
+"""
+## Inference
+"""
+
+
+#def plot_results(images, titles, figure_size=(12, 12)):
+# fig = plt.figure(figsize=figure_size)
+# for i in range(len(images)):
+# fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
+# _ = plt.imshow(images[i])
+# plt.axis("off")
+# plt.show()
+
+
+#def infer(original_image):
+# image = keras.preprocessing.image.img_to_array(original_image)
+# image = image.astype("float32") / 255.0
+# image = np.expand_dims(image, axis=0)
+# output_image = zero_dce_model(image)
+# output_image = tf.cast((output_image[0, :, :, :] * 255), dtype=np.uint8)
+# output_image = Image.fromarray(output_image.numpy())
+# return output_image
+
+
+"""
+### Inference on test images
+
+We compare the test images from LOLDataset enhanced by Zero-DCE with images enhanced via
+the `PIL.ImageOps.autocontrast()` function.
+"""
+
+#for val_image_file in test_low_light_images:
+# original_image = Image.open(val_image_file)
+# enhanced_image = infer(original_image)
+# plot_results(
+# [original_image, ImageOps.autocontrast(original_image), enhanced_image],
+# ["Original", "PIL Autocontrast", "Enhanced"],
+# (20, 12),
+# )
--
Gitee
From 4c53de40bdfd90608d4529ba4dc9e7fea6ca23a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:17:15 +0000
Subject: [PATCH 05/54] =?UTF-8?q?TF2.X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cutmix_ID2502_for_TensorFlow2.X/LICENSE | 284 ++++++++++
.../cutmix_ID2502_for_TensorFlow2.X/README.md | 488 ++++++++++++++++++
.../requirements.txt | 4 +
.../test/train_full_1p.sh | 122 +++++
.../test/train_performance_1p_dynamic_eval.sh | 128 +++++
.../test/train_performance_1p_static_eval.sh | 122 +++++
.../cutmix_ID2502_for_TensorFlow2.X/train.py | 486 +++++++++++++++++
7 files changed, 1634 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..ab652360b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,284 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------
+Files: third_party/compute_library/...
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+------------------
+Files: ACKNOWLEDGEMENTS
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+Files: third_party/hexagon
+
+Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of The Linux Foundation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..e30ebbbb7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md
@@ -0,0 +1,488 @@
+# CutMix data augmentation for image classification
+
+**Author:** [Sayan Nath](https://twitter.com/sayannath2350)
+**Date created:** 2021/06/08
+**Last modified:** 2021/06/08
+**Description:** Data augmentation with CutMix for image classification on CIFAR-10.
+
+
+
+[**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/cutmix.ipynb) •
+[**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/cutmix.py)
+
+
+
+---
+## Introduction
+
+_CutMix_ is a data augmentation technique that addresses the issue of information loss
+and inefficiency present in regional dropout strategies.
+Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,
+you replace the removed regions with a patch from another image,
+while the ground truth labels are mixed proportionally to the number of pixels of combined images.
+CutMix was proposed in
+[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/pdf/1905.04899.pdf)
+(Yun et al., 2019)
+
+It's implemented via the following formulas:
+
+
+
+where `M` is the binary mask which indicates the cutout and the fill-in
+regions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a
+[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution)
+
+The coordinates of bounding boxes are:
+
+
+
+which indicates the cutout and fill-in regions in case of the images.
+The bounding box sampling is represented by:
+
+
+
+where `rx, ry` are randomly drawn from a uniform distribution with upper bound.
+
+---
+## Setup
+
+
+```python
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from tensorflow import keras
+
+np.random.seed(42)
+tf.random.set_seed(42)
+```
+
+---
+## Load the CIFAR-10 dataset
+
+In this example, we will use the
+[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
+
+
+```python
+(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
+y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
+y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
+
+print(x_train.shape)
+print(y_train.shape)
+print(x_test.shape)
+print(y_test.shape)
+
+class_names = [
+ "Airplane",
+ "Automobile",
+ "Bird",
+ "Cat",
+ "Deer",
+ "Dog",
+ "Frog",
+ "Horse",
+ "Ship",
+ "Truck",
+]
+```
+
+
+```
+(50000, 32, 32, 3)
+(50000, 10)
+(10000, 32, 32, 3)
+(10000, 10)
+```
+
+
+
+---
+## Define hyperparameters
+
+
+```python
+AUTO = tf.data.AUTOTUNE
+BATCH_SIZE = 32
+IMG_SIZE = 32
+```
+
+---
+## Define the image preprocessing function
+
+
+```python
+
+def preprocess_image(image, label):
+ image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
+ image = tf.image.convert_image_dtype(image, tf.float32) / 255.0
+ return image, label
+
+```
+
+---
+## Convert the data into TensorFlow `Dataset` objects
+
+
+```python
+train_ds_one = (
+ tf.data.Dataset.from_tensor_slices((x_train, y_train))
+ .shuffle(1024)
+ .map(preprocess_image, num_parallel_calls=AUTO)
+)
+train_ds_two = (
+ tf.data.Dataset.from_tensor_slices((x_train, y_train))
+ .shuffle(1024)
+ .map(preprocess_image, num_parallel_calls=AUTO)
+)
+
+train_ds_simple = tf.data.Dataset.from_tensor_slices((x_train, y_train))
+
+test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
+
+train_ds_simple = (
+ train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+)
+
+# Combine two shuffled datasets from the same training data.
+train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
+
+test_ds = (
+ test_ds.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+)
+```
+
+---
+## Define the CutMix data augmentation function
+
+The CutMix function takes two `image` and `label` pairs to perform the augmentation. It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution) and returns a bounding box from `get_box` function. We then crop the second image (`image2`) and pad this image in the final padded image at the same location.
+
+
+```python
+
+def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2):
+ gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1)
+ gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0)
+ return gamma_1_sample / (gamma_1_sample + gamma_2_sample)
+
+
+@tf.function
+def get_box(lambda_value):
+ cut_rat = tf.math.sqrt(1.0 - lambda_value)
+
+ cut_w = IMG_SIZE * cut_rat # rw
+ cut_w = tf.cast(cut_w, tf.int32)
+
+ cut_h = IMG_SIZE * cut_rat # rh
+ cut_h = tf.cast(cut_h, tf.int32)
+
+ cut_x = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # rx
+ cut_y = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # ry
+
+ boundaryx1 = tf.clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE)
+ boundaryy1 = tf.clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE)
+ bbx2 = tf.clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE)
+ bby2 = tf.clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE)
+
+ target_h = bby2 - boundaryy1
+ if target_h == 0:
+ target_h += 1
+
+ target_w = bbx2 - boundaryx1
+ if target_w == 0:
+ target_w += 1
+
+ return boundaryx1, boundaryy1, target_h, target_w
+
+
+@tf.function
+def cutmix(train_ds_one, train_ds_two):
+ (image1, label1), (image2, label2) = train_ds_one, train_ds_two
+
+ alpha = [0.25]
+ beta = [0.25]
+
+ # Get a sample from the Beta distribution
+ lambda_value = sample_beta_distribution(1, alpha, beta)
+
+ # Define Lambda
+ lambda_value = lambda_value[0][0]
+
+ # Get the bounding box offsets, heights and widths
+ boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value)
+
+ # Get a patch from the second image (`image2`)
+ crop2 = tf.image.crop_to_bounding_box(
+ image2, boundaryy1, boundaryx1, target_h, target_w
+ )
+ # Pad the `image2` patch (`crop2`) with the same offset
+ image2 = tf.image.pad_to_bounding_box(
+ crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
+ )
+ # Get a patch from the first image (`image1`)
+ crop1 = tf.image.crop_to_bounding_box(
+ image1, boundaryy1, boundaryx1, target_h, target_w
+ )
+ # Pad the `image1` patch (`crop1`) with the same offset
+ img1 = tf.image.pad_to_bounding_box(
+ crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
+ )
+
+ # Modify the first image by subtracting the patch from `image1`
+ # (before applying the `image2` patch)
+ image1 = image1 - img1
+ # Add the modified `image1` and `image2` together to get the CutMix image
+ image = image1 + image2
+
+    # Adjust Lambda in accordance with the pixel ratio
+ lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE)
+ lambda_value = tf.cast(lambda_value, tf.float32)
+
+ # Combine the labels of both images
+ label = lambda_value * label1 + (1 - lambda_value) * label2
+ return image, label
+
+```
+
+**Note**: we are combining two images to create a single one.
+
+---
+## Visualize the new dataset after applying the CutMix augmentation
+
+
+```python
+# Create the new dataset using our `cutmix` utility
+train_ds_cmu = (
+ train_ds.shuffle(1024)
+ .map(cutmix, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+)
+
+# Let's preview 9 samples from the dataset
+image_batch, label_batch = next(iter(train_ds_cmu))
+plt.figure(figsize=(10, 10))
+for i in range(9):
+ ax = plt.subplot(3, 3, i + 1)
+ plt.title(class_names[np.argmax(label_batch[i])])
+ plt.imshow(image_batch[i])
+ plt.axis("off")
+```
+
+
+
+
+
+
+
+---
+## Define a ResNet-20 model
+
+
+```python
+
+def resnet_layer(
+ inputs,
+ num_filters=16,
+ kernel_size=3,
+ strides=1,
+ activation="relu",
+ batch_normalization=True,
+ conv_first=True,
+):
+ conv = keras.layers.Conv2D(
+ num_filters,
+ kernel_size=kernel_size,
+ strides=strides,
+ padding="same",
+ kernel_initializer="he_normal",
+ kernel_regularizer=keras.regularizers.l2(1e-4),
+ )
+ x = inputs
+ if conv_first:
+ x = conv(x)
+ if batch_normalization:
+ x = keras.layers.BatchNormalization()(x)
+ if activation is not None:
+ x = keras.layers.Activation(activation)(x)
+ else:
+ if batch_normalization:
+ x = keras.layers.BatchNormalization()(x)
+ if activation is not None:
+ x = keras.layers.Activation(activation)(x)
+ x = conv(x)
+ return x
+
+
+def resnet_v20(input_shape, depth, num_classes=10):
+ if (depth - 2) % 6 != 0:
+ raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])")
+ # Start model definition.
+ num_filters = 16
+ num_res_blocks = int((depth - 2) / 6)
+
+ inputs = keras.layers.Input(shape=input_shape)
+ x = resnet_layer(inputs=inputs)
+ # Instantiate the stack of residual units
+ for stack in range(3):
+ for res_block in range(num_res_blocks):
+ strides = 1
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ strides = 2 # downsample
+ y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
+ y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ # linear projection residual shortcut connection to match
+ # changed dims
+ x = resnet_layer(
+ inputs=x,
+ num_filters=num_filters,
+ kernel_size=1,
+ strides=strides,
+ activation=None,
+ batch_normalization=False,
+ )
+ x = keras.layers.add([x, y])
+ x = keras.layers.Activation("relu")(x)
+ num_filters *= 2
+
+ # Add classifier on top.
+ # v1 does not use BN after last shortcut connection-ReLU
+ x = keras.layers.AveragePooling2D(pool_size=8)(x)
+ y = keras.layers.Flatten()(x)
+ outputs = keras.layers.Dense(
+ num_classes, activation="softmax", kernel_initializer="he_normal"
+ )(y)
+
+ # Instantiate model.
+ model = keras.models.Model(inputs=inputs, outputs=outputs)
+ return model
+
+
+def training_model():
+ return resnet_v20((32, 32, 3), 20)
+
+
+initial_model = training_model()
+initial_model.save_weights("initial_weights.h5")
+```
+
+---
+## Train the model with the dataset augmented by CutMix
+
+
+```python
+model = training_model()
+model.load_weights("initial_weights.h5")
+
+model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+model.fit(train_ds_cmu, validation_data=test_ds, epochs=15)
+
+test_loss, test_accuracy = model.evaluate(test_ds)
+print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
+```
+
+
+```
+Epoch 1/15
+1563/1563 [==============================] - 62s 24ms/step - loss: 1.9216 - accuracy: 0.4090 - val_loss: 1.9737 - val_accuracy: 0.4061
+Epoch 2/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.6549 - accuracy: 0.5325 - val_loss: 1.5033 - val_accuracy: 0.5061
+Epoch 3/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.5536 - accuracy: 0.5840 - val_loss: 1.2913 - val_accuracy: 0.6112
+Epoch 4/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.4988 - accuracy: 0.6097 - val_loss: 1.0587 - val_accuracy: 0.7033
+Epoch 5/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.4531 - accuracy: 0.6291 - val_loss: 1.0681 - val_accuracy: 0.6841
+Epoch 6/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.4173 - accuracy: 0.6464 - val_loss: 1.0265 - val_accuracy: 0.7085
+Epoch 7/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.3932 - accuracy: 0.6572 - val_loss: 0.9540 - val_accuracy: 0.7331
+Epoch 8/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.3736 - accuracy: 0.6680 - val_loss: 0.9877 - val_accuracy: 0.7240
+Epoch 9/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.3575 - accuracy: 0.6782 - val_loss: 0.8944 - val_accuracy: 0.7570
+Epoch 10/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.3398 - accuracy: 0.6886 - val_loss: 0.8598 - val_accuracy: 0.7649
+Epoch 11/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.3277 - accuracy: 0.6939 - val_loss: 0.9032 - val_accuracy: 0.7603
+Epoch 12/15
+1563/1563 [==============================] - 38s 24ms/step - loss: 1.3131 - accuracy: 0.6964 - val_loss: 0.7934 - val_accuracy: 0.7926
+Epoch 13/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.3050 - accuracy: 0.7029 - val_loss: 0.8737 - val_accuracy: 0.7552
+Epoch 14/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.2987 - accuracy: 0.7099 - val_loss: 0.8409 - val_accuracy: 0.7766
+Epoch 15/15
+1563/1563 [==============================] - 37s 24ms/step - loss: 1.2953 - accuracy: 0.7099 - val_loss: 0.7850 - val_accuracy: 0.8014
+313/313 [==============================] - 3s 9ms/step - loss: 0.7850 - accuracy: 0.8014
+Test accuracy: 80.14%
+```
+
+
+
+---
+## Train the model using the original non-augmented dataset
+
+
+```python
+model = training_model()
+model.load_weights("initial_weights.h5")
+model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+model.fit(train_ds_simple, validation_data=test_ds, epochs=15)
+
+test_loss, test_accuracy = model.evaluate(test_ds)
+print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
+```
+
+
+```
+Epoch 1/15
+1563/1563 [==============================] - 38s 23ms/step - loss: 1.4864 - accuracy: 0.5173 - val_loss: 1.3694 - val_accuracy: 0.5708
+Epoch 2/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 1.0682 - accuracy: 0.6779 - val_loss: 1.1424 - val_accuracy: 0.6686
+Epoch 3/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.8955 - accuracy: 0.7449 - val_loss: 1.0555 - val_accuracy: 0.7007
+Epoch 4/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.7890 - accuracy: 0.7878 - val_loss: 1.0575 - val_accuracy: 0.7079
+Epoch 5/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.7107 - accuracy: 0.8175 - val_loss: 1.1395 - val_accuracy: 0.7062
+Epoch 6/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.6524 - accuracy: 0.8397 - val_loss: 1.1716 - val_accuracy: 0.7042
+Epoch 7/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.6098 - accuracy: 0.8594 - val_loss: 1.4120 - val_accuracy: 0.6786
+Epoch 8/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.5715 - accuracy: 0.8765 - val_loss: 1.3159 - val_accuracy: 0.7011
+Epoch 9/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.5477 - accuracy: 0.8872 - val_loss: 1.2873 - val_accuracy: 0.7182
+Epoch 10/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.5233 - accuracy: 0.8988 - val_loss: 1.4118 - val_accuracy: 0.6964
+Epoch 11/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.5165 - accuracy: 0.9045 - val_loss: 1.3741 - val_accuracy: 0.7230
+Epoch 12/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.5008 - accuracy: 0.9124 - val_loss: 1.3984 - val_accuracy: 0.7181
+Epoch 13/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.4896 - accuracy: 0.9190 - val_loss: 1.3642 - val_accuracy: 0.7209
+Epoch 14/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.4845 - accuracy: 0.9231 - val_loss: 1.5469 - val_accuracy: 0.6992
+Epoch 15/15
+1563/1563 [==============================] - 36s 23ms/step - loss: 0.4749 - accuracy: 0.9294 - val_loss: 1.4034 - val_accuracy: 0.7362
+313/313 [==============================] - 3s 9ms/step - loss: 1.4034 - accuracy: 0.7362
+Test accuracy: 73.62%
+```
+
+
+
+---
+## Notes
+
+In this example, we trained our model for 15 epochs.
+In our experiment, the model with CutMix achieves a better accuracy on the CIFAR-10 dataset
+(80.14% in our experiment) compared to the model that doesn't use the augmentation (73.62%).
+You may notice that training with the CutMix augmentation takes roughly the same time per epoch as training on the original data.
+
+You can experiment further with the CutMix technique by following the
+[original paper](https://arxiv.org/abs/1905.04899).
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..d1e80795e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,4 @@
+os
+numpy
+pandas
+tensorflow
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..bf267b779
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=32
+#网络名称,同目录名称
+Network="cutmix_ID2502_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=15
+#训练step
+#train_steps=50000
+#学习率
+# learning_rate=0.001
+# weight_decay=0.0001
+#参数配置
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+ echo "usage: ./train_performance_1p_static_eval.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path \" must be config"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+#参数修改
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+ --epochs=${train_epochs} \
+ --batch_size=${batch_size} \
+ --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+#参数回改
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==2' | awk '{print$5}' | tr -cd "[0-9]"`
+TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$17}'`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+TrainingTime=${TrainingTime}
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' |grep -v loss > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |tr -d '\b\r'| grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+# ActualLoss=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$8}'`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
new file mode 100644
index 000000000..80fbce60b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL_ETP=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=32
+#网络名称,同目录名称
+Network="cutmix_ID2502_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RankSize=1
+#训练epoch,可选
+train_epochs=1
+#训练step
+# train_steps=5
+#学习率
+# learning_rate=0.0001
+ckpt_path=""
+#参数配置
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+ echo "usage: ./train_performance_1p.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path \" must be config"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+#参数修改
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 train.py --data_dir=${data_path}\
+ --epochs=${train_epochs}\
+ --batch_size=${batch_size}\
+ --eval_static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+#参数回改
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk '{print $5}'|awk -F "ms" '{print $1}'|sed s/[[:space:]]//g`
+wait
+#FPS
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'1000'/'${TrainingTime}'}'`
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+train_accuracy=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $11}'|sed s/[[:space:]]//g`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'dynamic'_'perf'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep -v "ms/step"|tr -d '\b\r'| grep -Eo " loss: [0-9]*\.[0-9]*"|awk '{print $2}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+#ModelStatus="图执行FAIL"
+#DTS_Number="DTS2021090622224"
+#error_msg="type Conv2DBackpropFilter is not found in this op store"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RankSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
new file mode 100644
index 000000000..997ae82a6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=32
+#网络名称,同目录名称
+Network="cutmix_ID2502_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=5
+#训练step
+#train_steps=50000
+#学习率
+# learning_rate=0.001
+# weight_decay=0.0001
+#参数配置
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+ echo "usage: ./train_performance_1p_static_eval.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path \" must be config"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+#参数修改
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+ --epochs=${train_epochs} \
+ --batch_size=${batch_size} \
+ --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+#参数回改
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==2' | awk '{print$5}' | tr -cd "[0-9]"`
+TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$17}'`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+TrainingTime=${TrainingTime}
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' |grep -v loss > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |tr -d '\b\r'| grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+# ActualLoss=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$8}'`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..629a54eb0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py
@@ -0,0 +1,486 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: CutMix data augmentation for image classification
+Author: [Sayan Nath](https://twitter.com/sayannath2350)
+Date created: 2021/06/08
+Last modified: 2021/06/08
+Description: Data augmentation with CutMix for image classification on CIFAR-10.
+"""
+
+"""
+## Introduction
+"""
+
+"""
+_CutMix_ is a data augmentation technique that addresses the issue of information loss
+and inefficiency present in regional dropout strategies.
+Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,
+you replace the removed regions with a patch from another image,
+while the ground truth labels are mixed proportionally to the number of pixels of combined images.
+CutMix was proposed in
+[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/pdf/1905.04899.pdf)
+(Yun et al., 2019)
+
+It's implemented via the following formulas:
+
+
+
+where `M` is the binary mask which indicates the cutout and the fill-in
+regions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a
+[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution)
+
+The coordinates of bounding boxes are:
+
+
+
+which indicates the cutout and fill-in regions in case of the images.
+The bounding box sampling is represented by:
+
+
+
+where `rx, ry` are randomly drawn from a uniform distribution with upper bound.
+"""
+
+"""
+## Setup
+"""
+import npu_device
+print('npu_device loaded')
+npu_device.open().as_default()
+
+import os
+import ast
+import numpy as np
+import pandas as pd
+# import matplotlib.pyplot as plt
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.datasets.cifar import load_batch
+import argparse
+np.random.seed(42)
+tf.random.set_seed(42)
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--data_dir', default="../cifar-10-batches-py/",
+ help="""directory to data""")
+ parser.add_argument('--batch_size', default=32, type=int,
+ help="""batch size for 1p""")
+ parser.add_argument('--epochs', default=15, type=int,
+ help="""epochs""")
+ parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval,
+ help='the path to train data')
+ args, unknown_args = parser.parse_known_args()
+ if len(unknown_args) > 0:
+ for bad_arg in unknown_args:
+ print("ERROR: Unknown command line arg: %s" % bad_arg)
+ raise ValueError("Invalid command line arg(s)")
+ return args
+
+
+args = parse_args()
+data_path = args.data_dir
+num_epochs = args.epochs
+
+"""
+## Load the CIFAR-10 dataset
+
+In this example, we will use the
+[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
+"""
+def load_data(data_path):
+ num_train_samples = 50000
+ x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
+ y_train = np.empty((num_train_samples,), dtype='uint8')
+
+ for i in range(1, 6):
+ fpath = os.path.join(data_path, 'data_batch_' + str(i))
+ (x_train[(i - 1) * 10000:i * 10000, :, :, :],
+ y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
+
+ fpath = os.path.join(data_path, 'test_batch')
+ x_test, y_test = load_batch(fpath)
+
+ y_train = np.reshape(y_train, (len(y_train), 1))
+ y_test = np.reshape(y_test, (len(y_test), 1))
+
+ if K.image_data_format() == 'channels_last':
+ x_train = x_train.transpose(0, 2, 3, 1)
+ x_test = x_test.transpose(0, 2, 3, 1)
+
+ x_test = x_test.astype(x_train.dtype)
+ y_test = y_test.astype(y_train.dtype)
+
+ return (x_train, y_train), (x_test, y_test)
+
+(x_train, y_train), (x_test, y_test) = load_data(data_path)
+
+y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
+y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
+
+print(x_train.shape)
+print(y_train.shape)
+print(x_test.shape)
+print(y_test.shape)
+
+class_names = [
+ "Airplane",
+ "Automobile",
+ "Bird",
+ "Cat",
+ "Deer",
+ "Dog",
+ "Frog",
+ "Horse",
+ "Ship",
+ "Truck",
+]
+
+"""
+## Define hyperparameters
+"""
+
+AUTO = tf.data.AUTOTUNE
+BATCH_SIZE = args.batch_size
+IMG_SIZE = 32
+
+"""
+## Define the image preprocessing function
+"""
+
+
+def preprocess_image(image, label):
+ image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
+ image = tf.image.convert_image_dtype(image, tf.float32) / 255.0
+ return image, label
+
+
+"""
+## Convert the data into TensorFlow `Dataset` objects
+"""
+
+train_ds_one = (
+ tf.data.Dataset.from_tensor_slices((x_train, y_train))
+ .shuffle(1024)
+ .map(preprocess_image, num_parallel_calls=AUTO)
+)
+train_ds_two = (
+ tf.data.Dataset.from_tensor_slices((x_train, y_train))
+ .shuffle(1024)
+ .map(preprocess_image, num_parallel_calls=AUTO)
+)
+
+train_ds_simple = tf.data.Dataset.from_tensor_slices((x_train, y_train))
+
+if args.eval_static:
+
+ test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
+
+ train_ds_simple = (
+ train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE, drop_remainder=True)
+ .prefetch(AUTO)
+ )
+
+ # Combine two shuffled datasets from the same training data.
+ train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
+
+ test_ds = (
+ test_ds.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE, drop_remainder=True)
+ .prefetch(AUTO)
+ )
+else:
+
+ test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
+
+ train_ds_simple = (
+ train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+ )
+
+ # Combine two shuffled datasets from the same training data.
+ train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
+
+ test_ds = (
+ test_ds.map(preprocess_image, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+ )
+
+"""
+## Define the CutMix data augmentation function
+
+The CutMix function takes two `image` and `label` pairs to perform the augmentation. It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution) and returns a bounding box from `get_box` function. We then crop the second image (`image2`) and pad this image in the final padded image at the same location.
+"""
+
+
+def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2):
+ gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1)
+ gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0)
+ return gamma_1_sample / (gamma_1_sample + gamma_2_sample)
+
+
+@tf.function
+def get_box(lambda_value):
+ cut_rat = tf.math.sqrt(1.0 - lambda_value)
+
+ cut_w = IMG_SIZE * cut_rat # rw
+ cut_w = tf.cast(cut_w, tf.int32)
+
+ cut_h = IMG_SIZE * cut_rat # rh
+ cut_h = tf.cast(cut_h, tf.int32)
+
+ cut_x = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # rx
+ cut_y = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # ry
+
+ boundaryx1 = tf.clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE)
+ boundaryy1 = tf.clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE)
+ bbx2 = tf.clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE)
+ bby2 = tf.clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE)
+
+ target_h = bby2 - boundaryy1
+ if target_h == 0:
+ target_h += 1
+
+ target_w = bbx2 - boundaryx1
+ if target_w == 0:
+ target_w += 1
+
+ return boundaryx1, boundaryy1, target_h, target_w
+
+
+@tf.function
+def cutmix(train_ds_one, train_ds_two):
+ (image1, label1), (image2, label2) = train_ds_one, train_ds_two
+
+ alpha = [0.25]
+ beta = [0.25]
+
+ # Get a sample from the Beta distribution
+ lambda_value = sample_beta_distribution(1, alpha, beta)
+
+ # Define Lambda
+ lambda_value = lambda_value[0][0]
+
+ # Get the bounding box offsets, heights and widths
+ boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value)
+
+ # Get a patch from the second image (`image2`)
+ crop2 = tf.image.crop_to_bounding_box(
+ image2, boundaryy1, boundaryx1, target_h, target_w
+ )
+ # Pad the `image2` patch (`crop2`) with the same offset
+ image2 = tf.image.pad_to_bounding_box(
+ crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
+ )
+ # Get a patch from the first image (`image1`)
+ crop1 = tf.image.crop_to_bounding_box(
+ image1, boundaryy1, boundaryx1, target_h, target_w
+ )
+ # Pad the `image1` patch (`crop1`) with the same offset
+ img1 = tf.image.pad_to_bounding_box(
+ crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
+ )
+
+ # Modify the first image by subtracting the patch from `image1`
+ # (before applying the `image2` patch)
+ image1 = image1 - img1
+ # Add the modified `image1` and `image2` together to get the CutMix image
+ image = image1 + image2
+
+ # Adjust Lambda in accordance to the pixel ration
+ lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE)
+ lambda_value = tf.cast(lambda_value, tf.float32)
+
+ # Combine the labels of both images
+ label = lambda_value * label1 + (1 - lambda_value) * label2
+ return image, label
+
+
+"""
+**Note**: we are combining two images to create a single one.
+
+## Visualize the new dataset after applying the CutMix augmentation
+"""
+
+# Create the new dataset using our `cutmix` utility
+if args.eval_static:
+ train_ds_cmu = (
+ train_ds.shuffle(1024)
+ .map(cutmix, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE, drop_remainder=True)
+ .prefetch(AUTO)
+ )
+else:
+ train_ds_cmu = (
+ train_ds.shuffle(1024)
+ .map(cutmix, num_parallel_calls=AUTO)
+ .batch(BATCH_SIZE)
+ .prefetch(AUTO)
+ )
+
+# Let's preview 9 samples from the dataset
+# image_batch, label_batch = next(iter(train_ds_cmu))
+# plt.figure(figsize=(10, 10))
+# for i in range(9):
+# ax = plt.subplot(3, 3, i + 1)
+# plt.title(class_names[np.argmax(label_batch[i])])
+# plt.imshow(image_batch[i])
+# plt.axis("off")
+
+"""
+## Define a ResNet-20 model
+"""
+
+
+def resnet_layer(
+ inputs,
+ num_filters=16,
+ kernel_size=3,
+ strides=1,
+ activation="relu",
+ batch_normalization=True,
+ conv_first=True,
+):
+ conv = keras.layers.Conv2D(
+ num_filters,
+ kernel_size=kernel_size,
+ strides=strides,
+ padding="same",
+ kernel_initializer="he_normal",
+ kernel_regularizer=keras.regularizers.l2(1e-4),
+ )
+ x = inputs
+ if conv_first:
+ x = conv(x)
+ if batch_normalization:
+ x = keras.layers.BatchNormalization()(x)
+ if activation is not None:
+ x = keras.layers.Activation(activation)(x)
+ else:
+ if batch_normalization:
+ x = keras.layers.BatchNormalization()(x)
+ if activation is not None:
+ x = keras.layers.Activation(activation)(x)
+ x = conv(x)
+ return x
+
+
+def resnet_v20(input_shape, depth, num_classes=10):
+ if (depth - 2) % 6 != 0:
+ raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])")
+ # Start model definition.
+ num_filters = 16
+ num_res_blocks = int((depth - 2) / 6)
+
+ inputs = keras.layers.Input(shape=input_shape)
+ x = resnet_layer(inputs=inputs)
+ # Instantiate the stack of residual units
+ for stack in range(3):
+ for res_block in range(num_res_blocks):
+ strides = 1
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ strides = 2 # downsample
+ y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
+ y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ # linear projection residual shortcut connection to match
+ # changed dims
+ x = resnet_layer(
+ inputs=x,
+ num_filters=num_filters,
+ kernel_size=1,
+ strides=strides,
+ activation=None,
+ batch_normalization=False,
+ )
+ x = keras.layers.add([x, y])
+ x = keras.layers.Activation("relu")(x)
+ num_filters *= 2
+
+ # Add classifier on top.
+ # v1 does not use BN after last shortcut connection-ReLU
+ x = keras.layers.AveragePooling2D(pool_size=8)(x)
+ y = keras.layers.Flatten()(x)
+ outputs = keras.layers.Dense(
+ num_classes, activation="softmax", kernel_initializer="he_normal"
+ )(y)
+
+ # Instantiate model.
+ model = keras.models.Model(inputs=inputs, outputs=outputs)
+ return model
+
+
+def training_model():
+ return resnet_v20((32, 32, 3), 20)
+
+
+initial_model = training_model()
+initial_model.save_weights("initial_weights.h5")
+
+"""
+## Train the model with the dataset augmented by CutMix
+"""
+
+model = training_model()
+model.load_weights("initial_weights.h5")
+
+model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+model.fit(train_ds_cmu, validation_data=test_ds, epochs=num_epochs)
+
+
+"""
+## Train the model using the original non-augmented dataset
+"""
+
+# model = training_model()
+# model.load_weights("initial_weights.h5")
+# model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+# model.fit(train_ds_simple, validation_data=test_ds, epochs=15)
+
+# test_loss, test_accuracy = model.evaluate(test_ds)
+# print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
+
+"""
+## Notes
+
+In this example, we trained our model for 15 epochs.
+In our experiment, the model with CutMix achieves a better accuracy on the CIFAR-10 dataset
+(80.36% in our experiment) compared to the model that doesn't use the augmentation (72.70%).
+You may notice it takes less time to train the model with the CutMix augmentation.
+
+You can experiment further with the CutMix technique by following the
+[original paper](https://arxiv.org/abs/1905.04899).
+"""
\ No newline at end of file
--
Gitee
From b96b7cf192c494592fb89f21693c52b54cbb3319 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:20:52 +0000
Subject: [PATCH 06/54] add
TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt.
---
.../cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..0b49b4fb2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
--
Gitee
From 84b7186558e592444ff02a79941958964d13dafb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:25:13 +0000
Subject: [PATCH 07/54] =?UTF-8?q?pointnet=5FID2531=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../pointnet_ID2531_for_TensorFlow2.X/LICENSE | 284 ++++++++++++++
.../ReadME.md | 29 ++
.../pointnet.py | 355 ++++++++++++++++++
.../requirements.txt | 1 +
.../test/train_full_1p.sh | 103 +++++
.../test/train_performance_1p_dynamic_eval.sh | 115 ++++++
.../test/train_performance_1p_static_eval.sh | 104 +++++
7 files changed, 991 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..ab652360b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,284 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------
+Files: third_party/compute_library/...
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+------------------
+Files: ACKNOWLEDGEMENTS
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+Files: third_party/hexagon
+
+Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of The Linux Foundation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md
new file mode 100644
index 000000000..d844898a8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md
@@ -0,0 +1,29 @@
+# 一、基础信息
+
+网络名称:`pointnet_ID2531_for_TensorFlow2.X`
+
+github addr:https://github.com/keras-team/keras-io/tree/master/examples/vision
+
+# 二、代码修改
+
+# 三、程序运行
+
+```shell
+bash run_1p.sh
+```
+
+# 四、归档文件路径
+
+1、数据集
+pointnet_ID2531_for_TensorFlow2.X,10.248.93.131:Huawei@123,/train_output/turingDataset/00-CV/ID2531_CarPeting_TF2.X_pointnet:2292148
+
+
+2、归档文件
+
+3、迁移代码
+
+4、源代码
+
+5、源迁移代码
+
+# 五、NPU工作环境
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py
new file mode 100644
index 000000000..790f4d52b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py
@@ -0,0 +1,355 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Point cloud classification with PointNet
+Author: [David Griffiths](https://dgriffiths3.github.io)
+Date created: 2020/05/25
+Last modified: 2020/05/26
+Description: Implementation of PointNet for ModelNet10 classification.
+"""
+"""
+# Point cloud classification
+"""
+
+"""
+## Introduction
+
+Classification, detection and segmentation of unordered 3D point sets i.e. point clouds
+is a core problem in computer vision. This example implements the seminal point cloud
+deep learning paper [PointNet (Qi et al., 2017)](https://arxiv.org/abs/1612.00593). For a
+detailed introduction on PointNet see [this blog
+post](https://medium.com/@luis_gonzales/an-in-depth-look-at-pointnet-111d7efdaa1a).
+"""
+
+"""
+## Setup
+
+If using colab first install trimesh with `!pip install trimesh`.
+"""
+
+
+import os
+import argparse
+import ast
+import glob
+import trimesh
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from matplotlib import pyplot as plt
+
+import npu_device
+npu_device.open().as_default()
+
+tf.random.set_seed(1234)
+
+'''
+"""
+## Load dataset
+
+We use the ModelNet10 model dataset, the smaller 10 class version of the ModelNet40
+dataset. First download the data:
+"""
+
+DATA_DIR = tf.keras.utils.get_file(
+ "modelnet.zip",
+ "http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip",
+ extract=True,
+)
+DATA_DIR = os.path.join(os.path.dirname(DATA_DIR), "ModelNet10")
+
+"""
+We can use the `trimesh` package to read and visualize the `.off` mesh files.
+"""
+
+mesh = trimesh.load(os.path.join(DATA_DIR, "chair/train/chair_0001.off"))
+mesh.show()
+
+"""
+To convert a mesh file to a point cloud we first need to sample points on the mesh
+surface. `.sample()` performs a uniform random sampling. Here we sample at 2048 locations
+and visualize in `matplotlib`.
+"""
+
+points = mesh.sample(2048)
+
+fig = plt.figure(figsize=(5, 5))
+ax = fig.add_subplot(111, projection="3d")
+ax.scatter(points[:, 0], points[:, 1], points[:, 2])
+ax.set_axis_off()
+plt.show()
+'''
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--data_path', default='./',
+ help="""directory to data""")
+ parser.add_argument('--batch_size', default=32, type=int,
+ help="""batch size for 1p""")
+ parser.add_argument('--epochs', default=20, type=int,
+ help="""epochs""")
+ parser.add_argument('--drop_remainder', default="True", type=ast.literal_eval,
+ help="""drop_remainder True or False remote dynamic or static input""")
+ args, unknown_args = parser.parse_known_args()
+ if len(unknown_args) > 0:
+ for bad_arg in unknown_args:
+ print("ERROR: Unknown command line arg: %s" % bad_arg)
+ raise ValueError("Invalid command line arg(s)")
+ return args
+
+
+"""
+Set the number of points to sample and batch size and parse the dataset. This can take
+~5minutes to complete.
+"""
+
+args = parse_args()
+DATA_DIR = os.path.join(args.data_path, "ModelNet10/")
+NUM_POINTS = 2048
+NUM_CLASSES = 10
+BATCH_SIZE = args.batch_size
+EPOCHS=args.epochs
+
+
+"""
+To generate a `tf.data.Dataset()` we need to first parse through the ModelNet data
+folders. Each mesh is loaded and sampled into a point cloud before being added to a
+standard python list and converted to a `numpy` array. We also store the current
+enumerate index value as the object label and use a dictionary to recall this later.
+"""
+
+
+def parse_dataset(num_points=2048):
+
+ train_points = []
+ train_labels = []
+ test_points = []
+ test_labels = []
+ class_map = {}
+ folders = glob.glob(os.path.join(DATA_DIR, "[!README]*"))
+
+ for i, folder in enumerate(folders):
+ print("processing class: {}".format(os.path.basename(folder)))
+ # store folder name with ID so we can retrieve later
+ class_map[i] = folder.split("/")[-1]
+ # gather all files
+ train_files = glob.glob(os.path.join(folder, "train/*"))
+ test_files = glob.glob(os.path.join(folder, "test/*"))
+
+ for f in train_files:
+ train_points.append(trimesh.load(f).sample(num_points))
+ train_labels.append(i)
+
+ for f in test_files:
+ test_points.append(trimesh.load(f).sample(num_points))
+ test_labels.append(i)
+
+ return (
+ np.array(train_points),
+ np.array(test_points),
+ np.array(train_labels),
+ np.array(test_labels),
+ class_map,
+ )
+
+train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset(
+ NUM_POINTS
+)
+
+"""
+Our data can now be read into a `tf.data.Dataset()` object. We set the shuffle buffer
+size to the entire size of the dataset as prior to this the data is ordered by class.
+Data augmentation is important when working with point cloud data. We create a
+augmentation function to jitter and shuffle the train dataset.
+"""
+
+
+def augment(points, label):
+ # jitter points
+ points += tf.random.uniform(points.shape, -0.005, 0.005, dtype=tf.float64)
+ # shuffle points
+ points = tf.random.shuffle(points)
+ return points, label
+
+
+train_dataset = tf.data.Dataset.from_tensor_slices((train_points, train_labels))
+test_dataset = tf.data.Dataset.from_tensor_slices((test_points, test_labels))
+
+train_dataset = train_dataset.shuffle(len(train_points)).map(augment).batch(BATCH_SIZE, drop_remainder=args.drop_remainder)
+test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE, drop_remainder=args.drop_remainder)
+
+"""
+### Build a model
+
+Each convolution and fully-connected layer (with exception for end layers) consists of
+Convolution / Dense -> Batch Normalization -> ReLU Activation.
+"""
+
+
+def conv_bn(x, filters):
+ x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x)
+ x = layers.BatchNormalization(momentum=0.0)(x)
+ return layers.Activation("relu")(x)
+
+
+def dense_bn(x, filters):
+ x = layers.Dense(filters)(x)
+ x = layers.BatchNormalization(momentum=0.0)(x)
+ return layers.Activation("relu")(x)
+
+
+"""
+PointNet consists of two core components. The primary MLP network, and the transformer
+net (T-net). The T-net aims to learn an affine transformation matrix by its own mini
+network. The T-net is used twice. The first time to transform the input features (n, 3)
+into a canonical representation. The second is an affine transformation for alignment in
+feature space (n, 3). As per the original paper we constrain the transformation to be
+close to an orthogonal matrix (i.e. ||X*X^T - I|| = 0).
+"""
+
+
+class OrthogonalRegularizer(keras.regularizers.Regularizer):
+ def __init__(self, num_features, l2reg=0.001):
+ self.num_features = num_features
+ self.l2reg = l2reg
+ self.eye = tf.eye(num_features)
+
+ def __call__(self, x):
+ x = tf.reshape(x, (-1, self.num_features, self.num_features))
+ xxt = tf.tensordot(x, x, axes=(2, 2))
+ xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))
+ return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))
+
+
+"""
+ We can then define a general function to build T-net layers.
+"""
+
+
+def tnet(inputs, num_features):
+
+    # Initialise bias as the identity matrix
+ bias = keras.initializers.Constant(np.eye(num_features).flatten())
+ reg = OrthogonalRegularizer(num_features)
+
+ x = conv_bn(inputs, 32)
+ x = conv_bn(x, 64)
+ x = conv_bn(x, 512)
+ x = layers.GlobalMaxPooling1D()(x)
+ x = dense_bn(x, 256)
+ x = dense_bn(x, 128)
+ x = layers.Dense(
+ num_features * num_features,
+ kernel_initializer="zeros",
+ bias_initializer=bias,
+ activity_regularizer=reg,
+ )(x)
+ feat_T = layers.Reshape((num_features, num_features))(x)
+ # Apply affine transformation to input features
+ return layers.Dot(axes=(2, 1))([inputs, feat_T])
+
+
+"""
+The main network can then be implemented in the same manner, where the t-net mini models
+can be dropped in as layers in the graph. Here we replicate the network architecture
+published in the original paper but with half the number of weights at each layer as we
+are using the smaller 10 class ModelNet dataset.
+"""
+
+inputs = keras.Input(shape=(NUM_POINTS, 3))
+
+x = tnet(inputs, 3)
+x = conv_bn(x, 32)
+x = conv_bn(x, 32)
+x = tnet(x, 32)
+x = conv_bn(x, 32)
+x = conv_bn(x, 64)
+x = conv_bn(x, 512)
+x = layers.GlobalMaxPooling1D()(x)
+x = dense_bn(x, 256)
+x = layers.Dropout(0.3)(x)
+x = dense_bn(x, 128)
+x = layers.Dropout(0.3)(x)
+
+outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
+
+model = keras.Model(inputs=inputs, outputs=outputs, name="pointnet")
+model.summary()
+
+"""
+### Train model
+
+Once the model is defined it can be trained like any other standard classification model
+using `.compile()` and `.fit()`.
+"""
+
+model.compile(
+ loss="sparse_categorical_crossentropy",
+ optimizer=keras.optimizers.Adam(learning_rate=0.001),
+ metrics=["sparse_categorical_accuracy"],
+)
+
+model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset)
+model.save_weights(filepath="pointnet", save_format="tf")
+"""
+## Visualize predictions
+
+We can use matplotlib to visualize our trained model performance.
+"""
+
+'''
+data = test_dataset.take(1)
+
+points, labels = list(data)[0]
+points = points[:8, ...]
+labels = labels[:8, ...]
+
+# run test data through model
+preds = model.predict(points)
+preds = tf.math.argmax(preds, -1)
+
+points = points.numpy()
+
+# plot points with predicted class and label
+fig = plt.figure(figsize=(15, 10))
+for i in range(8):
+ ax = fig.add_subplot(2, 4, i + 1, projection="3d")
+ ax.scatter(points[i, :, 0], points[i, :, 1], points[i, :, 2])
+ ax.set_title(
+ "pred: {:}, label: {:}".format(
+ CLASS_MAP[preds[i].numpy()], CLASS_MAP[labels.numpy()[i]]
+ )
+ )
+ ax.set_axis_off()
+plt.show()
+'''
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..ac1db3b60
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1 @@
+trimesh
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..9d57d0954
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=32
+#网络名称,同目录名称
+Network="pointnet_ID2531_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=20
+#训练step
+train_steps=60000
+#学习率
+#learning_rate=1e-5
+
+#参数配置
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+ echo "usage: ./train_performance_1p.sh"
+ exit 1
+fi
+
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path \" must be config"
+ exit 1
+fi
+
+##############执行训练##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+ rm -rf $cur_path/test/output/*
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+ mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 pointnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --drop_remainder=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | awk -F " " '{print$5}' | tr -cd "[0-9]"`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'`
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+train_accuracy=`grep " val_sparse_categorical_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+TrainingTime=${TrainingTime}
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
new file mode 100644
index 000000000..2c68e2f74
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Base parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name (same as this model's directory name)
+Network="pointnet_ID2531_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single-card run
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=1
+# Training steps
+train_steps=60000
+# Learning rate
+#learning_rate=1e-5
+
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 pointnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --drop_remainder=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Report throughput (FPS); review per model
+TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` # NOTE(review): hard-coded "1875/1875" step marker — confirm it matches this model's steps-per-epoch
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'`
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Report training accuracy; review per model
+train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-watch result summary
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Per-iteration training time
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep student_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt # NOTE(review): 'student_loss' looks copied from a distiller script — confirm this pattern appears in pointnet logs
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+ModelStatus="图执行FAIL"
+DTS_Number="DTS2021090622224"
+# error_msg="E19999"
+error_msg="EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel \[AIcoreEngine\] is unsupported"
+Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+# error_msg="Graph engine process graph failed: E19999: Inner Error! Output shape is still unknown after shape inference. shape = [-1]."
+
+# Print key info into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
new file mode 100644
index 000000000..6b839114f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Base parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name (same as this model's directory name)
+Network="pointnet_ID2531_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single-card run
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=1
+# Training steps
+train_steps=60000
+# Learning rate
+#learning_rate=1e-5
+
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 pointnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --drop_remainder=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Report throughput (FPS); review per model
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | awk -F " " '{print$5}' | tr -cd "[0-9]"`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Report training accuracy; review per model
+train_accuracy=`grep " val_sparse_categorical_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'` # NOTE(review): positional field $17 depends on the exact Keras progress-bar layout — fragile; verify against an actual log
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-watch result summary
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Per-iteration training time
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key info into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
--
Gitee
From 83b0d632d6ed79e5388112763159842af1e30484 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:26:34 +0000
Subject: [PATCH 08/54] add
TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt.
---
.../pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..0b49b4fb2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
--
Gitee
From 50df09ed3ecf9c860ddb64d8e735cf33e5d2da68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:28:24 +0000
Subject: [PATCH 09/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/.gitignore | 2 +
TensorFlow2/built-in/keras_sample/LICENSE | 51 ++
TensorFlow2/built-in/keras_sample/README.md | 233 +++++++++
.../built-in/keras_sample/README_BAK.md | 77 +++
TensorFlow2/built-in/keras_sample/evaluate.py | 199 ++++++++
.../built-in/keras_sample/modelzoo_level.txt | 3 +
TensorFlow2/built-in/keras_sample/provider.py | 165 +++++++
.../built-in/keras_sample/requirements.txt | 0
TensorFlow2/built-in/keras_sample/train.py | 452 ++++++++++++++++++
.../built-in/keras_sample/train_real.py | 381 +++++++++++++++
10 files changed, 1563 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/.gitignore
create mode 100644 TensorFlow2/built-in/keras_sample/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/README_BAK.md
create mode 100644 TensorFlow2/built-in/keras_sample/evaluate.py
create mode 100644 TensorFlow2/built-in/keras_sample/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/provider.py
create mode 100644 TensorFlow2/built-in/keras_sample/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/train.py
create mode 100644 TensorFlow2/built-in/keras_sample/train_real.py
diff --git a/TensorFlow2/built-in/keras_sample/.gitignore b/TensorFlow2/built-in/keras_sample/.gitignore
new file mode 100644
index 000000000..8efb80c9a
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/.gitignore
@@ -0,0 +1,2 @@
+/data/*
+/log/*
diff --git a/TensorFlow2/built-in/keras_sample/LICENSE b/TensorFlow2/built-in/keras_sample/LICENSE
new file mode 100644
index 000000000..e93be0a6b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/LICENSE
@@ -0,0 +1,51 @@
+PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation.
+
+Copyright (c) 2017, Geometric Computation Group of Stanford University
+
+The MIT License (MIT)
+
+Copyright (c) 2017 Charles R. Qi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+PointNet:针对3D分类和分割的点集深度学习。
+
+斯坦福大学几何计算小组(c)2017版权所有
+
+MIT许可证(MIT)
+
+版权所有(c)2017 Charles R.Qi
+
+特此授予获得副本的任何人免费的许可
+软件和相关文档文件(以下简称“软件”)的交易
+在软件中不受限制,包括但不限于权利
+使用,复制,修改,合并,发布,分发,再许可和/或出售
+本软件的副本,并允许本软件所针对的人
+具备以下条件:
+
+以上版权声明和此许可声明应包含在所有
+复制或实质性的软件部分。
+
+本软件按“原样”提供,不提供任何形式的明示或明示保证。
+暗示(包括但不限于适销性的保证),
+适用于特定目的和非侵权。在任何情况下都不会
+作者或版权持有人对任何索赔,损害或其他责任
+无论是由于合同,侵权或其他形式的诉讼而引起的责任,
+与软件或软件的使用或其他交易无关或与之有关
+软件。
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/README.md b/TensorFlow2/built-in/keras_sample/README.md
new file mode 100644
index 000000000..2e27ca0f6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/README.md
@@ -0,0 +1,233 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Instance Segmentation**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2022.04.11**
+
+**大小(Size):43M**
+
+**框架(Framework):TensorFlow_2.6.2**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow2.X框架的3D点云采样的图像分类和分割网络训练代码**
+
+
+概述
+
+## 简述
+
+点云(point cloud)是一种非常重要的几何数据结构。由于点云的无规律性(irregular format),大部分研究者将点云转换为规律的3D体素网格(3D voxel grids)或者一组不同视角的2D图像。这种转换数据的方式,增加了数据的规模,同时也会带来一系列问题。PointNet是一种可以直接处理点云的神经网络,并且考虑了输入点云序列不变性的特征。PointNet提供了统一的应用架构,可以用于分类(classification),块分割(part segmentation),语义理解(semantic parsing)。尽管网络很简单,但是非常有效。从实验结果上看,它超越了经典的方法,至少也达到同样的水平。理论上,我们进行了分析,包括网络学习了什么,以及当数据被一定程度的干扰后,网络为什么能保持稳定。
+
+
+ - 参考论文:
+
+ [https://arxiv.org/abs/1612.00593](https://arxiv.org/abs/1612.00593)
+
+ - 参考实现:
+ [https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py)
+
+
+ - 适配昇腾 AI 处理器的实现:
+ skip
+
+ - 通过Git获取对应commit\_id的代码方法如下:
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
+
+
+
+## 默认配置
+
+
+- 网络结构
+ - 设计最大池化层(对称函数),用于聚合所有点的特征信息
+ - 计算全局点云特征向量后,通过将全局特征与每个点特征连接起来,将全局特征反馈给每个点特征。然后我们在合并的点特征的基础上提取新的每点特征——这时,每点特征都能识别局部和全局信息
+ - 通过一个小网络(T-net)来预测一个仿射变换矩阵,并直接将这个变换应用到输入点的坐标上。小网络与大网络相似,由点独立特征提取、最大池化和全连接层等基本模块组成。
+
+- 训练超参(单卡):
+ - Batch size: 32
+ - learning_rate:0.0015
+ - num_point:2048
+ - Train epoch: 250
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+ config_proto = tf.ConfigProto(allow_soft_placement=True)
+ custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = 'NpuOptimizer'
+ custom_op.parameter_map["use_off_line"].b = True
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+ config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+ session_config = npu_config_proto(config_proto=config_proto)
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+
+快速上手
+
+## 数据集准备
+
+1. 模型训练使用modelnet40_ply_hdf5_2048数据集,即ModelNet40模型训练出的点云数据(HDF5文件类型)。每个点云包含从形状表面均匀采样的 2048 个点。每个云都是零均值并归一化为一个单位球体。
+2. 安装 h5py。该代码已在 Ubuntu 14.04 上使用 Python 2.7、TensorFlow 1.0.1、CUDA 8.0 和 cuDNN 5.1 进行了测试。
+```
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+3.log默认情况下,日志文件和网络参数将保存到文件夹中。HDF5 文件中ModelNet40模型的点云将自动下载 (416MB) 到数据文件夹。
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+ 2. 单卡训练
+
+ 2.1 设置单卡训练参数(脚本位于PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+
+ ```
+ batch_size=32
+ #训练step
+ train_epochs=250
+ #学习率
+ learning_rate=0.0015
+ ```
+
+
+
+ 2.2 单卡训练指令(PointNet_ID2913_for_TensorFlow2.X/test)
+
+ ```
+ 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
+ bash train_full_1p.sh --data_path=xx
+ 数据集应为h5类型,配置data_path时需指定为data这一层,例:--data_path=/home/data
+ ├─data
+ ├─ply_data_test0.h5*
+ ├─ply_data_test_0_id2file.json*
+ ├─ply_data_test1.h5*
+ ├─ply_data_test_1_id2file.json*
+ ├─ply_data_train0.h5*
+ ├─ply_data_train_0_id2file.json*
+ ├─ply_data_train1.h5*
+ ├─ply_data_train_1_id2file.json*
+ ├─ply_data_train2.h5*
+ ├─ply_data_train_2_id2file.json*
+ ├─ply_data_train3.h5*
+ ├─ply_data_train_3_id2file.json*
+ ├─ply_data_train4.h5*
+ ├─ply_data_train_4_id2file.json*
+ ├─shape_names.txt*
+ ├─test_files.txt*
+ ├─train_files.txt*
+
+ ```
+
+迁移学习指导
+
+- 数据集准备。
+
+ 1. 获取数据。
+ 请参见“快速上手”中的数据集准备
+
+- 模型训练
+
+ 请参考“快速上手”章节
+
+高级参考
+
+## 脚本和示例代码
+
+ ├── README.md //说明文档
+ ├── requirements.txt //依赖
+ ├── modelzoo_level.txt //状态文件
+ ├── provider.py //数据集处理脚本
+ ├── train.py //网络训练脚本
+ ├── models //网络结构定义脚本
+ |—— pointnet_cls.py
+ |—— pointnet_cls_basic.py
+ |—— pointnet_seg.py
+ |—— transform_nets.py
+ ├── test
+ | |—— train_full_1p.sh //单卡训练脚本
+ | |—— train_performance_1p.sh //单卡训练脚本
+ ...
+
+## 脚本参数
+
+```
+batch_size 训练batch_size
+learning_rate 初始学习率
+max_epochs 最大训练epoch数
+num_point 每个点云包含从形状表面均匀采样的点数
+precision_mode default="allow_mix_precision", type=str,help='the path to save over dump data'
+over_dump type=ast.literal_eval,help='if or not over detection, default is False'
+data_dump_flag type=ast.literal_eval,help='data dump flag, default is False'
+data_dump_step data dump step, default is 10
+profiling type=ast.literal_eval help='if or not profiling for performance debug, default is False'
+profiling_dump_path type=str, help='the path to save profiling data'
+over_dump_path type=str, help='the path to save over dump data'
+data_dump_path type=str, help='the path to save dump data'
+use_mixlist type=ast.literal_eval,help='use_mixlist flag, default is False'
+fusion_off_flag type=ast.literal_eval,help='fusion_off flag, default is False'
+mixlist_file type=str,help='mixlist file name, default is ops_info.json'
+fusion_off_file type=str,help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune help='auto_tune flag, default is False'
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/README_BAK.md b/TensorFlow2/built-in/keras_sample/README_BAK.md
new file mode 100644
index 000000000..6d7185b09
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/README_BAK.md
@@ -0,0 +1,77 @@
+## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation*
+Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University.
+
+
+
+### Introduction
+This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction.
+
+Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective.
+
+In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset.
+
+### Citation
+If you find our work useful in your research, please consider citing:
+
+ @article{qi2016pointnet,
+ title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
+ author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
+ journal={arXiv preprint arXiv:1612.00593},
+ year={2016}
+ }
+
+### Installation
+
+Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04.
+
+If you are using PyTorch, you can find a third-party pytorch implementation here.
+
+To install h5py for Python:
+```bash
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+
+### Usage
+To train a model to classify point clouds sampled from 3D shapes:
+
+ python train.py
+
+Log files and network parameters will be saved to `log` folder in default. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into an unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files.
+
+To see HELP for the training script:
+
+ python train.py -h
+
+We can use TensorBoard to view the network architecture and monitor the training progress.
+
+ tensorboard --logdir log
+
+After the above training, we can evaluate the model and output some visualizations of the error cases.
+
+ python evaluate.py --visu
+
+Point clouds that are wrongly classified will be saved to `dump` folder in default. We visualize the point cloud by rendering it into three-view images.
+
+If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files.
+
+### Part Segmentation
+To train a model for object part segmentation, firstly download the data:
+
+ cd part_seg
+ sh download_data.sh
+
+The downloading script will download ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB).
+
+Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation).
+
+### License
+Our code is released under MIT License (see LICENSE file for details).
+
+### Selected Projects that Use PointNet
+
+* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities.
+* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation.
+* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for local geometric properties (e.g. normal and curvature) estimation in noisy point clouds.
+* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv) This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, use PointNet to learn local voxel features and then use 3D CNN for region proposal, object classification and 3D bounding box estimation.
+* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. (arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017).
diff --git a/TensorFlow2/built-in/keras_sample/evaluate.py b/TensorFlow2/built-in/keras_sample/evaluate.py
new file mode 100644
index 000000000..749f8c7f8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/evaluate.py
@@ -0,0 +1,199 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import argparse
+import socket
+import importlib
+import time
+import os
+import scipy.misc
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+import pc_util
+
+
+parser = argparse.ArgumentParser()  # command-line options for the evaluation run
+parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
+parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
+parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]')
+parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
+parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
+parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
+parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]')
+FLAGS = parser.parse_args()  # parsed at import time; this module is a script, not a library
+
+
+BATCH_SIZE = FLAGS.batch_size
+NUM_POINT = FLAGS.num_point
+MODEL_PATH = FLAGS.model_path
+GPU_INDEX = FLAGS.gpu
+MODEL = importlib.import_module(FLAGS.model) # import network module
+DUMP_DIR = FLAGS.dump_dir
+if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)  # ensure dump dir exists for logs and error images
+LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')  # evaluation log; closed in the __main__ block
+LOG_FOUT.write(str(FLAGS)+'\n')
+
+NUM_CLASSES = 40  # ModelNet40 category count
+SHAPE_NAMES = [line.rstrip() for line in \
+    open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
+
+HOSTNAME = socket.gethostname()
+
+# ModelNet40 official train/test split
+TRAIN_FILES = provider.getDataFiles( \
+    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
+TEST_FILES = provider.getDataFiles(\
+    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
+
+def log_string(out_str):  # append one line to the eval log file and echo it to stdout
+    LOG_FOUT.write(out_str+'\n')
+    LOG_FOUT.flush()  # flush so progress is visible even if the run is interrupted
+    print(out_str)
+
+def evaluate(num_votes):  # build the eval graph, restore the checkpoint, run one evaluation pass
+    is_training = False
+
+    with tf.device('/cpu:0'):  # build the whole eval graph on CPU
+        pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
+        is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
+
+        # simple model
+        pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
+        loss = MODEL.get_loss(pred, labels_pl, end_points)
+
+        # Add ops to save and restore all the variables.
+        saver = tf.compat.v1.train.Saver()
+
+    # Create a session
+    config = tf.compat.v1.ConfigProto()
+    config.gpu_options.allow_growth = True
+    config.allow_soft_placement = True
+    config.log_device_placement = True  # verbose op-placement logging
+    sess = tf.compat.v1.Session(config=config)
+
+    # Restore variables from disk.
+    saver.restore(sess, MODEL_PATH)  # loads weights from the --model_path checkpoint
+    log_string("Model restored.")
+
+    ops = {'pointclouds_pl': pointclouds_pl,
+           'labels_pl': labels_pl,
+           'is_training_pl': is_training_pl,
+           'pred': pred,
+           'loss': loss}  # graph handles passed to eval_one_epoch
+
+    eval_one_epoch(sess, ops, num_votes)
+
+
+def eval_one_epoch(sess, ops, num_votes=1, topk=1):  # evaluate all TEST_FILES with num_votes rotated predictions; logs overall and per-class accuracy
+    error_cnt = 0
+    is_training = False
+    total_correct = 0
+    total_seen = 0
+    loss_sum = 0
+    total_seen_class = [0 for _ in range(NUM_CLASSES)]
+    total_correct_class = [0 for _ in range(NUM_CLASSES)]
+    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')  # per-sample "pred, label" pairs; NOTE(review): never closed — relies on interpreter exit
+    for fn in range(len(TEST_FILES)):
+        log_string('----'+str(fn)+'----')
+        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
+        current_data = current_data[:,0:NUM_POINT,:]  # truncate each cloud to NUM_POINT points
+        current_label = np.squeeze(current_label)
+        print(current_data.shape)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE  # tail samples beyond a full batch are dropped
+        print(file_size)
+
+        for batch_idx in range(num_batches):
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+            cur_batch_size = end_idx - start_idx
+
+            # Aggregating BEG
+            batch_loss_sum = 0 # sum of losses for the batch
+            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
+            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
+            for vote_idx in range(num_votes):
+                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
+                                                  vote_idx/float(num_votes) * np.pi * 2)  # one rotation per vote, evenly spaced over 2*pi
+                feed_dict = {ops['pointclouds_pl']: rotated_data,
+                             ops['labels_pl']: current_label[start_idx:end_idx],
+                             ops['is_training_pl']: is_training}
+                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
+                                          feed_dict=feed_dict)
+                batch_pred_sum += pred_val  # accumulate class scores across votes
+                batch_pred_val = np.argmax(pred_val, 1)
+                for el_idx in range(cur_batch_size):
+                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
+                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
+            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
+            # pred_val = np.argmax(batch_pred_classes, 1)
+            pred_val = np.argmax(batch_pred_sum, 1)  # final prediction = argmax of summed scores
+            # Aggregating END
+
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
+            total_correct += correct
+            total_seen += cur_batch_size
+            loss_sum += batch_loss_sum
+
+            for i in range(start_idx, end_idx):
+                l = current_label[i]
+                total_seen_class[l] += 1
+                total_correct_class[l] += (pred_val[i-start_idx] == l)
+                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
+
+                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
+                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
+                                                           SHAPE_NAMES[pred_val[i-start_idx]])
+                    img_filename = os.path.join(DUMP_DIR, img_filename)
+                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
+                    scipy.misc.imsave(img_filename, output_img)  # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — needs imageio.imwrite on modern SciPy
+                    error_cnt += 1
+
+    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
+    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
+    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))  # NOTE(review): np.float was removed in NumPy 1.20 — use float or np.float64 on modern NumPy
+
+    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)  # NOTE(review): same np.float issue as above
+    for i, name in enumerate(SHAPE_NAMES):
+        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
+
+
+
+if __name__=='__main__':
+    with tf.Graph().as_default():  # fresh graph so evaluate() builds into a clean namespace
+        evaluate(num_votes=1)  # single vote: rotation angle is 0, i.e. no vote averaging
+    LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/modelzoo_level.txt
new file mode 100644
index 000000000..31529da2e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/provider.py b/TensorFlow2/built-in/keras_sample/provider.py
new file mode 100644
index 000000000..18651c47f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/provider.py
@@ -0,0 +1,165 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import sys
+import numpy as np
+import h5py
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+# Download dataset for point cloud classification
+# 拼接data路径
+'''
+DATA_DIR = os.path.join(BASE_DIR, 'data')
+# 如果没有路径,则创建文件夹
+if not os.path.exists(DATA_DIR):
+ os.mkdir(DATA_DIR)
+# 若不存在指定的文件夹,则从指定url下载压缩包,并解压缩
+# 实际上不好用,zipfile下载不下来。所以mv和rm就都报错了。
+if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
+ www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
+ zipfile = os.path.basename(www)
+ os.system('wget %s; unzip %s' % (www, zipfile))
+ os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
+ os.system('rm %s' % (zipfile))
+'''
+
# Randomly permute a dataset and its labels in unison.
def shuffle_data(data, labels):
    """Shuffle data and labels with the same random permutation.

    Input:
        data: B,N,... numpy array
        labels: B,... numpy array
    Return:
        shuffled data, shuffled labels, and the permutation that was applied
    """
    permutation = np.arange(len(labels))
    np.random.shuffle(permutation)
    shuffled_data = data[permutation, ...]
    shuffled_labels = labels[permutation]
    return shuffled_data, shuffled_labels, permutation
+
+
# Data augmentation: rotate each shape by a random angle about the up axis.
def rotate_point_cloud(batch_data):
    """Randomly rotate the point clouds to augment the dataset.

    Each shape gets its own uniformly random rotation about the up (Y) axis.
    Input:
        BxNx3 array, original batch of point clouds
    Return:
        BxNx3 array, rotated batch of point clouds (float32)
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for shape_idx in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c = np.cos(angle)
        s = np.sin(angle)
        # Row-vector convention: points are multiplied on the right.
        rot = np.array([[c, 0, s],
                        [0, 1, 0],
                        [-s, 0, c]])
        points = batch_data[shape_idx, ...].reshape((-1, 3))
        rotated[shape_idx, ...] = points.dot(rot)
    return rotated
+
+
# Rotate every shape in the batch by the same fixed angle about the up axis.
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """Rotate the point cloud along up direction with certain angle.

    Input:
        batch_data: BxNx3 array, original batch of point clouds
        rotation_angle: rotation angle in radians (same for all shapes)
    Return:
        BxNx3 array, rotated batch of point clouds (float32)
    """
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    # The angle is identical for every shape, so the rotation matrix is
    # loop-invariant: build it once instead of once per shape.
    rotation_matrix = np.array([[cosval, 0, sinval],
                                [0, 1, 0],
                                [-sinval, 0, cosval]])
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
+
+
# Data augmentation: add clipped Gaussian noise to every point independently.
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """Randomly jitter points; jittering is per point.

    Input:
        BxNx3 array, original batch of point clouds
    Return:
        BxNx3 array, jittered batch of point clouds
    """
    num_shapes, num_points, num_channels = batch_data.shape
    assert clip > 0
    noise = np.clip(sigma * np.random.randn(num_shapes, num_points, num_channels),
                    -1 * clip, clip)
    return noise + batch_data
+
+
# Read the list file that names the HDF5 shards of a dataset split.
def getDataFiles(list_filename):
    """Return the entries of a newline-separated list file.

    Input:
        list_filename: path to a text file with one entry per line
    Return:
        list of str, each line with trailing whitespace stripped
    """
    # Use a context manager so the file handle is always closed; the original
    # `open(...)` inside the comprehension leaked the handle until GC.
    with open(list_filename) as f:
        return [line.rstrip() for line in f]
+
+
# Load the classification datasets from one HDF5 shard.
def load_h5(h5_filename):
    """Return (data, label) arrays read from an HDF5 file.

    Input:
        h5_filename: path to an HDF5 file with 'data' and 'label' datasets
    Return:
        (data, label) tuple of numpy arrays (fully materialized in memory)
    """
    # Open read-only explicitly — h5py.File without a mode is deprecated and
    # rejected by modern h5py — and use a context manager so the handle is
    # released as soon as the arrays are copied out.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
    return (data, label)
+
+
# Load a single HDF5 data file.
def loadDataFile(filename):
    """Return (data, label) from an HDF5 file; thin wrapper around load_h5,
    kept for API symmetry with loadDataFile_with_seg."""
    return load_h5(filename)
+
+
# Load the segmentation datasets from one HDF5 shard.
def load_h5_data_label_seg(h5_filename):
    """Return (data, label, seg) arrays read from an HDF5 file.

    Input:
        h5_filename: path to an HDF5 file with 'data', 'label' and per-point
            segmentation 'pid' datasets
    Return:
        (data, label, seg) tuple of numpy arrays
    """
    # Open read-only explicitly (required by modern h5py) and close the file
    # deterministically via the context manager.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        seg = f['pid'][:]
    return (data, label, seg)
+
+
# Load a single HDF5 data file including per-point segmentation labels.
def loadDataFile_with_seg(filename):
    """Return (data, label, seg); thin wrapper around load_h5_data_label_seg."""
    return load_h5_data_label_seg(filename)
diff --git a/TensorFlow2/built-in/keras_sample/requirements.txt b/TensorFlow2/built-in/keras_sample/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/train.py b/TensorFlow2/built-in/keras_sample/train.py
new file mode 100644
index 000000000..4a6683530
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/train.py
@@ -0,0 +1,452 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# import npu_device
+# npu_device.open().as_default()
+
+
+import argparse
+# import math
+# import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+import importlib
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+# import tf_util
+import time
+import datetime
+import ast
+from npu_device.compat.v1.npu_init import *
+import npu_device as npu
+npu.compat.enable_v1()
+
# Process start time; train_one_epoch reports the elapsed time to the first
# training file as "TOTLE_TIME".
starttime = datetime.datetime.now()

# Command-line flags: PointNet training hyper-parameters plus Ascend NPU
# debug/tuning switches (data/overflow dump, profiling, mixed-precision
# mixlist, fusion switch file, autotune).
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
parser.add_argument('--data_path', type=str, default='', help='data path')
parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data')
# NOTE(review): the ast.literal_eval flags below have no default, so unset
# flags are None (falsy) — the `if FLAGS.x:` checks later rely on that.
parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
                    help='if or not over detection, default is False')
parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
                    help='data dump flag, default is False')
parser.add_argument('--data_dump_step', default="10",
                    help='data dump step, default is 10')
parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False')
parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data')
parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data')
parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data')
parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
                    help='use_mixlist flag, default is False')
parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
                    help='fusion_off flag, default is False')
parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json')
parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg')
parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False')
FLAGS = parser.parse_args()


# Unpack flags into module-level constants used throughout this script.
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 2048
NUM_CLASSES = 40

# Batch-norm decay schedule constants; consumed by get_bn_decay below.
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# ModelNet40 official train/test split: each .txt lists the HDF5 shards.
TRAIN_FILES = provider.getDataFiles( \
    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt'))
+
def log_string(out_str):
    """Append a line to the training log file and echo it to stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
+
+
# Exponentially decayed learning rate (staircase schedule), clipped from
# below so the optimizer never stalls with a vanishing step size.
def get_learning_rate(batch):
    """Return the decayed learning rate tensor for the given global step.

    BASE_LEARNING_RATE is multiplied by DECAY_RATE every DECAY_STEP samples;
    batch * BATCH_SIZE is the number of samples seen so far.
    """
    decayed = tf.compat.v1.train.exponential_decay(
        BASE_LEARNING_RATE,   # Base learning rate.
        batch * BATCH_SIZE,   # Current index into the dataset.
        DECAY_STEP,           # Decay step.
        DECAY_RATE,           # Decay rate.
        staircase=True)
    # CLIP THE LEARNING RATE! Never let it fall below 1e-5.
    return tf.maximum(decayed, 0.00001)
+
+
# Batch-normalization momentum schedule: bn momentum decays on a staircase
# exponential schedule, so the returned decay (1 - momentum) grows toward
# BN_DECAY_CLIP as training progresses.
def get_bn_decay(batch):
    """Return the batch-norm decay tensor, capped at BN_DECAY_CLIP."""
    momentum = tf.compat.v1.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    # Cap: the smaller of BN_DECAY_CLIP and (1 - decayed momentum).
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
+
+
# Top-level training routine: builds the graph (placeholders, model, loss,
# optimizer), configures an Ascend-NPU session, then runs the train/eval
# epoch loop, checkpointing every 10 epochs.
def train():
    """Build the training graph and run MAX_EPOCH epochs of train + eval."""
    with tf.Graph().as_default():
        # Logical device for graph construction; the NPU bridge remaps it.
        # with tf.device('/gpu:'+str(GPU_INDEX)):
        with tf.device('/gpu:0'):
            # Placeholders only reserve graph inputs; data is fed per step via
            # feed_dict in train_one_epoch / eval_one_epoch.
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
            print(is_training_pl)

            # Note the global_step=batch parameter to minimize: the optimizer
            # increments 'batch' on every training step, which drives the
            # learning-rate and BN-decay schedules below.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.compat.v1.summary.scalar('bn_decay', bn_decay)

            # Get model and loss. pred is B x NUM_CLASSES class scores; the
            # argmax over the class axis is the predicted label.
            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = MODEL.get_loss(pred, labels_pl, end_points)
            tf.compat.v1.summary.scalar('loss', loss)

            # Batch accuracy for the summary writer.
            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.compat.v1.summary.scalar('accuracy', accuracy)

            # Get training operator with the decayed learning rate.
            learning_rate = get_learning_rate(batch)
            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.compat.v1.train.Saver()

        # Create a session configured for the Ascend NPU (CANN NpuOptimizer).
        config = tf.compat.v1.ConfigProto()
        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
        custom_op.name = 'NpuOptimizer'
        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
        if FLAGS.data_dump_flag:
            custom_op.parameter_map["enable_dump"].b = True
            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
            custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
            custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
        if FLAGS.over_dump:
            custom_op.parameter_map["enable_dump_debug"].b = True
            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
            custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
        if FLAGS.profiling:
            # BUGFIX: this previously set parameter_map["precision_mode"].b,
            # which clobbered the string-valued precision_mode option set
            # above and never enabled profiling. The boolean switch that
            # enables profiling is "profiling_mode".
            custom_op.parameter_map["profiling_mode"].b = True
            profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
                                "training_trace":"on", \
                                "task_trace":"on", \
                                "aicpu":"on", \
                                "aic_metrics":"PipeUtilization",\
                                "fp_point":"", \
                                "bp_point":""}'
            custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
        if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
            custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
        if FLAGS.fusion_off_flag:
            # BUGFIX: the CANN option name is "fusion_switch_file"; the old
            # misspelling "sfusion_switch_file" meant the fusion switch file
            # was silently ignored.
            custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
        if FLAGS.auto_tune:
            custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
        # Both grappler rewriters must be explicitly disabled with NpuOptimizer.
        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF

        # Grow device memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        # Fall back to an available device when the requested one is missing.
        config.allow_soft_placement = True
        # Do not log per-op device placement.
        config.log_device_placement = False
        sess = tf.compat.v1.Session(config=config)

        # Add summary writers
        #merged = tf.merge_all_summaries()
        merged = tf.compat.v1.summary.merge_all()
        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                  sess.graph)
        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Init variables. Feeding is_training_pl at init time works around the
        # bool-placeholder initialization bug introduced in TF 0.12.1:
        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
        init = tf.compat.v1.global_variables_initializer()
        #sess.run(init)
        sess.run(init, {is_training_pl: True})

        # Interface dict handed to the train/eval epoch loops: graph inputs,
        # network output (pred), loss, optimizer step, merged summaries, and
        # the global step counter.
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch}

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            # Train for one epoch, then evaluate on the held-out split.
            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk every 10 epochs.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
+
+
# Train for one epoch: shuffle the file order, then within each file shuffle
# the shapes (provider.shuffle_data) and run file_size // BATCH_SIZE
# mini-batches with rotation + jitter augmentation. Logs startup time,
# last-step throughput (FPS), mean loss, and accuracy per file.
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    L = []  # holds the startup-to-first-file wall time, in whole seconds
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]]))
        # Keep only the first NUM_POINT points of each shape.
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Partial final batches are dropped so every feed is exactly BATCH_SIZE.
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        endtime = datetime.datetime.now()
        if fn == 0:
            # Elapsed time from process start to the first training file.
            TOTLE_TIME = (endtime - starttime).seconds
            L.append(TOTLE_TIME)

        # Iterate mini-batches over this file, accumulating the number of
        # correct predictions (total_correct), samples seen (total_seen),
        # and the summed loss (loss_sum).
        for batch_idx in range(num_batches):
            start_time = time.time()
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            # One optimizer step; also fetches summaries, loss, and logits.
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            cost_time = time.time() - start_time
            # Throughput of this step only (samples / second).
            FPS = BATCH_SIZE / cost_time
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # Per-file statistics. NOTE(review): FPS reflects only the last batch,
        # and FPS/mean-loss are undefined if a file yields zero batches
        # (file_size < BATCH_SIZE) — confirm data files are large enough.
        log_string('TOTLE_TIME : %.2f' % (float(L[0])))
        log_string('FPS : %.2f' % (float(FPS)))
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
# Evaluate on every TEST_FILES shard: overall mean loss and accuracy plus
# average per-class accuracy.
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    # Per-class counters for the average class accuracy.
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn]))
        # Keep only the first NUM_POINT points of each shape.
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Partial final batches are dropped, matching train_one_epoch.
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            # Weight the batch-mean loss by batch size for a per-sample mean.
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24, which
    # made this line raise AttributeError; the builtin float is the intended
    # dtype.
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=float))))
+
+
+
+if __name__ == "__main__":
+ train()
+ LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/train_real.py b/TensorFlow2/built-in/keras_sample/train_real.py
new file mode 100644
index 000000000..34c60ca17
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/train_real.py
@@ -0,0 +1,381 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import math
+import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+import importlib
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+import tf_util
+
# Command-line flags for the 'real' dataset variant (defaults: 4096 points,
# batch size 5, 100 epochs).
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
FLAGS = parser.parse_args()


# Unpack flags into module-level constants used throughout this script.
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 40

# Batch-norm decay schedule constants; consumed by get_bn_decay below.
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# Train/test split lists for the local 'data_real' dataset.
TRAIN_FILES = provider.getDataFiles( \
    os.path.join(BASE_DIR, 'data_real/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
    os.path.join(BASE_DIR, 'data_real/test_files.txt'))
print(TRAIN_FILES)
print(TEST_FILES)
+
def log_string(out_str):
    """Write a line to the run log file and mirror it on stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
+
+
# Exponentially decayed learning rate (staircase schedule), clipped from
# below so the optimizer never stalls with a vanishing step size.
def get_learning_rate(batch):
    """Return the decayed learning rate tensor for the given global step.

    BASE_LEARNING_RATE is multiplied by DECAY_RATE every DECAY_STEP samples;
    batch * BATCH_SIZE is the number of samples seen so far.
    """
    learning_rate = tf.compat.v1.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    # Never let the learning rate fall below 1e-5.
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
    return learning_rate
+
+
# Batch-normalization momentum schedule: bn_momentum decays on a staircase
# exponential schedule (see tf.train.exponential_decay), so the returned
# decay (1 - bn_momentum) grows toward BN_DECAY_CLIP as training progresses.
def get_bn_decay(batch):
    """Return the batch-normalization decay tensor, capped at BN_DECAY_CLIP."""
    bn_momentum = tf.compat.v1.train.exponential_decay(
                      BN_INIT_DECAY,
                      batch*BATCH_SIZE,
                      BN_DECAY_DECAY_STEP,
                      BN_DECAY_DECAY_RATE,
                      staircase=True)
    # Cap: the smaller of BN_DECAY_CLIP (0.99) and 1 - decayed momentum.
    bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
    return bn_decay
+
+
+# 初始运行的训练函数。
+# Build the training graph (placeholders, model, loss, optimizer, summaries) and run the training loop.
+def train():
+    # Use a fresh graph as the default graph for the whole run.
+    with tf.Graph().as_default():
+        # Devices can be selected via tf.device(device_name), e.g. '/cpu:0' (the 0 is a device id;
+        # TF does not distinguish CPU ids, so 0 is enough; GPUs are distinct: '/gpu:0', '/gpu:1').
+        # with tf.device('/gpu:'+str(GPU_INDEX)):
+        with tf.device('/cpu:0'):
+            # Placeholders for the point clouds and their labels, created by the model module
+            # (pointnet_cls.placeholder_inputs), sized by (BATCH_SIZE, NUM_POINT).
+            # A placeholder reserves a spot in the graph without carrying data yet;
+            # actual data is fed in later at session-run time.
+            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
+            # Values are supplied to these placeholders through feed_dict.
+            # Scalar bool placeholder: whether the model is currently training.
+            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
+            print(is_training_pl)
+
+            # Note the global_step=batch parameter to minimize.
+            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
+            # i.e. 'batch' is used as the global step counter, and the optimizer
+            # increments it automatically on every training step.
+            # Start the step counter at 0.
+            batch = tf.Variable(0)
+            # Batch-norm decay schedule (get_bn_decay is defined elsewhere in this file).
+            bn_decay = get_bn_decay(batch)
+            # Scalar summary — used to plot values such as loss/accuracy in TensorBoard.
+            tf.compat.v1.summary.scalar('bn_decay', bn_decay)
+
+            # Get model and loss
+            # pred is the classification network built by MODEL.get_model() (pointnet_cls);
+            # it yields per-sample scores over 40 classes — argmax over them is the predicted label.
+            # The model extracts per-point features with a shared MLP, then max-pools over points
+            # so the resulting feature has fixed size and is invariant to input point order,
+            # and finally classifies that global feature with another MLP.
+            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
+            # Classification loss from pointnet_cls.get_loss().
+            loss = MODEL.get_loss(pred, labels_pl, end_points)
+            tf.compat.v1.summary.scalar('loss', loss)
+
+            # tf.argmax(pred, axis=1) picks the index of the highest class score per sample;
+            # tf.equal() compares the predictions against the labels element-wise.
+            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
+            # Reduce-sum over the batch to count correct predictions, then normalize.
+            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
+            tf.compat.v1.summary.scalar('accuracy', accuracy)
+
+            # Get training operator
+            # Decayed learning rate (get_learning_rate defined elsewhere) and the chosen optimizer.
+            learning_rate = get_learning_rate(batch)
+            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
+            if OPTIMIZER == 'momentum':
+                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
+            elif OPTIMIZER == 'adam':
+                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
+            # minimize() does two things: (1) compute gradients for the variables,
+            # producing (gradient, variable) pairs, and (2) apply them to update the weights.
+            # global_step starts at 0 and is incremented automatically on every update.
+            # Splitting minimize() into those two steps is how one would clip/adjust gradients
+            # (against vanishing/exploding gradients); here the combined form is used.
+            train_op = optimizer.minimize(loss, global_step=batch)
+
+            # Add ops to save and restore all the variables.
+            saver = tf.compat.v1.train.Saver()
+
+        # Create a session
+        # Configure how the session runs;
+        # the configuration is passed when the session is created.
+        config = tf.compat.v1.ConfigProto()
+        # True lets TF grow GPU memory on demand instead of grabbing it all up front.
+        config.gpu_options.allow_growth = True
+        # If the requested device does not exist, fall back to one that does (e.g. GPU -> CPU).
+        config.allow_soft_placement = True
+        # Whether to log which device each op is placed on.
+        config.log_device_placement = False
+        # The session is what actually executes the graph.
+        sess = tf.compat.v1.Session(config=config)
+
+        # Add summary writers
+        #merged = tf.merge_all_summaries()
+        merged = tf.compat.v1.summary.merge_all()
+        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
+                                                       sess.graph)
+        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
+
+        # Init variables
+        # Initialize variables, then start training: train_one_epoch runs one epoch,
+        # eval_one_epoch measures accuracy/loss on the test set after each epoch,
+        # and the model is checkpointed every 10 epochs.
+        init = tf.compat.v1.global_variables_initializer()
+        # To fix the bug introduced in TF 0.12.1 as in
+        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
+        #sess.run(init)
+        # Run the initializer for all global variables.
+        sess.run(init, {is_training_pl: True})
+
+        # ops is a dict serving as the interface passed into the train/eval epoch loops:
+        # pred is the network output, loss the objective, train_op the optimizer step, batch the step counter.
+        ops = {'pointclouds_pl': pointclouds_pl,
+               'labels_pl': labels_pl,
+               'is_training_pl': is_training_pl,
+               'pred': pred,
+               'loss': loss,
+               'train_op': train_op,
+               'merged': merged,
+               'step': batch}
+
+        for epoch in range(MAX_EPOCH):
+            # log_string is the logging helper defined elsewhere in this file.
+            log_string('**** EPOCH %03d ****' % (epoch))
+            # Flush so progress shows up immediately in the console.
+            sys.stdout.flush()
+
+            # Train for one epoch (helper defined below):
+            # train_one_epoch runs through the whole training set once.
+            train_one_epoch(sess, ops, train_writer)
+            # Evaluate (helper defined below):
+            # eval_one_epoch reports accuracy and loss on the test set.
+            eval_one_epoch(sess, ops, test_writer)
+
+            # Save the variables to disk.
+            # Checkpoint the model every 10 epochs.
+            if epoch % 10 == 0:
+                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
+                # Log the checkpoint location.
+                log_string("Model saved in file: %s" % save_path)
+
+
+# provider.shuffle_data shuffles the data and returns the shuffled copy.
+# num_batches = file_size // BATCH_SIZE: mini-batches needed per file at this BATCH_SIZE.
+def train_one_epoch(sess, ops, train_writer):
+    """ ops: dict mapping from string to tf ops """
+    is_training = True
+
+    # Shuffle train files
+    # Visit the training files in a random order each epoch.
+    train_file_idxs = np.arange(0, len(TRAIN_FILES))
+    np.random.shuffle(train_file_idxs)
+
+    for fn in range(len(TRAIN_FILES)):
+        log_string('----' + str(fn) + '-----')
+        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
+        current_data = current_data[:,0:NUM_POINT,:]
+        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
+        current_label = np.squeeze(current_label)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE
+
+        total_correct = 0
+        total_seen = 0
+        loss_sum = 0
+
+        # Train mini-batch by mini-batch until the file is exhausted, accumulating
+        # the number correct (total_correct), samples seen (total_seen) and loss_sum.
+        for batch_idx in range(num_batches):
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+
+            # Augment batched point clouds by rotation and jittering
+            # (provider.rotate_point_cloud / provider.jitter_point_cloud).
+            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
+            jittered_data = provider.jitter_point_cloud(rotated_data)
+            feed_dict = {ops['pointclouds_pl']: jittered_data,
+                         ops['labels_pl']: current_label[start_idx:end_idx],
+                         ops['is_training_pl']: is_training,}
+            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
+            # One optimization step: feed_dict supplies the data, ops['train_op'] updates the weights.
+            train_writer.add_summary(summary, step)
+            pred_val = np.argmax(pred_val, 1)
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            total_correct += correct
+            total_seen += BATCH_SIZE
+            loss_sum += loss_val
+
+        # Report mean loss and accuracy for this file.
+        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
+        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
+def eval_one_epoch(sess, ops, test_writer):
+ """ ops: dict mapping from string to tf ops """
+ is_training = False
+ total_correct = 0
+ total_seen = 0
+ loss_sum = 0
+ total_seen_class = [0 for _ in range(NUM_CLASSES)]
+ total_correct_class = [0 for _ in range(NUM_CLASSES)]
+
+ for fn in range(len(TEST_FILES)):
+ log_string('----' + str(fn) + '-----')
+ current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
+ current_data = current_data[:,0:NUM_POINT,:]
+ current_label = np.squeeze(current_label)
+
+ file_size = current_data.shape[0]
+ num_batches = file_size // BATCH_SIZE
+
+ for batch_idx in range(num_batches):
+ start_idx = batch_idx * BATCH_SIZE
+ end_idx = (batch_idx+1) * BATCH_SIZE
+
+ feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
+ ops['labels_pl']: current_label[start_idx:end_idx],
+ ops['is_training_pl']: is_training}
+ summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+ ops['loss'], ops['pred']], feed_dict=feed_dict)
+ pred_val = np.argmax(pred_val, 1)
+ correct = np.sum(pred_val == current_label[start_idx:end_idx])
+ total_correct += correct
+ total_seen += BATCH_SIZE
+ loss_sum += (loss_val*BATCH_SIZE)
+ for i in range(start_idx, end_idx):
+ l = current_label[i]
+ total_seen_class[l] += 1
+ total_correct_class[l] += (pred_val[i-start_idx] == l)
+
+ log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
+ log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
+ log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
+
+
+
+if __name__ == "__main__":  # script entry point
+    train()
+    LOG_FOUT.close()  # close the module-level log file handle opened at import time
--
Gitee
From 8d4827b97b6bc14c2ec3ddd2d539b473c4a7f469 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:28:45 +0000
Subject: [PATCH 10/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/.gitignore?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/.gitignore | 2 --
1 file changed, 2 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/.gitignore
diff --git a/TensorFlow2/built-in/keras_sample/.gitignore b/TensorFlow2/built-in/keras_sample/.gitignore
deleted file mode 100644
index 8efb80c9a..000000000
--- a/TensorFlow2/built-in/keras_sample/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/data/*
-/log/*
--
Gitee
From bb5dccb349692bd7c37daaa2b45419ac70869a9d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:28:55 +0000
Subject: [PATCH 11/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/LICENSE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/LICENSE | 51 -----------------------
1 file changed, 51 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/LICENSE
diff --git a/TensorFlow2/built-in/keras_sample/LICENSE b/TensorFlow2/built-in/keras_sample/LICENSE
deleted file mode 100644
index e93be0a6b..000000000
--- a/TensorFlow2/built-in/keras_sample/LICENSE
+++ /dev/null
@@ -1,51 +0,0 @@
-PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation.
-
-Copyright (c) 2017, Geometric Computation Group of Stanford University
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Charles R. Qi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-PointNet:针对3D分类和分割的点集深度学习。
-
-斯坦福大学几何计算小组(c)2017版权所有
-
-MIT许可证(MIT)
-
-版权所有(c)2017 Charles R.Qi
-
-特此授予获得副本的任何人免费的许可
-软件和相关文档文件(以下简称“软件”)的交易
-在软件中不受限制,包括但不限于权利
-使用,复制,修改,合并,发布,分发,再许可和/或出售
-本软件的副本,并允许本软件所针对的人
-具备以下条件:
-
-以上版权声明和此许可声明应包含在所有
-复制或实质性的软件部分。
-
-本软件按“原样”提供,不提供任何形式的明示或明示保证。
-暗示(包括但不限于适销性的保证),
-适用于特定目的和非侵权。在任何情况下都不会
-作者或版权持有人对任何索赔,损害或其他责任
-无论是由于合同,侵权或其他形式的诉讼而引起的责任,
-与软件或软件的使用或其他交易无关或与之有关
-软件。
\ No newline at end of file
--
Gitee
From 8f49f56d97e674219fe397ad43549d2a4ea3886e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:05 +0000
Subject: [PATCH 12/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/README.md | 233 --------------------
1 file changed, 233 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/README.md
diff --git a/TensorFlow2/built-in/keras_sample/README.md b/TensorFlow2/built-in/keras_sample/README.md
deleted file mode 100644
index 2e27ca0f6..000000000
--- a/TensorFlow2/built-in/keras_sample/README.md
+++ /dev/null
@@ -1,233 +0,0 @@
-- [基本信息](#基本信息.md)
-- [概述](#概述.md)
-- [训练环境准备](#训练环境准备.md)
-- [快速上手](#快速上手.md)
-- [迁移学习指导](#迁移学习指导.md)
-- [高级参考](#高级参考.md)
-
-基本信息
-
-**发布者(Publisher):Huawei**
-
-**应用领域(Application Domain):Instance Segmentation**
-
-**版本(Version):1.1**
-
-**修改时间(Modified) :2022.04.11**
-
-**大小(Size):43M**
-
-**框架(Framework):TensorFlow_2.6.2**
-
-**模型格式(Model Format):ckpt**
-
-**精度(Precision):Mixed**
-
-**处理器(Processor):昇腾910**
-
-**应用级别(Categories):Official**
-
-**描述(Description):基于TensorFlow2.X框架的3D点云采样的图像分类和分割网络训练代码**
-
-
-概述
-
-## 简述
-
-点云(point cloud)是一种非常重要的几何数据结构。由于点云的无规律性(irregular format),大部分研究者将点云转换为规律的3D体素网格(3D voxel grids)或者一组不同视角的2D图像。这种转换数据的方式,增加了数据的规模,同时也会带来一系列问题。PointNet是一种可以直接处理点云的神经网络,并且考虑了输入点云序列不变性的特征。PointNet提供了统一的应用架构,可以用于分类(classification),块分割(part segmentation),语义理解(semantic parsing)。尽管网络很简单,但是非常有效。从实验结果上看,它超越了经典的方法,至少也达到同样的水平。理论上,我们进行了分析,包括网络学习了什么,以及当数据被一定程度的干扰后,网络为什么能保持稳定。
-
-
- - 参考论文:
-
- https://arxiv.org/abs/1612.00593(https://arxiv.org/abs/1612.00593)
-
- - 参考实现:
- https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py(https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py)
-
-
- - 适配昇腾 AI 处理器的实现:
- skip
-
- - 通过Git获取对应commit\_id的代码方法如下:
- ```
- git clone {repository_url} # 克隆仓库的代码
- cd {repository_name} # 切换到模型的代码仓目录
- git checkout {branch} # 切换到对应分支
- git reset --hard {commit_id} # 代码设置到对应的commit_id
- cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
- ```
-
-
-
-
-## 默认配置
-
-
-- 网络结构
- - 设计最大池化层(对称函数),用于聚合所有点的特征信息
- - 计算全局点云特征向量后,通过将全局特征与每个点特征连接起来,将全局特征反馈给每个点特征。然后我们在合并的点特征的基础上提取新的每点特征——这时,每点特征都能识别局部和全局信息
- - 通过一个小网络(T-net)来预测一个仿射变换矩阵,并直接将这个变换应用到输入点的坐标上。小网络与大网络相似,由点独立特征提取、最大池化和全连接层等基本模块组成。
-
-- 训练超参(单卡):
- - Batch size: 32
- - learning_rate:0.0015
- - num_point:2048
- - Train epoch: 250
-
-
-## 支持特性
-
-| 特性列表 | 是否支持 |
-|-------|------|
-| 分布式训练 | 否 |
-| 混合精度 | 是 |
-| 数据并行 | 否 |
-
-## 混合精度训练
-
-昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
-
-## 开启混合精度
-相关代码示例。
-
-```
- config_proto = tf.ConfigProto(allow_soft_placement=True)
- custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
- custom_op.name = 'NpuOptimizer'
- custom_op.parameter_map["use_off_line"].b = True
- custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
- config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
- session_config = npu_config_proto(config_proto=config_proto)
-```
-
-训练环境准备
-
-- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
-- 运行以下命令安装依赖。
-```
-pip3 install requirements.txt
-```
-说明:依赖配置文件requirements.txt文件位于模型的根目录
-
-
-快速上手
-
-## 数据集准备
-
-1. 模型训练使用modelnet40_ply_hdf5_2048数据集,即ModelNet40模型训练出的点云数据(HDF5文件类型)。每个点云包含从形状表面均匀采样的 2048 个点。每个云都是零均值并归一化为一个单位球体。
-2. 安装 h5py。该代码已在 Ubuntu 14.04 上使用 Python 2.7、TensorFlow 1.0.1、CUDA 8.0 和 cuDNN 5.1 进行了测试。
-```
-sudo apt-get install libhdf5-dev
-sudo pip install h5py
-```
-3.log默认情况下,日志文件和网络参数将保存到文件夹中。HDF5 文件中ModelNet40模型的点云将自动下载 (416MB) 到数据文件夹。
-
-## 模型训练
-- 单击“立即下载”,并选择合适的下载方式下载源码包。
-- 开始训练。
-
- 1. 启动训练之前,首先要配置程序运行相关环境变量。
-
- 环境变量配置信息参见:
-
- [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
-
-
- 2. 单卡训练
-
- 2.1 设置单卡训练参数(脚本位于PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
-
-
- ```
- batch_size=32
- #训练step
- train_epochs=250
- #学习率
- learning_rate=0.0015
- ```
-
-
-
- 2.2 单卡训练指令(PointNet_ID2913_for_TensorFlow2.X/test)
-
- ```
- 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
- bash train_full_1p.sh --data_path=xx
- 数据集应为h5类型,配置data_path时需指定为data这一层,例:--data_path=/home/data
- ├─data
- ├─ply_data_test0.h5*
- ├─ply_data_test_0_id2file.json*
- ├─ply_data_test1.h5*
- ├─ply_data_test_1_id2file.json*
- ├─ply_data_train0.h5*
- ├─ply_data_train_0_id2file.json*
- ├─ply_data_train1.h5*
- ├─ply_data_train_1_id2file.json*
- ├─ply_data_train2.h5*
- ├─ply_data_train_2_id2file.json*
- ├─ply_data_train3.h5*
- ├─ply_data_train_3_id2file.json*
- ├─ply_data_train4.h5*
- ├─ply_data_train_4_id2file.json*
- ├─shape_names.txt*
- ├─test_files.txt*
- ├─train_files.txt*
-
- ```
-
-迁移学习指导
-
-- 数据集准备。
-
- 1. 获取数据。
- 请参见“快速上手”中的数据集准备
-
-- 模型训练
-
- 请参考“快速上手”章节
-
-高级参考
-
-## 脚本和示例代码
-
- ├── README.md //说明文档
- ├── requirements.txt //依赖
- ├── modelzoo_level.txt //状态文件
- ├── provider.py //数据集处理脚本
- ├── train.py //网络训练脚本
- ├── models //网络结构定义脚本
- |—— pointnet_cls.py
- |—— pointnet_cls_basic.py
- |—— pointnet_seg.py
- |—— transform_nets.py
- ├── test
- | |—— train_full_1p.sh //单卡训练脚本
- | |—— train_performance_1p.sh //单卡训练脚本
- ...
-
-## 脚本参数
-
-```
-batch_size 训练batch_size
-learning_rate 初始学习率
-max_epochs 最大训练epoch数
-num_point 每个点云包含从形状表面均匀采样的点数
-precision_mode default="allow_mix_precision", type=str,help='the path to save over dump data'
-over_dump type=ast.literal_eval,help='if or not over detection, default is False'
-data_dump_flag type=ast.literal_eval,help='data dump flag, default is False'
-data_dump_step data dump step, default is 10
-profiling type=ast.literal_eval help='if or not profiling for performance debug, default is False'
-profiling_dump_path type=str, help='the path to save profiling data'
-over_dump_path type=str, help='the path to save over dump data'
-data_dump_path type=str, help='the path to save dump data'
-use_mixlist type=ast.literal_eval,help='use_mixlist flag, default is False'
-fusion_off_flag type=ast.literal_eval,help='fusion_off flag, default is False'
-mixlist_file type=str,help='mixlist file name, default is ops_info.json'
-fusion_off_file type=str,help='fusion_off file name, default is fusion_switch.cfg'
-auto_tune help='auto_tune flag, default is False'
-```
-
-## 训练过程
-
-通过“模型训练”中的训练指令启动单卡训练。
-将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
--
Gitee
From 1f3182da6964cbd46e8ad016c47fdd56e1070c5b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:12 +0000
Subject: [PATCH 13/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/evaluate.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/evaluate.py | 199 ------------------
1 file changed, 199 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/evaluate.py
diff --git a/TensorFlow2/built-in/keras_sample/evaluate.py b/TensorFlow2/built-in/keras_sample/evaluate.py
deleted file mode 100644
index 749f8c7f8..000000000
--- a/TensorFlow2/built-in/keras_sample/evaluate.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import tensorflow as tf
-import numpy as np
-import argparse
-import socket
-import importlib
-import time
-import os
-import scipy.misc
-import sys
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(BASE_DIR)
-sys.path.append(os.path.join(BASE_DIR, 'models'))
-sys.path.append(os.path.join(BASE_DIR, 'utils'))
-import provider
-import pc_util
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
-parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
-parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]')
-parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
-parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
-parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
-parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]')
-FLAGS = parser.parse_args()
-
-
-BATCH_SIZE = FLAGS.batch_size
-NUM_POINT = FLAGS.num_point
-MODEL_PATH = FLAGS.model_path
-GPU_INDEX = FLAGS.gpu
-MODEL = importlib.import_module(FLAGS.model) # import network module
-DUMP_DIR = FLAGS.dump_dir
-if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
-LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
-LOG_FOUT.write(str(FLAGS)+'\n')
-
-NUM_CLASSES = 40
-SHAPE_NAMES = [line.rstrip() for line in \
- open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
-
-HOSTNAME = socket.gethostname()
-
-# ModelNet40 official train/test split
-TRAIN_FILES = provider.getDataFiles( \
- os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
-TEST_FILES = provider.getDataFiles(\
- os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
-
-def log_string(out_str):
- LOG_FOUT.write(out_str+'\n')
- LOG_FOUT.flush()
- print(out_str)
-
-def evaluate(num_votes):
- is_training = False
-
- with tf.device('/cpu:0'):
- pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
- is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
-
- # simple model
- pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
- loss = MODEL.get_loss(pred, labels_pl, end_points)
-
- # Add ops to save and restore all the variables.
- saver = tf.compat.v1.train.Saver()
-
- # Create a session
- config = tf.compat.v1.ConfigProto()
- config.gpu_options.allow_growth = True
- config.allow_soft_placement = True
- config.log_device_placement = True
- sess = tf.compat.v1.Session(config=config)
-
- # Restore variables from disk.
- saver.restore(sess, MODEL_PATH)
- log_string("Model restored.")
-
- ops = {'pointclouds_pl': pointclouds_pl,
- 'labels_pl': labels_pl,
- 'is_training_pl': is_training_pl,
- 'pred': pred,
- 'loss': loss}
-
- eval_one_epoch(sess, ops, num_votes)
-
-
-def eval_one_epoch(sess, ops, num_votes=1, topk=1):
- error_cnt = 0
- is_training = False
- total_correct = 0
- total_seen = 0
- loss_sum = 0
- total_seen_class = [0 for _ in range(NUM_CLASSES)]
- total_correct_class = [0 for _ in range(NUM_CLASSES)]
- fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
- for fn in range(len(TEST_FILES)):
- log_string('----'+str(fn)+'----')
- current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
- current_data = current_data[:,0:NUM_POINT,:]
- current_label = np.squeeze(current_label)
- print(current_data.shape)
-
- file_size = current_data.shape[0]
- num_batches = file_size // BATCH_SIZE
- print(file_size)
-
- for batch_idx in range(num_batches):
- start_idx = batch_idx * BATCH_SIZE
- end_idx = (batch_idx+1) * BATCH_SIZE
- cur_batch_size = end_idx - start_idx
-
- # Aggregating BEG
- batch_loss_sum = 0 # sum of losses for the batch
- batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
- batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
- for vote_idx in range(num_votes):
- rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
- vote_idx/float(num_votes) * np.pi * 2)
- feed_dict = {ops['pointclouds_pl']: rotated_data,
- ops['labels_pl']: current_label[start_idx:end_idx],
- ops['is_training_pl']: is_training}
- loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
- feed_dict=feed_dict)
- batch_pred_sum += pred_val
- batch_pred_val = np.argmax(pred_val, 1)
- for el_idx in range(cur_batch_size):
- batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
- batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
- # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
- # pred_val = np.argmax(batch_pred_classes, 1)
- pred_val = np.argmax(batch_pred_sum, 1)
- # Aggregating END
-
- correct = np.sum(pred_val == current_label[start_idx:end_idx])
- # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
- total_correct += correct
- total_seen += cur_batch_size
- loss_sum += batch_loss_sum
-
- for i in range(start_idx, end_idx):
- l = current_label[i]
- total_seen_class[l] += 1
- total_correct_class[l] += (pred_val[i-start_idx] == l)
- fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
-
- if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
- img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
- SHAPE_NAMES[pred_val[i-start_idx]])
- img_filename = os.path.join(DUMP_DIR, img_filename)
- output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
- scipy.misc.imsave(img_filename, output_img)
- error_cnt += 1
-
- log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
- log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
- log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
-
- class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
- for i, name in enumerate(SHAPE_NAMES):
- log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
-
-
-
-if __name__=='__main__':
- with tf.Graph().as_default():
- evaluate(num_votes=1)
- LOG_FOUT.close()
--
Gitee
From 493320e8b7309fdf70beb2cd210e74de6ed07dfc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:19 +0000
Subject: [PATCH 14/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/provider.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/provider.py | 165 ------------------
1 file changed, 165 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/provider.py
diff --git a/TensorFlow2/built-in/keras_sample/provider.py b/TensorFlow2/built-in/keras_sample/provider.py
deleted file mode 100644
index 18651c47f..000000000
--- a/TensorFlow2/built-in/keras_sample/provider.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import sys
-import numpy as np
-import h5py
-
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(BASE_DIR)
-
-# Download dataset for point cloud classification
-# 拼接data路径
-'''
-DATA_DIR = os.path.join(BASE_DIR, 'data')
-# 如果没有路径,则创建文件夹
-if not os.path.exists(DATA_DIR):
- os.mkdir(DATA_DIR)
-# 若不存在指定的文件夹,则从指定url下载压缩包,并解压缩
-# 实际上不好用,zipfile下载不下来。所以mv和rm就都报错了。
-if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
- www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
- zipfile = os.path.basename(www)
- os.system('wget %s; unzip %s' % (www, zipfile))
- os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
- os.system('rm %s' % (zipfile))
-'''
-
-# 把数据随机打乱
-def shuffle_data(data, labels):
- """ Shuffle data and labels.
- Input:
- data: B,N,... numpy array
- label: B,... numpy array
- Return:
- shuffled data, label and shuffle indices
- """
- # 取标签长度
- idx = np.arange(len(labels))
- # 打乱索引
- np.random.shuffle(idx)
- # 返回打乱的数据,标签和索引
- return data[idx, ...], labels[idx], idx
-
-
-# 旋转点云
-def rotate_point_cloud(batch_data):
- # 随机旋转点云以扩大数据集
- # 旋转是基于向上方向的每个形状
- # 输入:
- # BxNx3阵列,原始batch的点云
- # 返回:
- # BxNx3阵列,旋转的点云batch
- """ Randomly rotate the point clouds to augument the dataset
- rotation is per shape based along up direction
- Input:
- BxNx3 array, original batch of point clouds
- Return:
- BxNx3 array, rotated batch of point clouds
- """
- rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
- for k in range(batch_data.shape[0]):
- rotation_angle = np.random.uniform() * 2 * np.pi
- cosval = np.cos(rotation_angle)
- sinval = np.sin(rotation_angle)
- rotation_matrix = np.array([[cosval, 0, sinval],
- [0, 1, 0],
- [-sinval, 0, cosval]])
- shape_pc = batch_data[k, ...]
- rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
- return rotated_data
-
-
-# 按角度旋转点云
-def rotate_point_cloud_by_angle(batch_data, rotation_angle):
- """ Rotate the point cloud along up direction with certain angle.
- Input:
- BxNx3 array, original batch of point clouds
- Return:
- BxNx3 array, rotated batch of point clouds
- """
- rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
- for k in range(batch_data.shape[0]):
- # rotation_angle = np.random.uniform() * 2 * np.pi
- cosval = np.cos(rotation_angle)
- sinval = np.sin(rotation_angle)
- rotation_matrix = np.array([[cosval, 0, sinval],
- [0, 1, 0],
- [-sinval, 0, cosval]])
- shape_pc = batch_data[k, ...]
- rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
- return rotated_data
-
-
-# 抖动点云
-def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
- """ Randomly jitter points. jittering is per point.
- Input:
- BxNx3 array, original batch of point clouds
- Return:
- BxNx3 array, jittered batch of point clouds
- """
- B, N, C = batch_data.shape
- assert (clip > 0)
- jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)
- jittered_data += batch_data
- return jittered_data
-
-
-# 获得复数个数据文件
-def getDataFiles(list_filename):
- return [line.rstrip() for line in open(list_filename)]
-
-
-# 加载h5文件
-def load_h5(h5_filename):
- f = h5py.File(h5_filename)
- data = f['data'][:]
- label = f['label'][:]
- return (data, label)
-
-
-# 获得单个数据文件
-def loadDataFile(filename):
- return load_h5(filename)
-
-
-# 加载h5数据标签段
-def load_h5_data_label_seg(h5_filename):
- f = h5py.File(h5_filename)
- data = f['data'][:]
- label = f['label'][:]
- seg = f['pid'][:]
- return (data, label, seg)
-
-
-# 用seg加载数据文件
-def loadDataFile_with_seg(filename):
- return load_h5_data_label_seg(filename)
--
Gitee
From 04a1e98a2dfd928d2b66cd02733528c6f7c9a111 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:25 +0000
Subject: [PATCH 15/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/train=5Freal.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../built-in/keras_sample/train_real.py | 381 ------------------
1 file changed, 381 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/train_real.py
diff --git a/TensorFlow2/built-in/keras_sample/train_real.py b/TensorFlow2/built-in/keras_sample/train_real.py
deleted file mode 100644
index 34c60ca17..000000000
--- a/TensorFlow2/built-in/keras_sample/train_real.py
+++ /dev/null
@@ -1,381 +0,0 @@
-#
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import math
-import h5py
-import numpy as np
-import tensorflow as tf
-import socket
-import importlib
-import os
-import sys
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(BASE_DIR)
-sys.path.append(os.path.join(BASE_DIR, 'models'))
-sys.path.append(os.path.join(BASE_DIR, 'utils'))
-import provider
-import tf_util
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
-parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
-parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
-parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048] [default: 1024]')
-parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 250]')
-parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 32]')
-parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
-parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
-parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
-parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
-parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
-FLAGS = parser.parse_args()
-
-
-BATCH_SIZE = FLAGS.batch_size
-NUM_POINT = FLAGS.num_point
-MAX_EPOCH = FLAGS.max_epoch
-BASE_LEARNING_RATE = FLAGS.learning_rate
-GPU_INDEX = FLAGS.gpu
-MOMENTUM = FLAGS.momentum
-OPTIMIZER = FLAGS.optimizer
-DECAY_STEP = FLAGS.decay_step
-DECAY_RATE = FLAGS.decay_rate
-
-MODEL = importlib.import_module(FLAGS.model) # import network module
-MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
-LOG_DIR = FLAGS.log_dir
-if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
-os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
-os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
-LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
-LOG_FOUT.write(str(FLAGS)+'\n')
-
-MAX_NUM_POINT = 4096
-NUM_CLASSES = 40
-
-BN_INIT_DECAY = 0.5
-BN_DECAY_DECAY_RATE = 0.5
-BN_DECAY_DECAY_STEP = float(DECAY_STEP)
-BN_DECAY_CLIP = 0.99
-
-HOSTNAME = socket.gethostname()
-
-# ModelNet40 official train/test split
-TRAIN_FILES = provider.getDataFiles( \
- os.path.join(BASE_DIR, 'data_real/train_files.txt'))
-TEST_FILES = provider.getDataFiles(\
- os.path.join(BASE_DIR, 'data_real/test_files.txt'))
-print(TRAIN_FILES)
-print(TEST_FILES)
-
-def log_string(out_str):
- LOG_FOUT.write(out_str+'\n')
- LOG_FOUT.flush()
- print(out_str)
-
-
-# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。
-# tf.train.exponential_decay函数实现指数衰减学习率。
-def get_learning_rate(batch):
- # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。
- # 通过tf.train.exponential_decay函数实现指数衰减学习率。
- # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢,
- # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。
- # tf.train.exponential_decay就是用来实现这个功能。
- #
- # 步骤:
- # 1.首先使用较大学习率(目的:为快速得到一个比较优的解);
- # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定);
- learning_rate = tf.compat.v1.train.exponential_decay(
- BASE_LEARNING_RATE, # Base learning rate.
- batch * BATCH_SIZE, # Current index into the dataset.
- DECAY_STEP, # Decay step.
- DECAY_RATE, # Decay rate.
- staircase=True)
- # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大)
- learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
- return learning_rate
-
-
-# 取得bn衰减
-# if the argument staircase is True,
-# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function.
-# 计算衰减的Batch Normalization 的 decay。
-def get_bn_decay(batch):
- # 指数衰减法
-
- # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。
- # 通过tf.train.exponential_decay函数实现指数衰减学习率。
- # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢,
- # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。
- # tf.train.exponential_decay就是用来实现这个功能。
- #
- # 步骤:
- # 1.首先使用较大学习率(目的:为快速得到一个比较优的解);
- # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定);
- bn_momentum = tf.compat.v1.train.exponential_decay(
- BN_INIT_DECAY,
- batch*BATCH_SIZE,
- BN_DECAY_DECAY_STEP,
- BN_DECAY_DECAY_RATE,
- staircase=True)
- # bn衰减0.99和1-衰减后的动量,取最小
- bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
- return bn_decay
-
-
-# 初始运行的训练函数。
-# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练.
-def train():
- # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图
- with tf.Graph().as_default():
- # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号,
- # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。
- # with tf.device('/gpu:'+str(GPU_INDEX)):
- with tf.device('/cpu:0'):
- # 使用了pointne_cls.py的placeholder_inputs()方法。
- # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。
- # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型,
- # 它只会分配必要的内存,用于传入外部数据。
- pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
- # 向指定好的对象中喂入数据:tf.placeholder()
- # 取得占位符:是否在训练。
- is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
- print(is_training_pl)
-
- # Note the global_step=batch parameter to minimize.
- # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
- # 将 global_step = batch 参数最小化。
- # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。
- # 定义 batch = 0
- batch = tf.Variable(0)
- # 取得bn衰减(自定义方法)
- bn_decay = get_bn_decay(batch)
- # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。
- tf.compat.v1.summary.scalar('bn_decay', bn_decay)
-
- # Get model and loss
- # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知,
- # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。
- # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作,
- # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。
- # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。
- pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
- # 调用pointnet_cls下的get_loss()
- loss = MODEL.get_loss(pred, labels_pl, end_points)
- tf.compat.v1.summary.scalar('loss', loss)
-
- # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵
- # tf.equal() 比较两个张量对应位置是否相等
- correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
- # 压缩求和,用于降维
- accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
- tf.compat.v1.summary.scalar('accuracy', accuracy)
-
- # Get training operator
- # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。
- learning_rate = get_learning_rate(batch)
- tf.compat.v1.summary.scalar('learning_rate', learning_rate)
- if OPTIMIZER == 'momentum':
- optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
- elif OPTIMIZER == 'adam':
- optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
- # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值
- # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…]
- # (2)用计算得到的梯度来更新对应的变量(权重)
- # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1
- # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸
- train_op = optimizer.minimize(loss, global_step=batch)
-
- # Add ops to save and restore all the variables.
- saver = tf.compat.v1.train.Saver()
-
- # Create a session
- # 配置session 运行参数。
- # 创建sess的时候对sess进行参数配置
- config = tf.compat.v1.ConfigProto()
- # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。
- config.gpu_options.allow_growth = True
- # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行
- config.allow_soft_placement = True
- # 在终端打印出各项操作是在哪个设备上运行的
- config.log_device_placement = False
- # 创建 sess, 才能运行框架
- sess = tf.compat.v1.Session(config=config)
-
- # Add summary writers
- #merged = tf.merge_all_summaries()
- merged = tf.compat.v1.summary.merge_all()
- train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
- sess.graph)
- test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
-
- # Init variables
- # 初始化参数,开始训练
- # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的
- # accuracy和loss。每10个epoch保存1次模型。
- init = tf.compat.v1.global_variables_initializer()
- # To fix the bug introduced in TF 0.12.1 as in
- # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
- #sess.run(init)
- # 运行sess初始化所有的全局变量
- sess.run(init, {is_training_pl: True})
-
- # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。
- # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次
- ops = {'pointclouds_pl': pointclouds_pl,
- 'labels_pl': labels_pl,
- 'is_training_pl': is_training_pl,
- 'pred': pred,
- 'loss': loss,
- 'train_op': train_op,
- 'merged': merged,
- 'step': batch}
-
- for epoch in range(MAX_EPOCH):
- # log(自定义方法)
- log_string('**** EPOCH %03d ****' % (epoch))
- # 在同一个位置刷新输出
- sys.stdout.flush()
-
- # 训练一个批次(自定义方法)
- # train_one_epoch 函数用来训练一个epoch
- train_one_epoch(sess, ops, train_writer)
- # 评估一个批次(自定义方法)
- # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss
- eval_one_epoch(sess, ops, test_writer)
-
- # Save the variables to disk.
- # Save the variables to disk.每10个epoch保存1次模型
- if epoch % 10 == 0:
- save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
- # log(自定义方法)
- log_string("Model saved in file: %s" % save_path)
-
-
-# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。
-# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。
-def train_one_epoch(sess, ops, train_writer):
- """ ops: dict mapping from string to tf ops """
- is_training = True
-
- # Shuffle train files
- # 随机打乱训练数据
- train_file_idxs = np.arange(0, len(TRAIN_FILES))
- np.random.shuffle(train_file_idxs)
-
- for fn in range(len(TRAIN_FILES)):
- log_string('----' + str(fn) + '-----')
- current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
- current_data = current_data[:,0:NUM_POINT,:]
- current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
- current_label = np.squeeze(current_label)
-
- file_size = current_data.shape[0]
- num_batches = file_size // BATCH_SIZE
-
- total_correct = 0
- total_seen = 0
- loss_sum = 0
-
- # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数
- # total_senn,总损失loss_sum.
- for batch_idx in range(num_batches):
- start_idx = batch_idx * BATCH_SIZE
- end_idx = (batch_idx+1) * BATCH_SIZE
-
- # Augment batched point clouds by rotation and jittering
- # 调用provider中rotate_point_cloud
- rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
- jittered_data = provider.jitter_point_cloud(rotated_data)
- feed_dict = {ops['pointclouds_pl']: jittered_data,
- ops['labels_pl']: current_label[start_idx:end_idx],
- ops['is_training_pl']: is_training,}
- summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
- ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
- # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据
- train_writer.add_summary(summary, step)
- pred_val = np.argmax(pred_val, 1)
- correct = np.sum(pred_val == current_label[start_idx:end_idx])
- total_correct += correct
- total_seen += BATCH_SIZE
- loss_sum += loss_val
-
- # 记录平均loss,以及平均accuracy。
- log_string('mean loss: %f' % (loss_sum / float(num_batches)))
- log_string('accuracy: %f' % (total_correct / float(total_seen)))
-
-
-def eval_one_epoch(sess, ops, test_writer):
- """ ops: dict mapping from string to tf ops """
- is_training = False
- total_correct = 0
- total_seen = 0
- loss_sum = 0
- total_seen_class = [0 for _ in range(NUM_CLASSES)]
- total_correct_class = [0 for _ in range(NUM_CLASSES)]
-
- for fn in range(len(TEST_FILES)):
- log_string('----' + str(fn) + '-----')
- current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
- current_data = current_data[:,0:NUM_POINT,:]
- current_label = np.squeeze(current_label)
-
- file_size = current_data.shape[0]
- num_batches = file_size // BATCH_SIZE
-
- for batch_idx in range(num_batches):
- start_idx = batch_idx * BATCH_SIZE
- end_idx = (batch_idx+1) * BATCH_SIZE
-
- feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
- ops['labels_pl']: current_label[start_idx:end_idx],
- ops['is_training_pl']: is_training}
- summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
- ops['loss'], ops['pred']], feed_dict=feed_dict)
- pred_val = np.argmax(pred_val, 1)
- correct = np.sum(pred_val == current_label[start_idx:end_idx])
- total_correct += correct
- total_seen += BATCH_SIZE
- loss_sum += (loss_val*BATCH_SIZE)
- for i in range(start_idx, end_idx):
- l = current_label[i]
- total_seen_class[l] += 1
- total_correct_class[l] += (pred_val[i-start_idx] == l)
-
- log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
- log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
- log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
-
-
-
-if __name__ == "__main__":
- train()
- LOG_FOUT.close()
--
Gitee
From 570627f3860d78fba4dd92d31b82461d3b523c93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:34 +0000
Subject: [PATCH 16/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/train.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/train.py | 452 ---------------------
1 file changed, 452 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/train.py
diff --git a/TensorFlow2/built-in/keras_sample/train.py b/TensorFlow2/built-in/keras_sample/train.py
deleted file mode 100644
index 4a6683530..000000000
--- a/TensorFlow2/built-in/keras_sample/train.py
+++ /dev/null
@@ -1,452 +0,0 @@
-#
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# import npu_device
-# npu_device.open().as_default()
-
-
-import argparse
-# import math
-# import h5py
-import numpy as np
-import tensorflow as tf
-import socket
-import importlib
-import os
-import sys
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(BASE_DIR)
-sys.path.append(os.path.join(BASE_DIR, 'models'))
-sys.path.append(os.path.join(BASE_DIR, 'utils'))
-import provider
-# import tf_util
-import time
-import datetime
-import ast
-from npu_device.compat.v1.npu_init import *
-import npu_device as npu
-npu.compat.enable_v1()
-
-starttime = datetime.datetime.now()
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
-parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
-parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
-parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
-parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
-parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
-parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
-parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
-parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
-parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
-parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
-parser.add_argument('--data_path', type=str, default='', help='data path')
-parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data')
-parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
- help='if or not over detection, default is False')
-parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
- help='data dump flag, default is False')
-parser.add_argument('--data_dump_step', default="10",
- help='data dump step, default is 10')
-parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False')
-parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data')
-parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data')
-parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data')
-parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
- help='use_mixlist flag, default is False')
-parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
- help='fusion_off flag, default is False')
-parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json')
-parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg')
-parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False')
-FLAGS = parser.parse_args()
-
-
-BATCH_SIZE = FLAGS.batch_size
-NUM_POINT = FLAGS.num_point
-MAX_EPOCH = FLAGS.max_epoch
-BASE_LEARNING_RATE = FLAGS.learning_rate
-GPU_INDEX = FLAGS.gpu
-MOMENTUM = FLAGS.momentum
-OPTIMIZER = FLAGS.optimizer
-DECAY_STEP = FLAGS.decay_step
-DECAY_RATE = FLAGS.decay_rate
-
-MODEL = importlib.import_module(FLAGS.model) # import network module
-MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
-LOG_DIR = FLAGS.log_dir
-if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
-os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
-os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
-LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
-LOG_FOUT.write(str(FLAGS)+'\n')
-
-MAX_NUM_POINT = 2048
-NUM_CLASSES = 40
-
-BN_INIT_DECAY = 0.5
-BN_DECAY_DECAY_RATE = 0.5
-BN_DECAY_DECAY_STEP = float(DECAY_STEP)
-BN_DECAY_CLIP = 0.99
-
-HOSTNAME = socket.gethostname()
-
-# ModelNet40 official train/test split
-TRAIN_FILES = provider.getDataFiles( \
- os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt'))
-TEST_FILES = provider.getDataFiles(\
- os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt'))
-
-def log_string(out_str):
- LOG_FOUT.write(out_str+'\n')
- LOG_FOUT.flush()
- print(out_str)
-
-
-# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。
-# tf.train.exponential_decay函数实现指数衰减学习率。
-def get_learning_rate(batch):
- # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。
- # 通过tf.train.exponential_decay函数实现指数衰减学习率。
- # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢,
- # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。
- # tf.train.exponential_decay就是用来实现这个功能。
- #
- # 步骤:
- # 1.首先使用较大学习率(目的:为快速得到一个比较优的解);
- # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定);
- learning_rate = tf.compat.v1.train.exponential_decay(
- BASE_LEARNING_RATE, # Base learning rate.
- batch * BATCH_SIZE, # Current index into the dataset.
- DECAY_STEP, # Decay step.
- DECAY_RATE, # Decay rate.
- staircase=True)
- # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大)
- learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
- return learning_rate
-
-
-# 取得bn衰减
-# if the argument staircase is True,
-# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function.
-# 计算衰减的Batch Normalization 的 decay。
-def get_bn_decay(batch):
- # 指数衰减法
-
- # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。
- # 通过tf.train.exponential_decay函数实现指数衰减学习率。
- # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢,
- # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。
- # tf.train.exponential_decay就是用来实现这个功能。
- #
- # 步骤:
- # 1.首先使用较大学习率(目的:为快速得到一个比较优的解);
- # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定);
- bn_momentum = tf.compat.v1.train.exponential_decay(
- BN_INIT_DECAY,
- batch*BATCH_SIZE,
- BN_DECAY_DECAY_STEP,
- BN_DECAY_DECAY_RATE,
- staircase=True)
- # bn衰减0.99和1-衰减后的动量,取最小
- bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
- return bn_decay
-
-
-# 初始运行的训练函数。
-# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练.
-def train():
- # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图
- with tf.Graph().as_default():
- # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号,
- # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。
- # with tf.device('/gpu:'+str(GPU_INDEX)):
- with tf.device('/gpu:0'):
- # 使用了pointne_cls.py的placeholder_inputs()方法。
- # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。
- # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型,
- # 它只会分配必要的内存,用于传入外部数据。
- pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
- # 向指定好的对象中喂入数据:tf.placeholder()
- # 取得占位符:是否在训练。
- is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
- print(is_training_pl)
-
- # Note the global_step=batch parameter to minimize.
- # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
- # 将 global_step = batch 参数最小化。
- # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。
- # 定义 batch = 0
- batch = tf.Variable(0)
- # 取得bn衰减(自定义方法)
- bn_decay = get_bn_decay(batch)
- # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。
- tf.compat.v1.summary.scalar('bn_decay', bn_decay)
-
- # Get model and loss
- # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知,
- # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。
- # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作,
- # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。
- # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。
- pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
- # 调用pointnet_cls下的get_loss()
- loss = MODEL.get_loss(pred, labels_pl, end_points)
- tf.compat.v1.summary.scalar('loss', loss)
-
- # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵
- # tf.equal() 比较两个张量对应位置是否相等
- correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
- # 压缩求和,用于降维
- accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
- tf.compat.v1.summary.scalar('accuracy', accuracy)
-
- # Get training operator
- # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。
- learning_rate = get_learning_rate(batch)
- tf.compat.v1.summary.scalar('learning_rate', learning_rate)
- if OPTIMIZER == 'momentum':
- optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
- elif OPTIMIZER == 'adam':
- optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
- # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值
- # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…]
- # (2)用计算得到的梯度来更新对应的变量(权重)
- # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1
- # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸
- train_op = optimizer.minimize(loss, global_step=batch)
-
- # Add ops to save and restore all the variables.
- saver = tf.compat.v1.train.Saver()
-
- # Create a session
- # 配置session 运行参数。
- # 创建sess的时候对sess进行参数配置
- config = tf.compat.v1.ConfigProto()
- custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
- custom_op.name = 'NpuOptimizer'
- custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
- if FLAGS.data_dump_flag:
- custom_op.parameter_map["enable_dump"].b = True
- custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
- custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
- custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
- if FLAGS.over_dump:
- custom_op.parameter_map["enable_dump_debug"].b = True
- custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
- custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
- if FLAGS.profiling:
- custom_op.parameter_map["precision_mode"].b = True
- profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
- "training_trace":"on", \
- "task_trace":"on", \
- "aicpu":"on", \
- "aic_metrics":"PipeUtilization",\
- "fp_point":"", \
- "bp_point":""}'
- custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
- if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
- custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
- if FLAGS.fusion_off_flag:
- custom_op.parameter_map["sfusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
- if FLAGS.auto_tune:
- custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
- config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 必须显式关闭
- config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF # 必须显式关闭
-
- # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。
- config.gpu_options.allow_growth = True
- # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行
- config.allow_soft_placement = True
- # 在终端打印出各项操作是在哪个设备上运行的
- config.log_device_placement = False
- # 创建 sess, 才能运行框架
- sess = tf.compat.v1.Session(config=config)
-
- # Add summary writers
- #merged = tf.merge_all_summaries()
- merged = tf.compat.v1.summary.merge_all()
- train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
- sess.graph)
- test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
-
- # Init variables
- # 初始化参数,开始训练
- # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的
- # accuracy和loss。每10个epoch保存1次模型。
- init = tf.compat.v1.global_variables_initializer()
- # To fix the bug introduced in TF 0.12.1 as in
- # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
- #sess.run(init)
- # 运行sess初始化所有的全局变量
- sess.run(init, {is_training_pl: True})
-
- # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。
- # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次
- ops = {'pointclouds_pl': pointclouds_pl,
- 'labels_pl': labels_pl,
- 'is_training_pl': is_training_pl,
- 'pred': pred,
- 'loss': loss,
- 'train_op': train_op,
- 'merged': merged,
- 'step': batch}
-
- for epoch in range(MAX_EPOCH):
- # log(自定义方法)
- log_string('**** EPOCH %03d ****' % (epoch))
- # 在同一个位置刷新输出
- sys.stdout.flush()
-
- # 训练一个批次(自定义方法)
- # train_one_epoch 函数用来训练一个epoch
- train_one_epoch(sess, ops, train_writer)
- # 评估一个批次(自定义方法)
- # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss
- eval_one_epoch(sess, ops, test_writer)
-
- # Save the variables to disk.
- # Save the variables to disk.每10个epoch保存1次模型
- if epoch % 10 == 0:
- save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
- # log(自定义方法)
- log_string("Model saved in file: %s" % save_path)
-
-
-# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。
-# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。
-def train_one_epoch(sess, ops, train_writer):
- """ ops: dict mapping from string to tf ops """
- is_training = True
-
- # Shuffle train files
- # 随机打乱训练数据
- train_file_idxs = np.arange(0, len(TRAIN_FILES))
- np.random.shuffle(train_file_idxs)
-
- L = []
- for fn in range(len(TRAIN_FILES)):
- log_string('----' + str(fn) + '-----')
- current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]]))
- current_data = current_data[:,0:NUM_POINT,:]
- current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
- current_label = np.squeeze(current_label)
-
- file_size = current_data.shape[0]
- num_batches = file_size // BATCH_SIZE
-
- total_correct = 0
- total_seen = 0
- loss_sum = 0
- endtime = datetime.datetime.now()
- if fn == 0:
- TOTLE_TIME = (endtime - starttime).seconds
- L.append(TOTLE_TIME)
-
- # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数
-
- # total_senn,总损失loss_sum.
- for batch_idx in range(num_batches):
- start_time = time.time()
- start_idx = batch_idx * BATCH_SIZE
- end_idx = (batch_idx+1) * BATCH_SIZE
-
- # Augment batched point clouds by rotation and jittering
- # 调用provider中rotate_point_cloud
- rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
- jittered_data = provider.jitter_point_cloud(rotated_data)
- feed_dict = {ops['pointclouds_pl']: jittered_data,
- ops['labels_pl']: current_label[start_idx:end_idx],
- ops['is_training_pl']: is_training,}
- summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
- ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
- cost_time = time.time() - start_time
- FPS = BATCH_SIZE / cost_time
- # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据
- train_writer.add_summary(summary, step)
- pred_val = np.argmax(pred_val, 1)
- correct = np.sum(pred_val == current_label[start_idx:end_idx])
- total_correct += correct
- total_seen += BATCH_SIZE
- loss_sum += loss_val
-
- # 记录平均loss,以及平均accuracy。
- log_string('TOTLE_TIME : %.2f' % (float(L[0])))
- log_string('FPS : %.2f' % (float(FPS)))
- log_string('mean loss: %f' % (loss_sum / float(num_batches)))
- log_string('accuracy: %f' % (total_correct / float(total_seen)))
-
-
-def eval_one_epoch(sess, ops, test_writer):
- """ ops: dict mapping from string to tf ops """
- is_training = False
- total_correct = 0
- total_seen = 0
- loss_sum = 0
- total_seen_class = [0 for _ in range(NUM_CLASSES)]
- total_correct_class = [0 for _ in range(NUM_CLASSES)]
-
- for fn in range(len(TEST_FILES)):
- log_string('----' + str(fn) + '-----')
- current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn]))
- current_data = current_data[:,0:NUM_POINT,:]
- current_label = np.squeeze(current_label)
-
- file_size = current_data.shape[0]
- num_batches = file_size // BATCH_SIZE
-
- for batch_idx in range(num_batches):
- start_idx = batch_idx * BATCH_SIZE
- end_idx = (batch_idx+1) * BATCH_SIZE
-
- feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
- ops['labels_pl']: current_label[start_idx:end_idx],
- ops['is_training_pl']: is_training}
- summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
- ops['loss'], ops['pred']], feed_dict=feed_dict)
- pred_val = np.argmax(pred_val, 1)
- correct = np.sum(pred_val == current_label[start_idx:end_idx])
- total_correct += correct
- total_seen += BATCH_SIZE
- loss_sum += (loss_val*BATCH_SIZE)
- for i in range(start_idx, end_idx):
- l = current_label[i]
- total_seen_class[l] += 1
- total_correct_class[l] += (pred_val[i-start_idx] == l)
-
- log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
- log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
- log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
-
-
-
-if __name__ == "__main__":
- train()
- LOG_FOUT.close()
--
Gitee
From aaa9859708977267b8eb7672222083c111e8b26f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:42 +0000
Subject: [PATCH 17/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/requirements.txt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow2/built-in/keras_sample/requirements.txt | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/requirements.txt
diff --git a/TensorFlow2/built-in/keras_sample/requirements.txt b/TensorFlow2/built-in/keras_sample/requirements.txt
deleted file mode 100644
index e69de29bb..000000000
--
Gitee
From 701f6634e01f4a8445cea4c4be9c38063abc1acd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:49 +0000
Subject: [PATCH 18/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/README=5FBAK.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../built-in/keras_sample/README_BAK.md | 77 -------------------
1 file changed, 77 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/README_BAK.md
diff --git a/TensorFlow2/built-in/keras_sample/README_BAK.md b/TensorFlow2/built-in/keras_sample/README_BAK.md
deleted file mode 100644
index 6d7185b09..000000000
--- a/TensorFlow2/built-in/keras_sample/README_BAK.md
+++ /dev/null
@@ -1,77 +0,0 @@
-## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation*
-Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University.
-
-
-
-### Introduction
-This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction.
-
-Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective.
-
-In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset.
-
-### Citation
-If you find our work useful in your research, please consider citing:
-
- @article{qi2016pointnet,
- title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
- author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
- journal={arXiv preprint arXiv:1612.00593},
- year={2016}
- }
-
-### Installation
-
-Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04.
-
-If you are using PyTorch, you can find a third-party pytorch implementation here.
-
-To install h5py for Python:
-```bash
-sudo apt-get install libhdf5-dev
-sudo pip install h5py
-```
-
-### Usage
-To train a model to classify point clouds sampled from 3D shapes:
-
- python train.py
-
-Log files and network parameters will be saved to `log` folder in default. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into an unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files.
-
-To see HELP for the training script:
-
- python train.py -h
-
-We can use TensorBoard to view the network architecture and monitor the training progress.
-
- tensorboard --logdir log
-
-After the above training, we can evaluate the model and output some visualizations of the error cases.
-
- python evaluate.py --visu
-
-Point clouds that are wrongly classified will be saved to `dump` folder in default. We visualize the point cloud by rendering it into three-view images.
-
-If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files.
-
-### Part Segmentation
-To train a model for object part segmentation, firstly download the data:
-
- cd part_seg
- sh download_data.sh
-
-The downloading script will download ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB).
-
-Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation).
-
-### License
-Our code is released under MIT License (see LICENSE file for details).
-
-### Selected Projects that Use PointNet
-
-* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities.
-* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation.
-* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for local geometric properties (e.g. normal and curvature) estimation in noisy point clouds.
-* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv) This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, use PointNet to learn local voxel features and then use 3D CNN for region proposal, object classification and 3D bounding box estimation.
-* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. (arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017).
--
Gitee
From da4c030b682710472c9ae7f3c79c5bd8bc53de06 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:59 +0000
Subject: [PATCH 19/54] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20PointNet=5FID2913=5F?=
=?UTF-8?q?for=5FTensorFlow2.X?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep
new file mode 100644
index 000000000..e69de29bb
--
Gitee
From 9ad695cf55f6c68dbefebcdce45c1b3482fb037a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:30:20 +0000
Subject: [PATCH 20/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../.gitignore | 2 +
.../PointNet_ID2913_for_TensorFlow2.X/LICENSE | 51 ++
.../README.md | 233 +++++++++
.../README_BAK.md | 77 +++
.../evaluate.py | 199 ++++++++
.../modelzoo_level.txt | 3 +
.../provider.py | 165 +++++++
.../requirements.txt | 0
.../train.py | 452 ++++++++++++++++++
.../train_real.py | 381 +++++++++++++++
10 files changed, 1563 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore
new file mode 100644
index 000000000..8efb80c9a
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore
@@ -0,0 +1,2 @@
+/data/*
+/log/*
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..e93be0a6b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,51 @@
+PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation.
+
+Copyright (c) 2017, Geometric Computation Group of Stanford University
+
+The MIT License (MIT)
+
+Copyright (c) 2017 Charles R. Qi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+PointNet:针对3D分类和分割的点集深度学习。
+
+斯坦福大学几何计算小组(c)2017版权所有
+
+MIT许可证(MIT)
+
+版权所有(c)2017 Charles R.Qi
+
+特此授予获得副本的任何人免费的许可
+软件和相关文档文件(以下简称“软件”)的交易
+在软件中不受限制,包括但不限于权利
+使用,复制,修改,合并,发布,分发,再许可和/或出售
+本软件的副本,并允许本软件所针对的人
+具备以下条件:
+
+以上版权声明和此许可声明应包含在所有
+复制或实质性的软件部分。
+
+本软件按“原样”提供,不提供任何形式的明示或明示保证。
+暗示(包括但不限于适销性的保证),
+适用于特定目的和非侵权。在任何情况下都不会
+作者或版权持有人对任何索赔,损害或其他责任
+无论是由于合同,侵权或其他形式的诉讼而引起的责任,
+与软件或软件的使用或其他交易无关或与之有关
+软件。
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..2e27ca0f6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md
@@ -0,0 +1,233 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Instance Segmentation**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2022.04.11**
+
+**大小(Size):43M**
+
+**框架(Framework):TensorFlow_2.6.2**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow2.X框架的3D点云采样的图像分类和分割网络训练代码**
+
+
+概述
+
+## 简述
+
+点云(point cloud)是一种非常重要的几何数据结构。由于点云的无规律性(irregular format),大部分研究者将点云转换为规律的3D体素网格(3D voxel grids)或者一组不同视角的2D图像。这种转换数据的方式,增加了数据的规模,同时也会带来一系列问题。PointNet是一种可以直接处理点云的神经网络,并且考虑了输入点云序列不变性的特征。PointNet提供了统一的应用架构,可以用于分类(classification),块分割(part segmentation),语义理解(semantic parsing)。尽管网络很简单,但是非常有效。从实验结果上看,它超越了经典的方法,至少也达到同样的水平。理论上,我们进行了分析,包括网络学习了什么,以及当数据被一定程度的干扰后,网络为什么能保持稳定。
+
+
+ - 参考论文:
+
+ [https://arxiv.org/abs/1612.00593](https://arxiv.org/abs/1612.00593)
+
+ - 参考实现:
+ [https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py)
+
+
+ - 适配昇腾 AI 处理器的实现:
+ skip
+
+ - 通过Git获取对应commit\_id的代码方法如下:
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
+
+
+
+## 默认配置
+
+
+- 网络结构
+ - 设计最大池化层(对称函数),用于聚合所有点的特征信息
+ - 计算全局点云特征向量后,通过将全局特征与每个点特征连接起来,将全局特征反馈给每个点特征。然后我们在合并的点特征的基础上提取新的每点特征——这时,每点特征都能识别局部和全局信息
+ - 通过一个小网络(T-net)来预测一个仿射变换矩阵,并直接将这个变换应用到输入点的坐标上。小网络与大网络相似,由点独立特征提取、最大池化和全连接层等基本模块组成。
+
+- 训练超参(单卡):
+ - Batch size: 32
+ - learning_rate:0.0015
+ - num_point:2048
+ - Train epoch: 250
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+ config_proto = tf.ConfigProto(allow_soft_placement=True)
+ custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = 'NpuOptimizer'
+ custom_op.parameter_map["use_off_line"].b = True
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+ config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+ session_config = npu_config_proto(config_proto=config_proto)
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+
+快速上手
+
+## 数据集准备
+
+1. 模型训练使用modelnet40_ply_hdf5_2048数据集,即ModelNet40模型训练出的点云数据(HDF5文件类型)。每个点云包含从形状表面均匀采样的 2048 个点。每个云都是零均值并归一化为一个单位球体。
+2. 安装 h5py。该代码已在 Ubuntu 14.04 上使用 Python 2.7、TensorFlow 1.0.1、CUDA 8.0 和 cuDNN 5.1 进行了测试。
+```
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+3. 默认情况下,日志文件和网络参数将保存到log文件夹中。HDF5 文件中ModelNet40模型的点云将自动下载 (416MB) 到数据文件夹。
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+ 2. 单卡训练
+
+ 2.1 设置单卡训练参数(脚本位于PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+
+ ```
+ batch_size=32
+ #训练step
+ train_epochs=250
+ #学习率
+ learning_rate=0.0015
+ ```
+
+
+
+ 2.2 单卡训练指令(PointNet_ID2913_for_TensorFlow2.X/test)
+
+ ```
+ 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
+ bash train_full_1p.sh --data_path=xx
+ 数据集应为h5类型,配置data_path时需指定为data这一层,例:--data_path=/home/data
+ ├─data
+ ├─ply_data_test0.h5*
+ ├─ply_data_test_0_id2file.json*
+ ├─ply_data_test1.h5*
+ ├─ply_data_test_1_id2file.json*
+ ├─ply_data_train0.h5*
+ ├─ply_data_train_0_id2file.json*
+ ├─ply_data_train1.h5*
+ ├─ply_data_train_1_id2file.json*
+ ├─ply_data_train2.h5*
+ ├─ply_data_train_2_id2file.json*
+ ├─ply_data_train3.h5*
+ ├─ply_data_train_3_id2file.json*
+ ├─ply_data_train4.h5*
+ ├─ply_data_train_4_id2file.json*
+ ├─shape_names.txt*
+ ├─test_files.txt*
+ ├─train_files.txt*
+
+ ```
+
+迁移学习指导
+
+- 数据集准备。
+
+ 1. 获取数据。
+ 请参见“快速上手”中的数据集准备
+
+- 模型训练
+
+ 请参考“快速上手”章节
+
+高级参考
+
+## 脚本和示例代码
+
+ ├── README.md //说明文档
+ ├── requirements.txt //依赖
+ ├── modelzoo_level.txt //状态文件
+ ├── provider.py //数据集处理脚本
+ ├── train.py //网络训练脚本
+ ├── models //网络结构定义脚本
+ |—— pointnet_cls.py
+ |—— pointnet_cls_basic.py
+ |—— pointnet_seg.py
+ |—— transform_nets.py
+ ├── test
+ | |—— train_full_1p.sh //单卡训练脚本
+ | |—— train_performance_1p.sh //单卡训练脚本
+ ...
+
+## 脚本参数
+
+```
+batch_size 训练batch_size
+learning_rate 初始学习率
+max_epochs 最大训练epoch数
+num_point 每个点云包含从形状表面均匀采样的点数
+precision_mode default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision'
+over_dump type=ast.literal_eval,help='if or not over detection, default is False'
+data_dump_flag type=ast.literal_eval,help='data dump flag, default is False'
+data_dump_step data dump step, default is 10
+profiling type=ast.literal_eval help='if or not profiling for performance debug, default is False'
+profiling_dump_path type=str, help='the path to save profiling data'
+over_dump_path type=str, help='the path to save over dump data'
+data_dump_path type=str, help='the path to save dump data'
+use_mixlist type=ast.literal_eval,help='use_mixlist flag, default is False'
+fusion_off_flag type=ast.literal_eval,help='fusion_off flag, default is False'
+mixlist_file type=str,help='mixlist file name, default is ops_info.json'
+fusion_off_file type=str,help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune help='auto_tune flag, default is False'
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..6d7185b09
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,77 @@
+## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation*
+Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University.
+
+
+
+### Introduction
+This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction.
+
+Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective.
+
+In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset.
+
+### Citation
+If you find our work useful in your research, please consider citing:
+
+ @article{qi2016pointnet,
+ title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
+ author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
+ journal={arXiv preprint arXiv:1612.00593},
+ year={2016}
+ }
+
+### Installation
+
+Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04.
+
+If you are using PyTorch, you can find a third-party pytorch implementation here.
+
+To install h5py for Python:
+```bash
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+
+### Usage
+To train a model to classify point clouds sampled from 3D shapes:
+
+ python train.py
+
+Log files and network parameters will be saved to `log` folder in default. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into an unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files.
+
+To see HELP for the training script:
+
+ python train.py -h
+
+We can use TensorBoard to view the network architecture and monitor the training progress.
+
+ tensorboard --logdir log
+
+After the above training, we can evaluate the model and output some visualizations of the error cases.
+
+ python evaluate.py --visu
+
+Point clouds that are wrongly classified will be saved to `dump` folder in default. We visualize the point cloud by rendering it into three-view images.
+
+If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files.
+
+### Part Segmentation
+To train a model for object part segmentation, firstly download the data:
+
+ cd part_seg
+ sh download_data.sh
+
+The downloading script will download ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB).
+
+Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation).
+
+### License
+Our code is released under MIT License (see LICENSE file for details).
+
+### Selected Projects that Use PointNet
+
+* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities.
+* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation.
+* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for local geometric properties (e.g. normal and curvature) estimation in noisy point clouds.
+* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv) This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, use PointNet to learn local voxel features and then use 3D CNN for region proposal, object classification and 3D bounding box estimation.
+* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. (arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017).
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py
new file mode 100644
index 000000000..749f8c7f8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py
@@ -0,0 +1,199 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import argparse
+import socket
+import importlib
+import time
+import os
+import scipy.misc
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+import pc_util
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
+parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
+parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]')
+parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
+parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
+parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
+parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]')
+FLAGS = parser.parse_args()
+
+
+BATCH_SIZE = FLAGS.batch_size
+NUM_POINT = FLAGS.num_point
+MODEL_PATH = FLAGS.model_path
+GPU_INDEX = FLAGS.gpu
+MODEL = importlib.import_module(FLAGS.model) # import network module
+DUMP_DIR = FLAGS.dump_dir
+if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
+LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
+LOG_FOUT.write(str(FLAGS)+'\n')
+
+NUM_CLASSES = 40
+SHAPE_NAMES = [line.rstrip() for line in \
+ open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
+
+HOSTNAME = socket.gethostname()
+
+# ModelNet40 official train/test split
+TRAIN_FILES = provider.getDataFiles( \
+ os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
+TEST_FILES = provider.getDataFiles(\
+ os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
+
+def log_string(out_str):
+ LOG_FOUT.write(out_str+'\n')
+ LOG_FOUT.flush()
+ print(out_str)
+
+def evaluate(num_votes):
+ is_training = False
+
+ with tf.device('/cpu:0'):
+ pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
+ is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
+
+ # simple model
+ pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
+ loss = MODEL.get_loss(pred, labels_pl, end_points)
+
+ # Add ops to save and restore all the variables.
+ saver = tf.compat.v1.train.Saver()
+
+ # Create a session
+ config = tf.compat.v1.ConfigProto()
+ config.gpu_options.allow_growth = True
+ config.allow_soft_placement = True
+ config.log_device_placement = True
+ sess = tf.compat.v1.Session(config=config)
+
+ # Restore variables from disk.
+ saver.restore(sess, MODEL_PATH)
+ log_string("Model restored.")
+
+ ops = {'pointclouds_pl': pointclouds_pl,
+ 'labels_pl': labels_pl,
+ 'is_training_pl': is_training_pl,
+ 'pred': pred,
+ 'loss': loss}
+
+ eval_one_epoch(sess, ops, num_votes)
+
+
+def eval_one_epoch(sess, ops, num_votes=1, topk=1):
+ error_cnt = 0
+ is_training = False
+ total_correct = 0
+ total_seen = 0
+ loss_sum = 0
+ total_seen_class = [0 for _ in range(NUM_CLASSES)]
+ total_correct_class = [0 for _ in range(NUM_CLASSES)]
+ fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
+ for fn in range(len(TEST_FILES)):
+ log_string('----'+str(fn)+'----')
+ current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
+ current_data = current_data[:,0:NUM_POINT,:]
+ current_label = np.squeeze(current_label)
+ print(current_data.shape)
+
+ file_size = current_data.shape[0]
+ num_batches = file_size // BATCH_SIZE
+ print(file_size)
+
+ for batch_idx in range(num_batches):
+ start_idx = batch_idx * BATCH_SIZE
+ end_idx = (batch_idx+1) * BATCH_SIZE
+ cur_batch_size = end_idx - start_idx
+
+ # Aggregating BEG
+ batch_loss_sum = 0 # sum of losses for the batch
+ batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
+ batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
+ for vote_idx in range(num_votes):
+ rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
+ vote_idx/float(num_votes) * np.pi * 2)
+ feed_dict = {ops['pointclouds_pl']: rotated_data,
+ ops['labels_pl']: current_label[start_idx:end_idx],
+ ops['is_training_pl']: is_training}
+ loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
+ feed_dict=feed_dict)
+ batch_pred_sum += pred_val
+ batch_pred_val = np.argmax(pred_val, 1)
+ for el_idx in range(cur_batch_size):
+ batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
+ batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
+ # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
+ # pred_val = np.argmax(batch_pred_classes, 1)
+ pred_val = np.argmax(batch_pred_sum, 1)
+ # Aggregating END
+
+ correct = np.sum(pred_val == current_label[start_idx:end_idx])
+ # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
+ total_correct += correct
+ total_seen += cur_batch_size
+ loss_sum += batch_loss_sum
+
+ for i in range(start_idx, end_idx):
+ l = current_label[i]
+ total_seen_class[l] += 1
+ total_correct_class[l] += (pred_val[i-start_idx] == l)
+ fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
+
+ if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
+ img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
+ SHAPE_NAMES[pred_val[i-start_idx]])
+ img_filename = os.path.join(DUMP_DIR, img_filename)
+ output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
+ scipy.misc.imsave(img_filename, output_img)
+ error_cnt += 1
+
+ log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
+ log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
+ log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
+
+ class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
+ for i, name in enumerate(SHAPE_NAMES):
+ log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
+
+
+
+if __name__=='__main__':
+ with tf.Graph().as_default():
+ evaluate(num_votes=1)
+ LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..31529da2e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py
new file mode 100644
index 000000000..18651c47f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py
@@ -0,0 +1,165 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import sys
+import numpy as np
+import h5py
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+# Download dataset for point cloud classification
+# 拼接data路径
+'''
+DATA_DIR = os.path.join(BASE_DIR, 'data')
+# 如果没有路径,则创建文件夹
+if not os.path.exists(DATA_DIR):
+ os.mkdir(DATA_DIR)
+# 若不存在指定的文件夹,则从指定url下载压缩包,并解压缩
+# 实际上不好用,zipfile下载不下来。所以mv和rm就都报错了。
+if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
+ www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
+ zipfile = os.path.basename(www)
+ os.system('wget %s; unzip %s' % (www, zipfile))
+ os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
+ os.system('rm %s' % (zipfile))
+'''
+
# Randomly shuffle a batch of samples.
def shuffle_data(data, labels):
    """Shuffle data and labels in unison.

    Args:
        data: numpy array shaped (B, N, ...).
        labels: numpy array shaped (B, ...).

    Returns:
        Tuple of (shuffled data, shuffled labels, permutation indices).
    """
    # Draw a random permutation of the sample indices and apply it to both
    # arrays so data/label pairing is preserved.
    perm = np.arange(len(labels))
    np.random.shuffle(perm)
    return data[perm, ...], labels[perm], perm
+
+
# Random rotation augmentation.
def rotate_point_cloud(batch_data):
    """Randomly rotate each point cloud about the up (Y) axis.

    Every cloud in the batch receives its own uniformly random angle,
    which augments the dataset with orientation variety.

    Args:
        batch_data: BxNx3 array, original batch of point clouds.

    Returns:
        BxNx3 float32 array, rotated batch of point clouds.
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        # Rotation matrix about the Y axis by `angle`.
        rot = np.array([[c, 0, s],
                        [0, 1, 0],
                        [-s, 0, c]])
        cloud = batch_data[idx, ...]
        rotated[idx, ...] = np.dot(cloud.reshape((-1, 3)), rot)
    return rotated
+
+
# Fixed-angle rotation (used at evaluation time for voting).
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """Rotate every cloud in the batch about the up (Y) axis by one angle.

    Args:
        batch_data: BxNx3 array, original batch of point clouds.
        rotation_angle: rotation angle in radians, shared by all clouds.

    Returns:
        BxNx3 float32 array, rotated batch of point clouds.
    """
    c, s = np.cos(rotation_angle), np.sin(rotation_angle)
    # Rotation matrix about the Y axis; identical for the whole batch.
    rot = np.array([[c, 0, s],
                    [0, 1, 0],
                    [-s, 0, c]])
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        cloud = batch_data[idx, ...]
        rotated[idx, ...] = np.dot(cloud.reshape((-1, 3)), rot)
    return rotated
+
+
# Per-point Gaussian jitter augmentation.
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """Add clipped Gaussian noise independently to every point.

    Args:
        batch_data: BxNx3 array, original batch of point clouds.
        sigma: standard deviation of the Gaussian noise.
        clip: hard bound on the magnitude of each noise component.

    Returns:
        BxNx3 array, jittered batch of point clouds.
    """
    b, n, c = batch_data.shape
    assert (clip > 0)
    # Noise is drawn per coordinate, then clamped to [-clip, clip].
    noise = np.clip(sigma * np.random.randn(b, n, c), -1 * clip, clip)
    return batch_data + noise
+
+
# Read the list of dataset shard files.
def getDataFiles(list_filename):
    """Return the lines of a list file with trailing whitespace stripped.

    Args:
        list_filename: path to a text file with one entry per line.

    Returns:
        List of stripped lines.
    """
    # BUGFIX: the original left the file handle open (open() inside a
    # comprehension with no close); use a context manager to release it.
    with open(list_filename) as handle:
        return [entry.rstrip() for entry in handle]
+
+
# Load one HDF5 classification shard.
def load_h5(h5_filename):
    """Load the 'data' and 'label' datasets from an HDF5 file.

    Args:
        h5_filename: path to an HDF5 file containing 'data' and 'label'.

    Returns:
        Tuple (data, label) of numpy arrays materialized from the file.
    """
    # BUGFIX: open read-only explicitly. h5py deprecated (and later removed)
    # the implicit default mode, which could also create/lock the file.
    f = h5py.File(h5_filename, 'r')
    data = f['data'][:]
    label = f['label'][:]
    return (data, label)
+
+
# Load one data file (alias kept for API compatibility).
def loadDataFile(filename):
    """Thin wrapper around load_h5; returns (data, label)."""
    return load_h5(filename)
+
+
# Load one HDF5 segmentation shard.
def load_h5_data_label_seg(h5_filename):
    """Load 'data', 'label' and per-point 'pid' datasets from an HDF5 file.

    Args:
        h5_filename: path to an HDF5 file with 'data', 'label' and 'pid'.

    Returns:
        Tuple (data, label, seg) of numpy arrays.
    """
    # BUGFIX: open read-only explicitly (implicit h5py mode is deprecated
    # and could create/lock the file).
    f = h5py.File(h5_filename, 'r')
    data = f['data'][:]
    label = f['label'][:]
    seg = f['pid'][:]
    return (data, label, seg)
+
+
# Load one data file including segmentation ids.
def loadDataFile_with_seg(filename):
    """Thin wrapper around load_h5_data_label_seg; returns (data, label, seg)."""
    return load_h5_data_label_seg(filename)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..4a6683530
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py
@@ -0,0 +1,452 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# import npu_device
+# npu_device.open().as_default()
+
+
+import argparse
+# import math
+# import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+import importlib
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+# import tf_util
+import time
+import datetime
+import ast
+from npu_device.compat.v1.npu_init import *
+import npu_device as npu
+npu.compat.enable_v1()
+
# ---------------------------------------------------------------------------
# Module-level setup (runs at import time): CLI flags, logging, dataset lists.
# ---------------------------------------------------------------------------
# Process start time; train_one_epoch reports the delay until the first shard.
starttime = datetime.datetime.now()

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
# BUGFIX: help text said 'Initial learning rate' (copy-paste); this flag is
# the optimizer momentum.
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
# BUGFIX: help text claimed default 0.8; the actual default is 0.7.
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--data_path', type=str, default='', help='data path')
# NPU (Ascend) debugging / tuning flags.
# BUGFIX: --precision_mode help was a copy-pasted dump-path description.
parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='precision mode [default: allow_mix_precision]')
parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
                    help='if or not over detection, default is False')
parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
                    help='data dump flag, default is False')
parser.add_argument('--data_dump_step', default="10",
                    help='data dump step, default is 10')
parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False')
parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data')
parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data')
parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data')
parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
                    help='use_mixlist flag, default is False')
parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
                    help='fusion_off flag, default is False')
parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json')
parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg')
parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False')
FLAGS = parser.parse_args()


BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 2048
NUM_CLASSES = 40

# Batch-norm decay schedule constants (see get_bn_decay).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# ModelNet40 official train/test split.
TRAIN_FILES = provider.getDataFiles( \
    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt'))
+
def log_string(out_str):
    """Append one line to the training log file, flush it, and echo to stdout."""
    line = out_str + '\n'
    LOG_FOUT.write(line)
    # Flush immediately so the on-disk log survives an abrupt termination.
    LOG_FOUT.flush()
    print(out_str)
+
+
# Exponentially decayed learning-rate schedule.
def get_learning_rate(batch):
    """Return the exponentially decayed learning rate for the current step.

    The decay position is the number of samples consumed so far
    (batch * BATCH_SIZE); the rate steps down by DECAY_RATE every
    DECAY_STEP samples (staircase schedule).
    """
    decayed = tf.compat.v1.train.exponential_decay(
        BASE_LEARNING_RATE,   # base learning rate
        batch * BATCH_SIZE,   # current index into the dataset (samples)
        DECAY_STEP,           # decay step
        DECAY_RATE,           # decay rate
        staircase=True)
    # Clip the rate at a floor of 1e-5 so training never fully stalls.
    return tf.maximum(decayed, 0.00001)
+
+
# Batch-normalization decay schedule.
def get_bn_decay(batch):
    """Return the batch-norm decay for the current training position.

    BN momentum decays exponentially from BN_INIT_DECAY with a staircase
    schedule driven by samples consumed; the returned decay is
    1 - momentum, capped at BN_DECAY_CLIP.
    """
    momentum = tf.compat.v1.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    # Take the smaller of the cap and 1 - decayed momentum.
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
+
+
def train():
    """Build the classification graph and run the full training loop.

    Creates placeholders, model/loss/optimizer ops, an NPU-configured
    session and summary writers, then alternates train_one_epoch /
    eval_one_epoch for MAX_EPOCH epochs, checkpointing every 10 epochs.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:0'):
            # Placeholders for a batch of point clouds and their labels;
            # is_training toggles dropout/BN behavior at run time.
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
            print(is_training_pl)

            # global_step counter; optimizer.minimize increments it each step.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.compat.v1.summary.scalar('bn_decay', bn_decay)

            # Model and loss. pred holds per-sample class scores over
            # NUM_CLASSES categories.
            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = MODEL.get_loss(pred, labels_pl, end_points)
            tf.compat.v1.summary.scalar('loss', loss)

            # Batch accuracy: fraction of argmax predictions matching labels.
            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.compat.v1.summary.scalar('accuracy', accuracy)

            # Training operator with the decayed learning rate.
            learning_rate = get_learning_rate(batch)
            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            # Saver for periodic checkpoints.
            saver = tf.compat.v1.train.Saver()

        # Session configuration for the Ascend NPU runtime.
        config = tf.compat.v1.ConfigProto()
        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
        custom_op.name = 'NpuOptimizer'
        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
        if FLAGS.data_dump_flag:
            custom_op.parameter_map["enable_dump"].b = True
            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
            custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
            custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
        if FLAGS.over_dump:
            custom_op.parameter_map["enable_dump_debug"].b = True
            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
            custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
        if FLAGS.profiling:
            # BUGFIX: this previously set parameter_map["precision_mode"].b =
            # True, which clobbered the precision setting instead of enabling
            # profiling. The NPU switch for profiling is "profiling_mode".
            custom_op.parameter_map["profiling_mode"].b = True
            profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
                    "training_trace":"on", \
                    "task_trace":"on", \
                    "aicpu":"on", \
                    "aic_metrics":"PipeUtilization",\
                    "fp_point":"", \
                    "bp_point":""}'
            custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
        if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
            custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
        if FLAGS.fusion_off_flag:
            # BUGFIX: the parameter name is "fusion_switch_file"; the stray
            # leading 's' ("sfusion_switch_file") made --fusion_off_flag a
            # no-op.
            custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
        if FLAGS.auto_tune:
            custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
        # These two graph rewrites must be disabled explicitly on NPU.
        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF

        # Grow device memory on demand and fall back to an available device
        # instead of failing on a missing one.
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.compat.v1.Session(config=config)

        # Summary writers for the train and test splits.
        merged = tf.compat.v1.summary.merge_all()
        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                  sess.graph)
        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Initialize all variables. Feeding is_training works around the
        # TF 0.12.1 bool-tensor bug, see:
        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init, {is_training_pl: True})

        # Interface dict handed to the epoch train/eval loops.
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch}

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            # One pass over the training files, then one over the test files.
            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)

            # Checkpoint every 10 epochs.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
+
+
# Trains one epoch: provider.shuffle_data randomizes sample order per file;
# num_batches = file_size // BATCH_SIZE mini-batches are run per file.
def train_one_epoch(sess, ops, train_writer):
    """Run one training epoch over all TRAIN_FILES.

    Args:
        sess: active tf session.
        ops: dict mapping names to graph ops/placeholders (built in train()).
        train_writer: summary FileWriter for the train split.

    Per file, logs the time from process start (first file only), the
    samples/sec of the last batch, and the mean loss / accuracy.
    """
    is_training = True

    # Shuffle the order in which the HDF5 shards are visited this epoch.
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    # L[0] caches the seconds elapsed from process start to the first shard
    # of this epoch (appended only when fn == 0, read in every iteration).
    L = []
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        # Load one shard and truncate each cloud to the first NUM_POINT points.
        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]]))
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Integer division: any trailing partial batch is dropped.
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        endtime = datetime.datetime.now()
        if fn == 0:
            TOTLE_TIME = (endtime - starttime).seconds
            L.append(TOTLE_TIME)

        # Per-mini-batch training; accumulates correct count, seen count and
        # summed loss for the per-file statistics below.
        for batch_idx in range(num_batches):
            start_time = time.time()
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            # Augment the batch: random Y-axis rotation, then point jitter.
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            cost_time = time.time() - start_time
            # Throughput of this batch (samples per second).
            FPS = BATCH_SIZE / cost_time
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # NOTE(review): FPS and L[0] are read after the batch loop; if a shard
        # yields zero full batches or TRAIN_FILES is empty this raises
        # (NameError / IndexError / ZeroDivisionError) — presumably never the
        # case with the standard dataset; confirm before reusing elsewhere.
        log_string('TOTLE_TIME : %.2f' % (float(L[0])))
        log_string('FPS : %.2f' % (float(FPS)))
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
def eval_one_epoch(sess, ops, test_writer):
    """Evaluate the model on all TEST_FILES.

    Args:
        sess: active tf session.
        ops: dict mapping names to graph ops/placeholders (built in train()).
        test_writer: summary FileWriter for the test split.

    Logs overall mean loss, overall accuracy, and the accuracy averaged
    over classes.
    """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    # Per-class sample and hit counters for the class-averaged accuracy.
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        # Load one shard and truncate each cloud to the first NUM_POINT points.
        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn]))
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Integer division: any trailing partial batch is dropped.
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            # loss_val is a per-batch mean; weight it back by batch size so
            # the final division by total_seen yields a true mean.
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement for this dtype.
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=float))))
+
+
+
+if __name__ == "__main__":
+ train()
+ LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py
new file mode 100644
index 000000000..34c60ca17
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py
@@ -0,0 +1,381 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import math
+import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+import importlib
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+import tf_util
+
# ---------------------------------------------------------------------------
# Module-level setup (runs at import time): CLI flags, logging, dataset lists.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
# BUGFIX: the help strings below advertised the defaults of the original
# PointNet script (1024/250/32); they now match this script's actual defaults.
parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048/4096] [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 100]')
parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 5]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
# BUGFIX: help said 'Initial learning rate'; this flag is the momentum.
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
# BUGFIX: help claimed default 0.8; the actual default is 0.7.
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
FLAGS = parser.parse_args()


BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
# NOTE(review): this backs up train.py rather than this file (train_real.py);
# looks like a copy-paste leftover — behavior left unchanged pending confirm.
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 40

# Batch-norm decay schedule constants (see get_bn_decay).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# Train/test split lists for the local 'data_real' dataset.
TRAIN_FILES = provider.getDataFiles( \
    os.path.join(BASE_DIR, 'data_real/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
    os.path.join(BASE_DIR, 'data_real/test_files.txt'))
print(TRAIN_FILES)
print(TEST_FILES)
+
def log_string(out_str):
    """Write one line to the run log file, flush it, then echo to stdout."""
    record = out_str + '\n'
    LOG_FOUT.write(record)
    # Flush each record so the log stays current if the process dies.
    LOG_FOUT.flush()
    print(out_str)
+
+
# Learning-rate schedule: exponential decay with a lower bound.
def get_learning_rate(batch):
    """Compute the decayed learning rate for the given step counter.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP consumed
    samples (staircase), where batch * BATCH_SIZE is the sample count.
    """
    lr = tf.compat.v1.train.exponential_decay(
        BASE_LEARNING_RATE,   # base learning rate
        batch * BATCH_SIZE,   # samples consumed so far
        DECAY_STEP,           # decay step
        DECAY_RATE,           # decay rate
        staircase=True)
    # Keep the rate from decaying below 1e-5.
    lr = tf.maximum(lr, 0.00001)
    return lr
+
+
# Batch-normalization decay schedule.
def get_bn_decay(batch):
    """Compute the BN decay for the given step counter.

    The BN momentum decays from BN_INIT_DECAY on a staircase schedule
    driven by consumed samples; the result 1 - momentum is capped at
    BN_DECAY_CLIP.
    """
    momentum = tf.compat.v1.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - momentum)
    return bn_decay
+
+
+# 初始运行的训练函数。
# Builds the whole training graph (placeholders, network, loss, optimizer,
# summaries), then runs the epoch loop and periodically checkpoints.
def train():
    # Use a fresh graph as the default graph for everything built below.
    with tf.Graph().as_default():
        # Build on CPU; switch to '/gpu:'+str(GPU_INDEX) to place ops on a GPU.
        # with tf.device('/gpu:'+str(GPU_INDEX)):
        with tf.device('/cpu:0'):
            # Placeholders for one batch of point clouds and their labels.
            # No data is bound here; only shapes/dtypes are declared.
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            # Boolean placeholder switching train/eval behavior (dropout, BN).
            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
            print(is_training_pl)

            # Note the global_step=batch parameter to minimize.
            # The optimizer increments 'batch' once per training step.
            batch = tf.Variable(0)
            # Batch-norm decay schedule driven by the global step.
            bn_decay = get_bn_decay(batch)
            tf.compat.v1.summary.scalar('bn_decay', bn_decay)

            # Get model and loss.
            # pred is the classification network output (one score per class);
            # the argmax over the class axis is the predicted label.
            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = MODEL.get_loss(pred, labels_pl, end_points)
            tf.compat.v1.summary.scalar('loss', loss)

            # Batch accuracy: fraction of argmax predictions matching labels.
            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.compat.v1.summary.scalar('accuracy', accuracy)

            # Get training operator: decayed learning rate + selected optimizer.
            learning_rate = get_learning_rate(batch)
            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
            else:
                # Fail fast on an unknown flag value instead of hitting an
                # UnboundLocalError at the minimize() call below.
                raise ValueError('Unsupported optimizer: %s' % OPTIMIZER)
            # minimize() computes gradients and applies them, incrementing
            # the global step ('batch') on every call.
            train_op = optimizer.minimize(loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.compat.v1.train.Saver()

            # Create a session with explicit resource/placement policy.
            config = tf.compat.v1.ConfigProto()
            # Grow GPU memory on demand instead of grabbing it all upfront.
            config.gpu_options.allow_growth = True
            # Fall back to an available device when the requested one is absent.
            config.allow_soft_placement = True
            # Do not log per-op device placement.
            config.log_device_placement = False
            sess = tf.compat.v1.Session(config=config)

            # Add summary writers.
            #merged = tf.merge_all_summaries()
            merged = tf.compat.v1.summary.merge_all()
            train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                      sess.graph)
            test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

            # Init variables.
            init = tf.compat.v1.global_variables_initializer()
            # To fix the bug introduced in TF 0.12.1 as in
            # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
            #sess.run(init)
            # Feed is_training_pl so ops gated on it can initialize cleanly.
            sess.run(init, {is_training_pl: True})

            # Dict of graph endpoints handed to the train/eval epoch loops:
            # 'pred' is the network output, 'loss' the objective, 'train_op'
            # the optimizer step, 'step' the global-step variable.
            ops = {'pointclouds_pl': pointclouds_pl,
                   'labels_pl': labels_pl,
                   'is_training_pl': is_training_pl,
                   'pred': pred,
                   'loss': loss,
                   'train_op': train_op,
                   'merged': merged,
                   'step': batch}

            for epoch in range(MAX_EPOCH):
                log_string('**** EPOCH %03d ****' % (epoch))
                sys.stdout.flush()

                # One full pass over the training set, then an evaluation
                # pass reporting test-set accuracy and loss.
                train_one_epoch(sess, ops, train_writer)
                eval_one_epoch(sess, ops, test_writer)

                # Save the variables to disk every 10 epochs.
                if epoch % 10 == 0:
                    save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                    log_string("Model saved in file: %s" % save_path)
+
+
# Trains for one epoch: visits the training files in random order, and for
# each file iterates full mini-batches with rotation/jitter augmentation.
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # Randomize the order in which HDF5 training files are visited.
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        # Keep only the first NUM_POINT points of every cloud.
        current_data = current_data[:,0:NUM_POINT,:]
        # Shuffle samples within the file so batches differ across epochs.
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Number of full batches; trailing remainder samples are dropped.
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        # Iterate mini-batches, accumulating the number of correct
        # predictions, the number of samples seen, and the total loss.
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            # One optimization step: fetch summaries, step, loss, predictions.
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # Log mean loss and accuracy over this file's batches.
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    # Evaluation pass over all test files: no augmentation, no optimizer step.
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    # Per-class counters used for the average class accuracy metric.
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        # Keep only the first NUM_POINT points of every cloud.
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        # Only full batches are evaluated; the remainder is dropped.
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            # Weight the batch loss by batch size so the final mean is per-sample.
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    # Use the builtin float: the np.float alias was removed in NumPy 1.24,
    # so np.array(..., dtype=np.float) raises AttributeError there.
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=float))))
+
+
+
+if __name__ == "__main__":
+ train()
+ LOG_FOUT.close()
--
Gitee
From f13bfd0462b0923220218c5308b58e2813e2c170 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:31:34 +0000
Subject: [PATCH 21/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/PointNet=5FID2913=5Ffor=5FTen?=
=?UTF-8?q?sorFlow2.X/.keep?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep
deleted file mode 100644
index e69de29bb..000000000
--
Gitee
From eb5a768c3aad0666c79b7d07ae3242f5a9213f70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:31:46 +0000
Subject: [PATCH 22/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../utils/data_prep_util.py | 174 ++++
.../utils/eulerangles.py | 447 +++++++++
.../utils/pc_util.py | 227 +++++
.../utils/plyfile.py | 932 ++++++++++++++++++
.../utils/tf_util.py | 614 ++++++++++++
5 files changed, 2394 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py
new file mode 100644
index 000000000..5ec338160
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py
@@ -0,0 +1,174 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
+import numpy as np
+import h5py
+
+SAMPLING_BIN = os.path.join(BASE_DIR, 'third_party/mesh_sampling/build/pcsample')
+
+SAMPLING_POINT_NUM = 2048
+SAMPLING_LEAF_SIZE = 0.005
+
+MODELNET40_PATH = '../datasets/modelnet40'
def export_ply(pc, filename):
    """Write an (N, 3) point array `pc` to `filename` as a PLY vertex list."""
    vertex = np.zeros(pc.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    for idx, point in enumerate(pc):
        vertex[idx] = (point[0], point[1], point[2])
    ply_out = PlyData([PlyElement.describe(vertex, 'vertex', comments=['vertices'])])
    ply_out.write(filename)
+
# Sample points on the obj shape
def get_sampling_command(obj_filename, ply_filename):
    """Build the shell command invoking the mesh sampler on an .obj file."""
    pieces = [
        SAMPLING_BIN, ' ', obj_filename,
        ' ', ply_filename,
        ' -n_samples %d ' % SAMPLING_POINT_NUM,
        ' -leaf_size %f ' % SAMPLING_LEAF_SIZE,
    ]
    return ''.join(pieces)
+
+# --------------------------------------------------------------
+# Following are the helper functions to load MODELNET40 shapes
+# --------------------------------------------------------------
+
# Read in the list of categories in MODELNET40
def get_category_names():
    """Return the MODELNET40 category names listed in shape_names.txt."""
    shape_names_file = os.path.join(MODELNET40_PATH, 'shape_names.txt')
    # Close the file deterministically; the original open() in the list
    # comprehension leaked the handle to the garbage collector.
    with open(shape_names_file) as f:
        return [line.rstrip() for line in f]
+
# Return all the filepaths for the shapes in MODELNET40
def get_obj_filenames():
    """Return the paths of all .obj shapes listed in MODELNET40 filelist.txt."""
    obj_filelist_file = os.path.join(MODELNET40_PATH, 'filelist.txt')
    # Close the file deterministically; the original open() in the list
    # comprehension leaked the handle to the garbage collector.
    with open(obj_filelist_file) as f:
        obj_filenames = [os.path.join(MODELNET40_PATH, line.rstrip()) for line in f]
    print('Got %d obj files in modelnet40.' % len(obj_filenames))
    return obj_filenames
+
# Helper function to create the father folder and all subdir folders if not exist
def batch_mkdir(output_folder, subdir_list):
    """Create `output_folder` and each subdir in `subdir_list` under it.

    Existing directories are left untouched. Uses makedirs(exist_ok=True)
    to avoid the check-then-create race of exists()+mkdir().
    """
    os.makedirs(output_folder, exist_ok=True)
    for subdir in subdir_list:
        os.makedirs(os.path.join(output_folder, subdir), exist_ok=True)
+
+# ----------------------------------------------------------------
+# Following are the helper functions to load save/load HDF5 files
+# ----------------------------------------------------------------
+
# Write numpy array data and label to h5_filename
def save_h5_data_label_normal(h5_filename, data, label, normal,
        data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
    """Write 'data', 'normal' and 'label' datasets to an HDF5 file.

    The file is opened in 'w' mode (truncate/create): h5py >= 3 removed the
    implicit default mode, so h5py.File(name) with no mode raises there.
    """
    with h5py.File(h5_filename, 'w') as h5_fout:
        h5_fout.create_dataset(
                'data', data=data,
                compression='gzip', compression_opts=4,
                dtype=data_dtype)
        h5_fout.create_dataset(
                'normal', data=normal,
                compression='gzip', compression_opts=4,
                dtype=normal_dtype)
        h5_fout.create_dataset(
                'label', data=label,
                compression='gzip', compression_opts=1,
                dtype=label_dtype)
+
+
# Write numpy array data and label to h5_filename
def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='uint8'):
    """Write 'data' and 'label' datasets to an HDF5 file.

    The file is opened in 'w' mode (truncate/create): h5py >= 3 removed the
    implicit default mode, so h5py.File(name) with no mode raises there.
    """
    with h5py.File(h5_filename, 'w') as h5_fout:
        h5_fout.create_dataset(
                'data', data=data,
                compression='gzip', compression_opts=4,
                dtype=data_dtype)
        h5_fout.create_dataset(
                'label', data=label,
                compression='gzip', compression_opts=1,
                dtype=label_dtype)
+
# Read numpy array data and label from h5_filename
def load_h5_data_label_normal(h5_filename):
    """Load ('data', 'label', 'normal') arrays from an HDF5 file.

    Opens read-only: h5py >= 3 requires an explicit mode, and these
    functions only read. The [:] slices copy into memory before close.
    """
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        normal = f['normal'][:]
    return (data, label, normal)
+
# Read numpy array data and label from h5_filename
def load_h5_data_label_seg(h5_filename):
    """Load ('data', 'label', 'pid' segmentation) arrays from an HDF5 file.

    Opens read-only: h5py >= 3 requires an explicit mode, and these
    functions only read. The [:] slices copy into memory before close.
    """
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        seg = f['pid'][:]
    return (data, label, seg)
+
# Read numpy array data and label from h5_filename
def load_h5(h5_filename):
    """Load ('data', 'label') arrays from an HDF5 file.

    Opens read-only: h5py >= 3 requires an explicit mode, and this
    function only reads. The [:] slices copy into memory before close.
    """
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
    return (data, label)
+
+# ----------------------------------------------------------------
+# Following are the helper functions to load save/load PLY files
+# ----------------------------------------------------------------
+
# Load PLY file
def load_ply_data(filename, point_num):
    """Read the first `point_num` vertices of a PLY file as an (N, 3) array."""
    plydata = PlyData.read(filename)
    vertices = plydata['vertex'].data[:point_num]
    pc_array = np.array([[pt[0], pt[1], pt[2]] for pt in vertices])
    return pc_array
+
# Load PLY file
def load_ply_normal(filename, point_num):
    """Read the first `point_num` normals of a PLY file as an (N, 3) array."""
    plydata = PlyData.read(filename)
    normals = plydata['normal'].data[:point_num]
    pc_array = np.array([[pt[0], pt[1], pt[2]] for pt in normals])
    return pc_array
+
# Make up rows for Nxk array
# Input Pad is 'edge' or 'constant'
def pad_arr_rows(arr, row, pad='edge'):
    """Pad a 2-D (N, k) array with extra rows so it has exactly `row` rows.

    pad='edge' repeats the last row; pad='constant' appends zero rows.
    Returns `arr` unchanged when it already has `row` rows.
    """
    assert(len(arr.shape) == 2)
    assert(arr.shape[0] <= row)
    assert(pad == 'edge' or pad == 'constant')
    if arr.shape[0] == row:
        return arr
    pad_width = ((0, row - arr.shape[0]), (0, 0))
    if pad == 'edge':
        return np.pad(arr, pad_width, 'edge')
    # Bug fix: np.pad's signature is pad(array, pad_width, mode, **kwargs),
    # so the original 4th *positional* (0, 0) raised TypeError; the fill
    # value must be passed as the constant_values keyword.
    return np.pad(arr, pad_width, 'constant', constant_values=(0, 0))
+
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py
new file mode 100644
index 000000000..9157409ae
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py
@@ -0,0 +1,447 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the NiBabel package for the
+# copyright and license terms.
+#
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+''' Module implementing Euler angle rotations and their conversions
+
+See:
+
+* http://en.wikipedia.org/wiki/Rotation_matrix
+* http://en.wikipedia.org/wiki/Euler_angles
+* http://mathworld.wolfram.com/EulerAngles.html
+
+See also: *Representing Attitude with Euler Angles and Quaternions: A
+Reference* (2006) by James Diebel. A cached PDF link last found here:
+
+http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134
+
+Euler's rotation theorem tells us that any rotation in 3D can be
+described by 3 angles. Let's call the 3 angles the *Euler angle vector*
+and call the angles in the vector :math:`alpha`, :math:`beta` and
+:math:`gamma`. The vector is [ :math:`alpha`,
+:math:`beta`, :math:`gamma` ] and, in this description, the order of the
+parameters specifies the order in which the rotations occur (so the
+rotation corresponding to :math:`alpha` is applied first).
+
+In order to specify the meaning of an *Euler angle vector* we need to
+specify the axes around which each of the rotations corresponding to
+:math:`alpha`, :math:`beta` and :math:`gamma` will occur.
+
+There are therefore three axes for the rotations :math:`alpha`,
+:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,
+:math:`k`.
+
+Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3
+rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3
+matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the
+whole rotation expressed by the Euler angle vector [ :math:`alpha`,
+:math:`beta`. :math:`gamma` ], `R` is given by::
+
+ R = np.dot(G, np.dot(B, A))
+
+See http://mathworld.wolfram.com/EulerAngles.html
+
+The order :math:`G B A` expresses the fact that the rotations are
+performed in the order of the vector (:math:`alpha` around axis `i` =
+`A` first).
+
+To convert a given Euler angle vector to a meaningful rotation, and a
+rotation matrix, we need to define:
+
+* the axes `i`, `j`, `k`
+* whether a rotation matrix should be applied on the left of a vector to
+ be transformed (vectors are column vectors) or on the right (vectors
+ are row vectors).
+* whether the rotations move the axes as they are applied (intrinsic
+ rotations) - compared the situation where the axes stay fixed and the
+ vectors move within the axis frame (extrinsic)
+* the handedness of the coordinate system
+
+See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities
+
+We are using the following conventions:
+
+* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus
+ an Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ]
+ in our convention implies a :math:`alpha` radian rotation around the
+ `z` axis, followed by a :math:`beta` rotation around the `y` axis,
+ followed by a :math:`gamma` rotation around the `x` axis.
+* the rotation matrix applies on the left, to column vectors on the
+ right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix
+ with N column vectors, the transformed vector set `vdash` is given by
+ ``vdash = np.dot(R, v)``.
+* extrinsic rotations - the axes are fixed, and do not move with the
+ rotations.
+* a right-handed coordinate system
+
+The convention of rotation around ``z``, followed by rotation around
+``y``, followed by rotation around ``x``, is known (confusingly) as
+"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
+'''
+
+import math
+
+import sys
+if sys.version_info >= (3,0):
+ from functools import reduce
+
+import numpy as np
+
+
+_FLOAT_EPS_4 = np.finfo(float).eps * 4.0
+
+
def euler2mat(z=0, y=0, x=0):
    ''' Return a 3x3 rotation matrix for rotations around z, y and x axes.

    The rotations are applied in z, then y, then x order (extrinsic,
    right-handed), so the result is the matrix product X @ Y @ Z. Angles
    that are exactly zero contribute no factor.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    M : array shape (3,3)
       Rotation matrix. Apply to column vectors on the right:
       ``vdash = np.dot(M, v)``. Rotations are counter-clockwise when
       looking from the positive end of the axis toward the origin.
    '''
    factors = []
    if z:
        cz, sz = math.cos(z), math.sin(z)
        factors.append(np.array(
                [[cz, -sz, 0],
                 [sz, cz, 0],
                 [0, 0, 1]]))
    if y:
        cy, sy = math.cos(y), math.sin(y)
        factors.append(np.array(
                [[cy, 0, sy],
                 [0, 1, 0],
                 [-sy, 0, cy]]))
    if x:
        cx, sx = math.cos(x), math.sin(x)
        factors.append(np.array(
                [[1, 0, 0],
                 [0, cx, -sx],
                 [0, sx, cx]]))
    if not factors:
        return np.eye(3)
    # Multiply in reverse build order: combined = X @ Y @ Z.
    combined = factors[-1]
    for factor in reversed(factors[:-1]):
        combined = np.dot(combined, factor)
    return combined
+
+
def mat2euler(M, cy_thresh=None):
    ''' Recover the (z, y, x) Euler angle vector from a 3x3 rotation matrix.

    Inverse of ``euler2mat`` under the same z-then-y-then-x convention.

    Parameters
    ----------
    M : array-like, shape (3,3)
    cy_thresh : None or scalar, optional
       Threshold below which the straightforward arctan for the x rotation
       is abandoned (cos(y) nearly zero). If None (default), it is derived
       from the precision of the input dtype.

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively.

    Notes
    -----
    Near cos(y) == 0 both z and x become unstable (atan2(0, 0)); following
    Shoemake's EulerAngles.c (Graphics Gems IV), x is then fixed to 0 and z
    absorbs the remaining rotation.
    '''
    M = np.asarray(M)
    if cy_thresh is None:
        try:
            cy_thresh = np.finfo(M.dtype).eps * 4
        except ValueError:
            # Integer dtypes have no finfo; fall back to float64 epsilon.
            cy_thresh = _FLOAT_EPS_4
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # cy = |cos(y)| = sqrt((cos(x)*cos(y))**2 + (cos(y)*sin(x))**2)
    cy = math.sqrt(r33*r33 + r23*r23)
    if cy > cy_thresh:
        # Standard, well-conditioned decomposition.
        return (math.atan2(-r12, r11),   # z
                math.atan2(r13, cy),     # y
                math.atan2(-r23, r33))   # x
    # Degenerate case: cos(y) ~ 0, so set x = 0 and solve z from r21, r22.
    return (math.atan2(r21, r22),
            math.atan2(r13, cy),
            0.0)
+
+
def euler2quat(z=0, y=0, x=0):
    ''' Return the quaternion corresponding to these Euler angles.

    Uses the z, then y, then x rotation convention of this module.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    quat : array shape (4,)
       Quaternion in w, x, y, z (real, then vector) format.

    Notes
    -----
    Derivable by composing, via the Hamilton product, the elementary
    quaternions for half-angle rotations about the x, y and z axes
    (see http://mathworld.wolfram.com/EulerParameters.html).
    '''
    # Quaternions encode half angles.
    half_z = z / 2.0
    half_y = y / 2.0
    half_x = x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    return np.array([
        cx*cy*cz - sx*sy*sz,
        cx*sy*sz + cy*cz*sx,
        cx*cz*sy - sx*cy*sz,
        cx*cy*sz + sx*cz*sy])
+
+
def quat2euler(q):
    ''' Return Euler angles corresponding to quaternion `q`

    Parameters
    ----------
    q : 4 element sequence
       w, x, y, z of quaternion

    Returns
    -------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Notes
    -----
    It's possible to reduce the amount of calculation a little, by
    combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
    the reduction in computation is small, and the code repetition is
    large.
    '''
    # delayed import to avoid cyclic dependencies
    # NOTE(review): requires the optional nibabel package at call time.
    import nibabel.quaternions as nq
    # Convert quaternion -> rotation matrix, then reuse mat2euler.
    return mat2euler(nq.quat2mat(q))
+
+
def euler2angle_axis(z=0, y=0, x=0):
    ''' Return angle, axis corresponding to these Euler angles

    Uses the z, then y, then x convention above

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    theta : scalar
       angle of rotation
    vector : array shape (3,)
       axis around which rotation occurs

    Examples
    --------
    >>> theta, vec = euler2angle_axis(0, 1.5, 0)
    >>> print(theta)
    1.5
    >>> np.allclose(vec, [0, 1, 0])
    True
    '''
    # delayed import to avoid cyclic dependencies
    # NOTE(review): requires the optional nibabel package at call time.
    # Convert Euler angles -> quaternion, then quaternion -> angle/axis.
    import nibabel.quaternions as nq
    return nq.quat2angle_axis(euler2quat(z, y, x))
+
+
def angle_axis2euler(theta, vector, is_normalized=False):
    ''' Convert angle, axis pair to Euler angles

    Parameters
    ----------
    theta : scalar
       angle of rotation
    vector : 3 element sequence
       vector specifying axis for rotation.
    is_normalized : bool, optional
       True if vector is already normalized (has norm of 1).  Default
       False

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively

    Examples
    --------
    >>> z, y, x = angle_axis2euler(0, [1, 0, 0])
    >>> np.allclose((z, y, x), 0)
    True

    Notes
    -----
    It's possible to reduce the amount of calculation a little, by
    combining parts of the ``angle_axis2mat`` and ``mat2euler``
    functions, but the reduction in computation is small, and the code
    repetition is large.
    '''
    # delayed import to avoid cyclic dependencies
    # NOTE(review): requires the optional nibabel package at call time.
    # Convert angle/axis -> rotation matrix, then reuse mat2euler.
    M = nq.angle_axis2mat(theta, vector, is_normalized)
    return mat2euler(M)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py
new file mode 100644
index 000000000..c23728ef5
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py
@@ -0,0 +1,227 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+""" Utility functions for processing point clouds.
+
+Author: Charles R. Qi, Hao Su
+Date: November 2016
+"""
+
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+# Draw point cloud
+from eulerangles import euler2mat
+
+# Point cloud IO
+import numpy as np
+from plyfile import PlyData, PlyElement
+
+
+# ----------------------------------------
+# Point Cloud/Volume Conversions
+# ----------------------------------------
+
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
    """Voxelize a BxNx3 batch of point clouds.

    Returns Bx(vsize^3) when flatten is True, otherwise
    B x vsize x vsize x vsize x 1.
    """
    volumes = [point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize, radius)
               for b in range(point_clouds.shape[0])]
    if flatten:
        return np.vstack([v.flatten() for v in volumes])
    return np.concatenate(
        [np.expand_dims(np.expand_dims(v, -1), 0) for v in volumes], 0)
+
+
def point_cloud_to_volume(points, vsize, radius=1.0):
    """Quantize an Nx3 point cloud into a vsize^3 occupancy grid.

    Args:
        points: Nx3 array; coordinates assumed in [-radius, radius].
        vsize: number of voxels along each axis.
        radius: half-extent of the cube covered by the grid.
    Returns:
        vsize x vsize x vsize float array with 1.0 in occupied voxels.
    """
    vol = np.zeros((vsize, vsize, vsize))
    voxel = 2 * radius / float(vsize)
    locations = ((points + radius) / voxel).astype(int)
    # Bug fix: a point lying exactly on the +radius boundary quantized to
    # index vsize (IndexError), and points below -radius produced negative
    # indices that silently wrapped to the far side of the grid.  Clamp so
    # boundary/outlier points land in the nearest edge voxel instead.
    locations = np.clip(locations, 0, vsize - 1)
    vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
    return vol
+
+#a = np.zeros((16,1024,3))
+#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
+
def volume_to_point_cloud(vol):
    """Convert a cubic occupancy grid (values 0 or 1) to an Nx3 array.

    Args:
        vol: vsize x vsize x vsize occupancy grid.
    Returns:
        Nx3 numpy array of the (a, b, c) indices of occupied voxels,
        or a (0, 3) array when the grid is empty.
    """
    vsize = vol.shape[0]
    # Bug fix: the original asserted vol.shape[1] == vsize twice and never
    # validated the third axis; a (v, v, w) grid slipped through.
    assert (vol.shape[1] == vsize and vol.shape[2] == vsize)
    points = [np.array([a, b, c])
              for a in range(vsize)
              for b in range(vsize)
              for c in range(vsize)
              if vol[a, b, c] == 1]
    if len(points) == 0:
        return np.zeros((0, 3))
    return np.vstack(points)
+
+# ----------------------------------------
+# Point cloud IO
+# ----------------------------------------
+
def read_ply(filename):
    """Load the x, y, z vertex coordinates of a PLY file into an Nx3 array."""
    vertices = PlyData.read(filename)['vertex'].data
    return np.array([[vx, vy, vz] for vx, vy, vz in vertices])
+
+
def write_ply(points, filename, text=True):
    """Write an Nx3 coordinate array to `filename` as a PLY vertex cloud."""
    rows = [(points[i, 0], points[i, 1], points[i, 2])
            for i in range(points.shape[0])]
    vertex = np.array(rows, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    element = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    PlyData([element], text=text).write(filename)
+
+
+# ----------------------------------------
+# Simple Point cloud and Volume Renderers
+# ----------------------------------------
+
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
                    xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
    """ Render point cloud to image with alpha channel.
        Input:
            points: Nx3 numpy array (+y is up direction)
            canvasSize: output image is canvasSize x canvasSize pixels
            space: scale factor from point coordinates to pixels
            diameter: pixel diameter of the Gaussian disk splatted per point
            xrot, yrot, zrot: Euler angles (radians) applied to the cloud
            switch_xyz: axis permutation applied before rotation
            normalize: center the cloud and scale it into the unit sphere
        Output:
            gray image as numpy array of size canvasSizexcanvasSize
    """
    image = np.zeros((canvasSize, canvasSize))
    if input_points is None or input_points.shape[0] == 0:
        return image

    # Permute the axes, then rotate with the requested Euler angles.
    # NOTE(review): switch_xyz is a mutable default argument, but it is
    # only used for read-only fancy indexing here, so it is safe.
    points = input_points[:, switch_xyz]
    M = euler2mat(zrot, yrot, xrot)
    points = (np.dot(M, points.transpose())).transpose()

    # Normalize the point cloud
    # We normalize scale to fit points in a unit sphere
    if normalize:
        centroid = np.mean(points, axis=0)
        points -= centroid
        furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
        points /= furthest_distance

    # Pre-compute the Gaussian disk
    radius = (diameter-1)/2.0
    disk = np.zeros((diameter, diameter))
    for i in range(diameter):
        for j in range(diameter):
            if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
                disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
    # Sparse form of the disk: pixel offsets (dx, dy) and weights dv.
    mask = np.argwhere(disk > 0)
    dx = mask[:, 0]
    dy = mask[:, 1]
    dv = disk[disk > 0]

    # Order points by z-buffer
    zorder = np.argsort(points[:, 2])
    points = points[zorder, :]
    # Rescale depth to [0, 1] for intensity weighting.
    # NOTE(review): if all points share one z value this divides by zero
    # and yields NaNs — confirm callers never pass a planar (constant-z)
    # cloud.
    points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
    max_depth = np.max(points[:, 2])

    # Splat points back-to-front so nearer (larger-z) points are blended
    # last and dominate the result.
    for i in range(points.shape[0]):
        j = points.shape[0] - i - 1
        x = points[j, 0]
        y = points[j, 1]
        xc = canvasSize/2 + (x*space)
        yc = canvasSize/2 + (y*space)
        xc = int(np.round(xc))
        yc = int(np.round(yc))

        px = dx + xc
        py = dy + yc

        # Exponential-moving-average blend of the disk into the canvas,
        # weighted so nearer points appear brighter.
        # NOTE(review): px/py are not clipped to the canvas; a point close
        # to the border can index out of bounds — confirm inputs stay
        # within view after normalization.
        image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3

    image = image / np.max(image)
    return image
+
def point_cloud_three_views(points):
    """Render an Nx3 cloud (+y up) from three fixed viewpoints.

    Returns a gray numpy image of size 500x1500: three 500x500 renders
    concatenated horizontally.
    """
    # Convention: xrot is azimuth, yrot is in-plane, zrot is elevation.
    views = (
        draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi),
        draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi),
        draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi),
    )
    return np.concatenate(views, 1)
+
+
+from PIL import Image
def point_cloud_three_views_demo():
    """Demo: render the sample piano mesh to 'piano.jpg' as three views."""
    cloud = read_ply('../third_party/mesh_sampling/piano.ply')
    rendered = point_cloud_three_views(cloud)
    Image.fromarray(np.uint8(rendered*255.0)).save('piano.jpg')


if __name__ == "__main__":
    point_cloud_three_views_demo()
+
+
+import matplotlib.pyplot as plt
def pyplot_draw_point_cloud(points, output_filename):
    """Scatter-plot an Nx3 point cloud in 3-D and save it to a file.

    Args:
        points: Nx3 numpy array of xyz coordinates.
        output_filename: path the rendered figure is written to.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(points[:, 0], points[:, 1], points[:, 2])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    # Bug fix: output_filename was accepted but never used — the savefig
    # call was commented out, so the function silently produced no file.
    fig.savefig(output_filename)
    # Release the figure so repeated calls don't accumulate open figures.
    plt.close(fig)
+
def pyplot_draw_volume(vol, output_filename):
    """Render a vsize^3 occupancy grid (values 0/1) to output_filename.

    The grid is first converted to an Nx3 point cloud, then delegated to
    pyplot_draw_point_cloud.
    """
    cloud = volume_to_point_cloud(vol)
    pyplot_draw_point_cloud(cloud, output_filename)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py
new file mode 100644
index 000000000..206a2c3ca
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py
@@ -0,0 +1,932 @@
+# Copyright 2014 Darsh Ranjan
+#
+# This file is part of python-plyfile.
+#
+# python-plyfile is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# python-plyfile is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+#
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# along with python-plyfile. If not, see
+# .
+
+from itertools import islice as _islice
+
+import numpy as _np
+from sys import byteorder as _byteorder
+
+
# Collapsed Python 2 compatibility shim: the historical form tried
# `_range = xrange` and fell back on NameError, but after the Python 3
# port both branches bound the built-in `range`, so the try/except was
# dead weight.  `range` is already a lazy sequence here.
_range = range
+
+
# Many-many relation
# PLY scalar type names <-> numpy dtype codes.  Both columns contain
# duplicates; when collapsed into the dicts below, the LAST pair for a
# repeated key wins (e.g. 'uchar' -> 'u1', 'u1' -> 'uchar').
_data_type_relation = [
    ('int8', 'i1'),
    ('char', 'i1'),
    ('uint8', 'u1'),
    ('uchar', 'b1'),
    ('uchar', 'u1'),
    ('int16', 'i2'),
    ('short', 'i2'),
    ('uint16', 'u2'),
    ('ushort', 'u2'),
    ('int32', 'i4'),
    ('int', 'i4'),
    ('uint32', 'u4'),
    ('uint', 'u4'),
    ('float32', 'f4'),
    ('float', 'f4'),
    ('float64', 'f8'),
    ('double', 'f8')
]

# PLY type name -> numpy code (e.g. 'float' -> 'f4').
_data_types = dict(_data_type_relation)
# numpy code -> canonical PLY type name (e.g. 'f4' -> 'float').
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)

# All accepted type spellings in first-seen order; used in error messages.
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
    if _a not in _types_set:
        _types_list.append(_a)
        _types_set.add(_a)
    if _b not in _types_set:
        _types_list.append(_b)
        _types_set.add(_b)


# PLY 'format' keyword -> numpy byte-order prefix.
_byte_order_map = {
    'ascii': '=',
    'binary_little_endian': '<',
    'binary_big_endian': '>'
}

# numpy byte-order prefix -> PLY 'format' keyword (ascii has no order).
_byte_order_reverse = {
    '<': 'binary_little_endian',
    '>': 'binary_big_endian'
}

# Byte-order prefix of the machine running this process.
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
+
+
def _lookup_type(type_str):
    """Return the canonical PLY name for a type given in any accepted
    spelling (PLY name or numpy code); raise ValueError if unknown."""
    if type_str in _data_type_reverse:
        return type_str and _data_type_reverse[type_str]

    try:
        numpy_code = _data_types[type_str]
    except KeyError:
        raise ValueError("field type %r not in %r" %
                         (type_str, _types_list))

    return _data_type_reverse[numpy_code]
+
+
def _split_line(line, n):
    """Split a header line on whitespace into exactly n + 1 fields.

    The first n fields are individual tokens; the last is the (possibly
    empty) remainder of the line.
    """
    parts = line.split(None, n)
    if len(parts) == n:
        # Exactly n tokens and no trailing text: pad with an empty field.
        parts.append('')

    assert len(parts) == n + 1
    return parts
+
+
+def make2d(array, cols=None, dtype=None):
+ '''
+ Make a 2D array from an array of arrays. The `cols' and `dtype'
+ arguments can be omitted if the array is not empty.
+
+ '''
+ if (cols is None or dtype is None) and not len(array):
+ raise RuntimeError("cols and dtype must be specified for empty "
+ "array")
+
+ if cols is None:
+ cols = len(array[0])
+
+ if dtype is None:
+ dtype = array[0].dtype
+
+ return _np.fromiter(array, [('_', dtype, (cols,))],
+ count=len(array))['_']
+
+
class PlyParseError(Exception):

    '''
    Raised when a PLY file cannot be parsed.

    The attributes `element', `row', `property', and `message' give
    additional information.

    '''

    def __init__(self, message, element=None, row=None, prop=None):
        self.message = message
        self.element = element
        self.row = row
        self.prop = prop

        # Build a human-readable prefix from whichever context fields
        # were supplied, e.g. "element 'vertex': row 3: malformed input".
        s = ''
        if self.element:
            s += 'element %r: ' % self.element.name
        if self.row is not None:
            s += 'row %d: ' % self.row
        if self.prop:
            s += 'property %r: ' % self.prop.name
        s += self.message

        Exception.__init__(self, s)

    def __repr__(self):
        # Bug fix: the original applied '%' only to self.message (raising
        # TypeError for a 4-placeholder format string) and returned a
        # 4-tuple instead of one formatted string; the format arguments
        # must be a parenthesized tuple.
        return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
                (self.message, self.element, self.row, self.prop))
+
+
class PlyData(object):

    '''
    PLY file header and data.

    A PlyData instance is created in one of two ways: by the static
    method PlyData.read (to read a PLY file), or directly from __init__
    given a sequence of elements (which can then be written to a PLY
    file).

    '''

    def __init__(self, elements=None, text=False, byte_order='=',
                 comments=None, obj_info=None):
        '''
        elements: sequence of PlyElement instances.

        text: whether the resulting PLY file will be text (True) or
            binary (False).

        byte_order: '<' for little-endian, '>' for big-endian, or '='
            for native.  This is only relevant if `text' is False.

        comments: sequence of strings that will be placed in the header
            between the 'ply' and 'format ...' lines.

        obj_info: like comments, but will be placed in the header with
            "obj_info ..." instead of "comment ...".

        '''
        # Bug fix (idiom): the original signature used mutable default
        # arguments (elements=[], comments=[], obj_info=[]).  None
        # sentinels avoid any shared-state risk while remaining fully
        # call-compatible.
        elements = [] if elements is None else elements
        comments = [] if comments is None else comments
        obj_info = [] if obj_info is None else obj_info

        # Binary output must name a concrete endianness in its header,
        # so resolve '=' to this machine's byte order.
        if byte_order == '=' and not text:
            byte_order = _native_byte_order

        self.byte_order = byte_order
        self.text = text

        self.comments = list(comments)
        self.obj_info = list(obj_info)
        self.elements = elements

    def _get_elements(self):
        return self._elements

    def _set_elements(self, elements):
        # Stored as an immutable tuple; rebuild the name index on change.
        self._elements = tuple(elements)
        self._index()

    elements = property(_get_elements, _set_elements)

    def _get_byte_order(self):
        return self._byte_order

    def _set_byte_order(self, byte_order):
        if byte_order not in ['<', '>', '=']:
            raise ValueError("byte order must be '<', '>', or '='")

        self._byte_order = byte_order

    byte_order = property(_get_byte_order, _set_byte_order)

    def _index(self):
        # Map element name -> PlyElement, backing __contains__ and
        # __getitem__.
        self._element_lookup = dict((elt.name, elt) for elt in
                                    self._elements)
        if len(self._element_lookup) != len(self._elements):
            raise ValueError("two elements with same name")

    @staticmethod
    def _parse_header(stream):
        '''
        Parse a PLY header from a readable (binary) file-like stream.

        '''
        lines = []
        comments = {'comment': [], 'obj_info': []}
        while True:
            line = stream.readline().decode('ascii').strip()
            fields = _split_line(line, 1)

            if fields[0] == 'end_header':
                break

            elif fields[0] in comments.keys():
                # Comment lines keep their full text as one field.
                lines.append(fields)
            else:
                lines.append(line.split())

        a = 0
        if lines[a] != ['ply']:
            raise PlyParseError("expected 'ply'")

        a += 1
        while lines[a][0] in comments.keys():
            comments[lines[a][0]].append(lines[a][1])
            a += 1

        if lines[a][0] != 'format':
            raise PlyParseError("expected 'format'")

        # NOTE(review): the version check precedes the field-count check,
        # so a short 'format' line raises IndexError rather than
        # PlyParseError — preserved as-is.
        if lines[a][2] != '1.0':
            raise PlyParseError("expected version '1.0'")

        if len(lines[a]) != 3:
            raise PlyParseError("too many fields after 'format'")

        fmt = lines[a][1]

        if fmt not in _byte_order_map:
            raise PlyParseError("don't understand format %r" % fmt)

        byte_order = _byte_order_map[fmt]
        text = fmt == 'ascii'

        a += 1
        while a < len(lines) and lines[a][0] in comments.keys():
            comments[lines[a][0]].append(lines[a][1])
            a += 1

        return PlyData(PlyElement._parse_multi(lines[a:]),
                       text, byte_order,
                       comments['comment'], comments['obj_info'])

    @staticmethod
    def read(stream):
        '''
        Read PLY data from a readable file-like object or filename.

        '''
        (must_close, stream) = _open_stream(stream, 'read')
        try:
            data = PlyData._parse_header(stream)
            for elt in data:
                elt._read(stream, data.text, data.byte_order)
        finally:
            if must_close:
                stream.close()

        return data

    def write(self, stream):
        '''
        Write PLY data to a writeable file-like object or filename.

        '''
        (must_close, stream) = _open_stream(stream, 'write')
        try:
            stream.write(self.header.encode('ascii'))
            stream.write(b'\r\n')
            for elt in self:
                elt._write(stream, self.text, self.byte_order)
        finally:
            if must_close:
                stream.close()

    @property
    def header(self):
        '''
        Provide PLY-formatted metadata for the instance.

        '''
        lines = ['ply']

        if self.text:
            lines.append('format ascii 1.0')
        else:
            lines.append('format ' +
                         _byte_order_reverse[self.byte_order] +
                         ' 1.0')

        # Some information is lost here, since all comments are placed
        # between the 'format' line and the first element.
        for c in self.comments:
            lines.append('comment ' + c)

        for c in self.obj_info:
            lines.append('obj_info ' + c)

        lines.extend(elt.header for elt in self.elements)
        lines.append('end_header')
        return '\r\n'.join(lines)

    def __iter__(self):
        return iter(self.elements)

    def __len__(self):
        return len(self.elements)

    def __contains__(self, name):
        return name in self._element_lookup

    def __getitem__(self, name):
        return self._element_lookup[name]

    def __str__(self):
        return self.header

    def __repr__(self):
        return ('PlyData(%r, text=%r, byte_order=%r, '
                'comments=%r, obj_info=%r)' %
                (self.elements, self.text, self.byte_order,
                 self.comments, self.obj_info))
+
+
def _open_stream(stream, read_or_write):
    """Return (must_close, stream).

    Objects that already expose a `read`/`write` attribute (per
    read_or_write) are passed through unchanged; anything else is
    treated as a filename and opened in binary mode, in which case the
    caller must close it (must_close is True).
    """
    if hasattr(stream, read_or_write):
        return (False, stream)

    mode = read_or_write[0] + 'b'
    try:
        return (True, open(stream, mode))
    except TypeError:
        raise RuntimeError("expected open file or filename")
+
+
class PlyElement(object):

    '''
    PLY file element.

    A client of this library doesn't normally need to instantiate this
    directly, so the following is only for the sake of documenting the
    internals.

    Creating a PlyElement instance is generally done in one of two ways:
    as a byproduct of PlyData.read (when reading a PLY file) and by
    PlyElement.describe (before writing a PLY file).

    '''

    def __init__(self, name, properties, count, comments=[]):
        '''
        This is not part of the public interface. The preferred methods
        of obtaining PlyElement instances are PlyData.read (to read from
        a file) and PlyElement.describe (to construct from a numpy
        array).

        '''
        self._name = str(name)
        self._check_name()
        self._count = count

        self._properties = tuple(properties)
        self._index()

        self.comments = list(comments)

        # True when any property is a list property, in which case the
        # fast whole-array numpy load/store paths cannot be used.
        self._have_list = any(isinstance(p, PlyListProperty)
                              for p in self.properties)

    @property
    def count(self):
        '''Declared number of rows in this element.'''
        return self._count

    def _get_data(self):
        return self._data

    def _set_data(self, data):
        # Assigning data also resets count and validates that every
        # declared property has a matching field.
        self._data = data
        self._count = len(data)
        self._check_sanity()

    data = property(_get_data, _set_data)

    def _check_sanity(self):
        # Every declared property must exist as a field of the data.
        for prop in self.properties:
            if prop.name not in self._data.dtype.fields:
                raise ValueError("dangling property %r" % prop.name)

    def _get_properties(self):
        return self._properties

    def _set_properties(self, properties):
        self._properties = tuple(properties)
        self._check_sanity()
        self._index()

    properties = property(_get_properties, _set_properties)

    def _index(self):
        # Map property name -> PlyProperty; names must be unique.
        self._property_lookup = dict((prop.name, prop)
                                     for prop in self._properties)
        if len(self._property_lookup) != len(self._properties):
            raise ValueError("two properties with same name")

    def ply_property(self, name):
        '''Look up a property descriptor by name.'''
        return self._property_lookup[name]

    @property
    def name(self):
        return self._name

    def _check_name(self):
        # PLY headers are whitespace-delimited; a name with spaces would
        # corrupt the header line.
        if any(c.isspace() for c in self._name):
            msg = "element name %r contains spaces" % self._name
            raise ValueError(msg)

    def dtype(self, byte_order='='):
        '''
        Return the numpy dtype of the in-memory representation of the
        data. (If there are no list properties, and the PLY format is
        binary, then this also accurately describes the on-disk
        representation of the element.)

        '''
        return [(prop.name, prop.dtype(byte_order))
                for prop in self.properties]

    @staticmethod
    def _parse_multi(header_lines):
        '''
        Parse a list of PLY element definitions.

        '''
        elements = []
        while header_lines:
            (elt, header_lines) = PlyElement._parse_one(header_lines)
            elements.append(elt)

        return elements

    @staticmethod
    def _parse_one(lines):
        '''
        Consume one element definition. The unconsumed input is
        returned along with a PlyElement instance.

        '''
        a = 0
        line = lines[a]

        if line[0] != 'element':
            raise PlyParseError("expected 'element'")
        if len(line) > 3:
            raise PlyParseError("too many fields after 'element'")
        if len(line) < 3:
            raise PlyParseError("too few fields after 'element'")

        (name, count) = (line[1], int(line[2]))

        # Collect the comment and property lines that belong to this
        # element; any other line starts the next definition.
        comments = []
        properties = []
        while True:
            a += 1
            if a >= len(lines):
                break

            if lines[a][0] == 'comment':
                comments.append(lines[a][1])
            elif lines[a][0] == 'property':
                properties.append(PlyProperty._parse_one(lines[a]))
            else:
                break

        return (PlyElement(name, properties, count, comments),
                lines[a:])

    @staticmethod
    def describe(data, name, len_types={}, val_types={},
                 comments=[]):
        '''
        Construct a PlyElement from an array's metadata.

        len_types and val_types can be given as mappings from list
        property names to type strings (like 'u1', 'f4', etc., or
        'int8', 'float32', etc.). These can be used to define the length
        and value types of list properties. List property lengths
        always default to type 'u1' (8-bit unsigned integer), and value
        types default to 'i4' (32-bit integer).

        NOTE(review): len_types/val_types/comments are mutable default
        arguments, but they are only read here, so this is safe.
        '''
        if not isinstance(data, _np.ndarray):
            raise TypeError("only numpy arrays are supported")

        if len(data.shape) != 1:
            raise ValueError("only one-dimensional arrays are "
                             "supported")

        count = len(data)

        properties = []
        descr = data.dtype.descr

        for t in descr:
            if not isinstance(t[1], str):
                raise ValueError("nested records not supported")

            if not t[0]:
                raise ValueError("field with empty name")

            if len(t) != 2 or t[1][1] == 'O':
                # non-scalar field, which corresponds to a list
                # property in PLY.

                if t[1][1] == 'O':
                    if len(t) != 2:
                        raise ValueError("non-scalar object fields not "
                                         "supported")

                len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
                if t[1][1] == 'O':
                    # Object fields carry no value type; use the caller
                    # override or the 'i4' default.
                    val_type = val_types.get(t[0], 'i4')
                    val_str = _lookup_type(val_type)
                else:
                    # Subarray field: value type comes from the dtype
                    # code (strip the byte-order character).
                    val_str = _lookup_type(t[1][1:])

                prop = PlyListProperty(t[0], len_str, val_str)
            else:
                val_str = _lookup_type(t[1][1:])
                prop = PlyProperty(t[0], val_str)

            properties.append(prop)

        elt = PlyElement(name, properties, count, comments)
        elt.data = data

        return elt

    def _read(self, stream, text, byte_order):
        '''
        Read the actual data from a PLY file.

        '''
        if text:
            self._read_txt(stream)
        else:
            if self._have_list:
                # There are list properties, so a simple load is
                # impossible.
                self._read_bin(stream, byte_order)
            else:
                # There are no list properties, so loading the data is
                # much more straightforward.
                self._data = _np.fromfile(stream,
                                          self.dtype(byte_order),
                                          self.count)

            # Binary-only short-read check; the text and list paths
            # raise their own errors on truncated input.
            if len(self._data) < self.count:
                k = len(self._data)
                del self._data
                raise PlyParseError("early end-of-file", self, k)

        self._check_sanity()

    def _write(self, stream, text, byte_order):
        '''
        Write the data to a PLY file.

        '''
        if text:
            self._write_txt(stream)
        else:
            if self._have_list:
                # There are list properties, so serialization is
                # slightly complicated.
                self._write_bin(stream, byte_order)
            else:
                # no list properties, so serialization is
                # straightforward.
                self.data.astype(self.dtype(byte_order),
                                 copy=False).tofile(stream)

    def _read_txt(self, stream):
        '''
        Load a PLY element from an ASCII-format PLY file. The element
        may contain list properties.

        '''
        self._data = _np.empty(self.count, dtype=self.dtype())

        k = 0
        for line in _islice(iter(stream.readline, b''), self.count):
            fields = iter(line.strip().split())
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._from_fields(fields)
                except StopIteration:
                    raise PlyParseError("early end-of-line",
                                        self, k, prop)
                except ValueError:
                    raise PlyParseError("malformed input",
                                        self, k, prop)
            # After all properties are consumed the line must be empty.
            try:
                next(fields)
            except StopIteration:
                pass
            else:
                raise PlyParseError("expected end-of-line", self, k)
            k += 1

        if k < self.count:
            del self._data
            raise PlyParseError("early end-of-file", self, k)

    def _write_txt(self, stream):
        '''
        Save a PLY element to an ASCII-format PLY file. The element may
        contain list properties.

        '''
        for rec in self.data:
            fields = []
            for prop in self.properties:
                fields.extend(prop._to_fields(rec[prop.name]))

            _np.savetxt(stream, [fields], '%.18g', newline='\r\n')

    def _read_bin(self, stream, byte_order):
        '''
        Load a PLY element from a binary PLY file. The element may
        contain list properties.

        '''
        self._data = _np.empty(self.count, dtype=self.dtype(byte_order))

        for k in _range(self.count):
            for prop in self.properties:
                # Properties signal truncated input with StopIteration,
                # which is translated to a parse error here.
                try:
                    self._data[prop.name][k] = \
                        prop._read_bin(stream, byte_order)
                except StopIteration:
                    raise PlyParseError("early end-of-file",
                                        self, k, prop)

    def _write_bin(self, stream, byte_order):
        '''
        Save a PLY element to a binary PLY file. The element may
        contain list properties.

        '''
        for rec in self.data:
            for prop in self.properties:
                prop._write_bin(rec[prop.name], stream, byte_order)

    @property
    def header(self):
        '''
        Format this element's metadata as it would appear in a PLY
        header.

        '''
        lines = ['element %s %d' % (self.name, self.count)]

        # Some information is lost here, since all comments are placed
        # between the 'element' line and the first property definition.
        for c in self.comments:
            lines.append('comment ' + c)

        lines.extend(list(map(str, self.properties)))

        return '\r\n'.join(lines)

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __str__(self):
        return self.header

    def __repr__(self):
        return ('PlyElement(%r, %r, count=%d, comments=%r)' %
                (self.name, self.properties, self.count,
                 self.comments))
+
+
class PlyProperty(object):

    '''
    PLY property description. This class is pure metadata; the data
    itself is contained in PlyElement instances.

    '''

    def __init__(self, name, val_dtype):
        self._name = str(name)
        self._check_name()
        self.val_dtype = val_dtype

    def _get_val_dtype(self):
        return self._val_dtype

    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted spelling ('float', 'f4', 'float32') to
        # the canonical numpy code; raises ValueError for unknown types.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]

    val_dtype = property(_get_val_dtype, _set_val_dtype)

    @property
    def name(self):
        return self._name

    def _check_name(self):
        # PLY headers are whitespace-delimited; a property name with
        # spaces would corrupt the header line.
        if any(c.isspace() for c in self._name):
            msg = "Error: property name %r contains spaces" % self._name
            raise RuntimeError(msg)

    @staticmethod
    def _parse_one(line):
        '''
        Parse one whitespace-split 'property ...' header line into a
        PlyProperty or PlyListProperty instance.

        '''
        assert line[0] == 'property'

        if line[1] == 'list':
            # 'property list <len_type> <val_type> <name>'
            if len(line) > 5:
                raise PlyParseError("too many fields after "
                                    "'property list'")
            if len(line) < 5:
                raise PlyParseError("too few fields after "
                                    "'property list'")

            return PlyListProperty(line[4], line[2], line[3])

        else:
            # 'property <type> <name>'
            if len(line) > 3:
                raise PlyParseError("too many fields after "
                                    "'property'")
            if len(line) < 3:
                raise PlyParseError("too few fields after "
                                    "'property'")

            return PlyProperty(line[2], line[1])

    def dtype(self, byte_order='='):
        '''
        Return the numpy dtype description for this property (as a tuple
        of strings).

        '''
        return byte_order + self.val_dtype

    def _from_fields(self, fields):
        '''
        Parse from generator. Raise StopIteration if the property could
        not be read.

        '''
        return _np.dtype(self.dtype()).type(next(fields))

    def _to_fields(self, data):
        '''
        Return generator over one item.

        '''
        yield _np.dtype(self.dtype()).type(data)

    def _read_bin(self, stream, byte_order):
        '''
        Read data from a binary stream. Raise StopIteration if the
        property could not be read.

        '''
        try:
            return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            # fromfile returned an empty array: the stream is exhausted.
            raise StopIteration

    def _write_bin(self, data, stream, byte_order):
        '''
        Write data to a binary stream.

        '''
        _np.dtype(self.dtype(byte_order)).type(data).tofile(stream)

    def __str__(self):
        # Header form, e.g. 'property float x'.
        val_str = _data_type_reverse[self.val_dtype]
        return 'property %s %s' % (val_str, self.name)

    def __repr__(self):
        return 'PlyProperty(%r, %r)' % (self.name,
                                        _lookup_type(self.val_dtype))
+
+
class PlyListProperty(PlyProperty):

    '''
    PLY list property description.

    '''

    def __init__(self, name, len_dtype, val_dtype):
        PlyProperty.__init__(self, name, val_dtype)

        self.len_dtype = len_dtype

    def _get_len_dtype(self):
        return self._len_dtype

    def _set_len_dtype(self, len_dtype):
        # Normalize the length-count type to its canonical numpy code.
        self._len_dtype = _data_types[_lookup_type(len_dtype)]

    len_dtype = property(_get_len_dtype, _set_len_dtype)

    def dtype(self, byte_order='='):
        '''
        List properties always have a numpy dtype of "object".

        '''
        return '|O'

    def list_dtype(self, byte_order='='):
        '''
        Return the pair (len_dtype, val_dtype) (both numpy-friendly
        strings).

        '''
        return (byte_order + self.len_dtype,
                byte_order + self.val_dtype)

    def _from_fields(self, fields):
        # Read the list length, then exactly that many values from the
        # field generator; StopIteration signals a truncated line.
        (len_t, val_t) = self.list_dtype()

        n = int(_np.dtype(len_t).type(next(fields)))

        data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
        if len(data) < n:
            raise StopIteration

        return data

    def _to_fields(self, data):
        '''
        Return generator over the (numerical) PLY representation of the
        list data (length followed by actual data).

        '''
        (len_t, val_t) = self.list_dtype()

        data = _np.asarray(data, dtype=val_t).ravel()

        yield _np.dtype(len_t).type(data.size)
        for x in data:
            yield x

    def _read_bin(self, stream, byte_order):
        # Binary layout: a length of len_dtype followed by that many
        # values of val_dtype; StopIteration signals truncated input.
        (len_t, val_t) = self.list_dtype(byte_order)

        try:
            n = _np.fromfile(stream, len_t, 1)[0]
        except IndexError:
            raise StopIteration

        data = _np.fromfile(stream, val_t, n)
        if len(data) < n:
            raise StopIteration

        return data

    def _write_bin(self, data, stream, byte_order):
        '''
        Write data to a binary stream.

        '''
        (len_t, val_t) = self.list_dtype(byte_order)

        data = _np.asarray(data, dtype=val_t).ravel()

        _np.array(data.size, dtype=len_t).tofile(stream)
        data.tofile(stream)

    def __str__(self):
        # Header form, e.g. 'property list uchar int vertex_indices'.
        len_str = _data_type_reverse[self.len_dtype]
        val_str = _data_type_reverse[self.val_dtype]
        return 'property list %s %s %s' % (len_str, val_str, self.name)

    def __repr__(self):
        return ('PlyListProperty(%r, %r, %r)' %
                (self.name,
                 _lookup_type(self.len_dtype),
                 _lookup_type(self.val_dtype)))
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py
new file mode 100644
index 000000000..4ac248baa
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py
@@ -0,0 +1,614 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+""" Wrapper functions for TensorFlow layers.
+
+Author: Charles R. Qi
+Date: November 2016
+"""
+import numpy as np
+import tensorflow as tf
+
+# Create a variable pinned to CPU memory (name, shape, initializer -> tensor).
+def _variable_on_cpu(name, shape, initializer, use_fp16=False):
+    """Helper to create a Variable stored on CPU memory.
+    Args:
+        name: name of the variable
+        shape: list of ints
+        initializer: initializer for Variable
+        use_fp16: bool, store the variable as float16 instead of float32
+    Returns:
+        Variable Tensor
+    """
+    # Placing variables on the CPU lets them be shared cheaply across
+    # accelerator devices.
+    with tf.device('/cpu:0'):
+        dtype = tf.float16 if use_fp16 else tf.float32
+        var = tf.compat.v1.get_variable(name, shape, initializer=initializer, dtype=dtype)
+    return var
+
+
+# Create a variable with optional L2 weight decay added to the 'losses' collection.
+def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
+    """Helper to create an initialized Variable with weight decay.
+
+    Note that the Variable is initialized with a truncated normal distribution.
+    A weight decay is added only if one is specified.
+
+    Args:
+        name: name of the variable
+        shape: list of ints
+        stddev: standard deviation of a truncated Gaussian
+        wd: add L2Loss weight decay multiplied by this float. If None, weight
+            decay is not added for this Variable.
+        use_xavier: bool, whether to use xavier initializer
+
+    Returns:
+        Variable Tensor
+    """
+    if use_xavier:
+        # Xavier/Glorot uniform initialization via VarianceScaling.
+        initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
+    else:
+        initializer = tf.compat.v1.truncated_normal_initializer(stddev=stddev)
+    var = _variable_on_cpu(name, shape, initializer)
+    if wd is not None:
+        # Accumulate the L2 penalty in the graph's 'losses' collection so
+        # the training loop can sum it into the total loss.
+        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
+        tf.compat.v1.add_to_collection('losses', weight_decay)
+    return var
+
+
+# 1-D convolution layer (conv + bias + optional batch norm + activation).
+def conv1d(inputs,
+           num_output_channels,
+           kernel_size,
+           scope,
+           stride=1,
+           padding='SAME',
+           use_xavier=True,
+           stddev=1e-3,
+           weight_decay=0.0,
+           activation_fn=tf.nn.relu,
+           bn=False,
+           bn_decay=None,
+           is_training=None):
+    """ 1D convolution with non-linear operation.
+
+    Args:
+        inputs: 3-D tensor variable BxLxC
+        num_output_channels: int
+        kernel_size: int
+        scope: string
+        stride: int
+        padding: 'SAME' or 'VALID'
+        use_xavier: bool, use xavier_initializer if true
+        stddev: float, stddev for truncated_normal init
+        weight_decay: float
+        activation_fn: function
+        bn: bool, whether to use batch norm
+        bn_decay: float or float tensor variable in [0,1]
+        is_training: bool Tensor variable
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        num_in_channels = inputs.get_shape()[-1]
+        kernel_shape = [kernel_size,
+                        num_in_channels, num_output_channels]
+        kernel = _variable_with_weight_decay('weights',
+                                             shape=kernel_shape,
+                                             use_xavier=use_xavier,
+                                             stddev=stddev,
+                                             wd=weight_decay)
+        outputs = tf.nn.conv1d(input=inputs, filters=kernel,
+                               stride=stride,
+                               padding=padding)
+        biases = _variable_on_cpu('biases', [num_output_channels],
+                                  tf.compat.v1.constant_initializer(0.0))
+        outputs = tf.nn.bias_add(outputs, biases)
+
+        if bn:
+            # Batch norm is applied before the activation.
+            outputs = batch_norm_for_conv1d(outputs, is_training,
+                                            bn_decay=bn_decay, scope='bn')
+
+        if activation_fn is not None:
+            outputs = activation_fn(outputs)
+        return outputs
+
+
+# 2-D convolution layer (conv + bias + optional batch norm + activation).
+def conv2d(inputs,
+           num_output_channels,
+           kernel_size,
+           scope,
+           stride=[1, 1],
+           padding='SAME',
+           use_xavier=True,
+           stddev=1e-3,
+           weight_decay=0.0,
+           activation_fn=tf.nn.relu,
+           bn=False,
+           bn_decay=None,
+           is_training=None):
+    """ 2D convolution with non-linear operation.
+
+    Args:
+        inputs: 4-D tensor variable BxHxWxC
+        num_output_channels: int
+        kernel_size: a list of 2 ints
+        scope: string
+        stride: a list of 2 ints
+        padding: 'SAME' or 'VALID'
+        use_xavier: bool, use xavier_initializer if true
+        stddev: float, stddev for truncated_normal init
+        weight_decay: float
+        activation_fn: function
+        bn: bool, whether to use batch norm
+        bn_decay: float or float tensor variable in [0,1]
+        is_training: bool Tensor variable
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_h, kernel_w = kernel_size
+        num_in_channels = inputs.get_shape()[-1]
+        kernel_shape = [kernel_h, kernel_w,
+                        num_in_channels, num_output_channels]
+        kernel = _variable_with_weight_decay('weights',
+                                             shape=kernel_shape,
+                                             use_xavier=use_xavier,
+                                             stddev=stddev,
+                                             wd=weight_decay)
+        stride_h, stride_w = stride
+        outputs = tf.nn.conv2d(input=inputs, filters=kernel,
+                               strides=[1, stride_h, stride_w, 1],
+                               padding=padding)
+        biases = _variable_on_cpu('biases', [num_output_channels],
+                                  tf.compat.v1.constant_initializer(0.0))
+        outputs = tf.nn.bias_add(outputs, biases)
+
+        if bn:
+            # Batch norm is applied before the activation.
+            outputs = batch_norm_for_conv2d(outputs, is_training,
+                                            bn_decay=bn_decay, scope='bn')
+
+        if activation_fn is not None:
+            outputs = activation_fn(outputs)
+        return outputs
+
+
+def conv2d_transpose(inputs,
+                     num_output_channels,
+                     kernel_size,
+                     scope,
+                     stride=[1, 1],
+                     padding='SAME',
+                     use_xavier=True,
+                     stddev=1e-3,
+                     weight_decay=0.0,
+                     activation_fn=tf.nn.relu,
+                     bn=False,
+                     bn_decay=None,
+                     is_training=None):
+    """ 2D convolution transpose with non-linear operation.
+
+    Args:
+        inputs: 4-D tensor variable BxHxWxC
+        num_output_channels: int
+        kernel_size: a list of 2 ints
+        scope: string
+        stride: a list of 2 ints
+        padding: 'SAME' or 'VALID'
+        use_xavier: bool, use xavier_initializer if true
+        stddev: float, stddev for truncated_normal init
+        weight_decay: float
+        activation_fn: function
+        bn: bool, whether to use batch norm
+        bn_decay: float or float tensor variable in [0,1]
+        is_training: bool Tensor variable
+
+    Returns:
+        Variable tensor
+
+    Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_h, kernel_w = kernel_size
+        num_in_channels = inputs.get_shape()[-1]
+        # Filter layout for conv2d_transpose is [h, w, out, in] —
+        # the channel order is reversed relative to conv2d.
+        kernel_shape = [kernel_h, kernel_w,
+                        num_output_channels, num_in_channels]  # reversed to conv2d
+        kernel = _variable_with_weight_decay('weights',
+                                             shape=kernel_shape,
+                                             use_xavier=use_xavier,
+                                             stddev=stddev,
+                                             wd=weight_decay)
+        stride_h, stride_w = stride
+
+        # from slim.convolution2d_transpose: invert the forward-conv
+        # output-size formula to get the deconv spatial size.
+        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
+            dim_size *= stride_size
+
+            if padding == 'VALID' and dim_size is not None:
+                dim_size += max(kernel_size - stride_size, 0)
+            return dim_size
+
+        # Calculate output shape.
+        # NOTE(review): uses static shapes from get_shape(), so batch size,
+        # height and width must be known at graph-construction time — confirm
+        # callers never feed tensors with unknown (None) dimensions.
+        batch_size = inputs.get_shape()[0]
+        height = inputs.get_shape()[1]
+        width = inputs.get_shape()[2]
+        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
+        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
+        output_shape = [batch_size, out_height, out_width, num_output_channels]
+
+        outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
+                                         [1, stride_h, stride_w, 1],
+                                         padding=padding)
+        biases = _variable_on_cpu('biases', [num_output_channels],
+                                  tf.compat.v1.constant_initializer(0.0))
+        outputs = tf.nn.bias_add(outputs, biases)
+
+        if bn:
+            # Batch norm is applied before the activation.
+            outputs = batch_norm_for_conv2d(outputs, is_training,
+                                            bn_decay=bn_decay, scope='bn')
+
+        if activation_fn is not None:
+            outputs = activation_fn(outputs)
+        return outputs
+
+
+# 3-D convolution layer (conv + bias + optional batch norm + activation).
+def conv3d(inputs,
+           num_output_channels,
+           kernel_size,
+           scope,
+           stride=[1, 1, 1],
+           padding='SAME',
+           use_xavier=True,
+           stddev=1e-3,
+           weight_decay=0.0,
+           activation_fn=tf.nn.relu,
+           bn=False,
+           bn_decay=None,
+           is_training=None):
+    """ 3D convolution with non-linear operation.
+
+    Args:
+        inputs: 5-D tensor variable BxDxHxWxC
+        num_output_channels: int
+        kernel_size: a list of 3 ints
+        scope: string
+        stride: a list of 3 ints
+        padding: 'SAME' or 'VALID'
+        use_xavier: bool, use xavier_initializer if true
+        stddev: float, stddev for truncated_normal init
+        weight_decay: float
+        activation_fn: function
+        bn: bool, whether to use batch norm
+        bn_decay: float or float tensor variable in [0,1]
+        is_training: bool Tensor variable
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_d, kernel_h, kernel_w = kernel_size
+        num_in_channels = inputs.get_shape()[-1]
+        kernel_shape = [kernel_d, kernel_h, kernel_w,
+                        num_in_channels, num_output_channels]
+        kernel = _variable_with_weight_decay('weights',
+                                             shape=kernel_shape,
+                                             use_xavier=use_xavier,
+                                             stddev=stddev,
+                                             wd=weight_decay)
+        stride_d, stride_h, stride_w = stride
+        outputs = tf.nn.conv3d(inputs, kernel,
+                               [1, stride_d, stride_h, stride_w, 1],
+                               padding=padding)
+        biases = _variable_on_cpu('biases', [num_output_channels],
+                                  tf.compat.v1.constant_initializer(0.0))
+        outputs = tf.nn.bias_add(outputs, biases)
+
+        if bn:
+            # Batch norm is applied before the activation.
+            outputs = batch_norm_for_conv3d(outputs, is_training,
+                                            bn_decay=bn_decay, scope='bn')
+
+        if activation_fn is not None:
+            outputs = activation_fn(outputs)
+        return outputs
+
+
+# Fully connected layer (matmul + bias + optional batch norm + activation).
+def fully_connected(inputs,
+                    num_outputs,
+                    scope,
+                    use_xavier=True,
+                    stddev=1e-3,
+                    weight_decay=0.0,
+                    activation_fn=tf.nn.relu,
+                    bn=False,
+                    bn_decay=None,
+                    is_training=None):
+    """ Fully connected layer with non-linear operation.
+
+    Args:
+        inputs: 2-D tensor BxN
+        num_outputs: int
+        scope: string
+        use_xavier: bool, use xavier_initializer if true
+        stddev: float, stddev for truncated_normal init
+        weight_decay: float
+        activation_fn: function
+        bn: bool, whether to use batch norm
+        bn_decay: float or float tensor variable in [0,1]
+        is_training: bool Tensor variable
+
+    Returns:
+        Variable tensor of size B x num_outputs.
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        num_input_units = inputs.get_shape()[-1]
+        weights = _variable_with_weight_decay('weights',
+                                              shape=[num_input_units, num_outputs],
+                                              use_xavier=use_xavier,
+                                              stddev=stddev,
+                                              wd=weight_decay)
+        outputs = tf.matmul(inputs, weights)
+        biases = _variable_on_cpu('biases', [num_outputs],
+                                  tf.compat.v1.constant_initializer(0.0))
+        outputs = tf.nn.bias_add(outputs, biases)
+
+        if bn:
+            # Batch norm is applied before the activation.
+            outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
+
+        if activation_fn is not None:
+            outputs = activation_fn(outputs)
+        return outputs
+
+
+# 2-D max pooling (default 2x2 stride).
+def max_pool2d(inputs,
+               kernel_size,
+               scope,
+               stride=[2, 2],
+               padding='VALID'):
+    """ 2D max pooling.
+
+    Args:
+        inputs: 4-D tensor BxHxWxC
+        kernel_size: a list of 2 ints
+        stride: a list of 2 ints
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_h, kernel_w = kernel_size
+        stride_h, stride_w = stride
+        outputs = tf.nn.max_pool2d(input=inputs,
+                                   ksize=[1, kernel_h, kernel_w, 1],
+                                   strides=[1, stride_h, stride_w, 1],
+                                   padding=padding,
+                                   name=sc.name)
+        return outputs
+
+
+# 2-D average pooling (default 2x2 stride).
+def avg_pool2d(inputs,
+               kernel_size,
+               scope,
+               stride=[2, 2],
+               padding='VALID'):
+    """ 2D avg pooling.
+
+    Args:
+        inputs: 4-D tensor BxHxWxC
+        kernel_size: a list of 2 ints
+        stride: a list of 2 ints
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_h, kernel_w = kernel_size
+        stride_h, stride_w = stride
+        outputs = tf.nn.avg_pool2d(input=inputs,
+                                   ksize=[1, kernel_h, kernel_w, 1],
+                                   strides=[1, stride_h, stride_w, 1],
+                                   padding=padding,
+                                   name=sc.name)
+        return outputs
+
+
+# 3-D max pooling (default 2x2x2 stride).
+def max_pool3d(inputs,
+               kernel_size,
+               scope,
+               stride=[2, 2, 2],
+               padding='VALID'):
+    """ 3D max pooling.
+
+    Args:
+        inputs: 5-D tensor BxDxHxWxC
+        kernel_size: a list of 3 ints
+        stride: a list of 3 ints
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_d, kernel_h, kernel_w = kernel_size
+        stride_d, stride_h, stride_w = stride
+        outputs = tf.nn.max_pool3d(inputs,
+                                   ksize=[1, kernel_d, kernel_h, kernel_w, 1],
+                                   strides=[1, stride_d, stride_h, stride_w, 1],
+                                   padding=padding,
+                                   name=sc.name)
+        return outputs
+
+
+# 3-D average pooling (default 2x2x2 stride).
+def avg_pool3d(inputs,
+               kernel_size,
+               scope,
+               stride=[2, 2, 2],
+               padding='VALID'):
+    """ 3D avg pooling.
+
+    Args:
+        inputs: 5-D tensor BxDxHxWxC
+        kernel_size: a list of 3 ints
+        stride: a list of 3 ints
+
+    Returns:
+        Variable tensor
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        kernel_d, kernel_h, kernel_w = kernel_size
+        stride_d, stride_h, stride_w = stride
+        outputs = tf.nn.avg_pool3d(inputs,
+                                   ksize=[1, kernel_d, kernel_h, kernel_w, 1],
+                                   strides=[1, stride_d, stride_h, stride_w, 1],
+                                   padding=padding,
+                                   name=sc.name)
+        return outputs
+
+
+# Batch normalization (shared template used by the FC/conv variants below).
+def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
+    """ Batch normalization on convolutional maps and beyond...
+    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
+
+    Args:
+        inputs:       Tensor, k-D input ... x C could be BC or BHWC or BDHWC
+        is_training:  boolean tf.Variable, true indicates training phase
+        scope:        string, variable scope
+        moments_dims: a list of ints, indicating dimensions for moments calculation
+        bn_decay:     float or float tensor variable, controlling moving average weight
+    Return:
+        normed:       batch-normalized maps
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        num_channels = inputs.get_shape()[-1]
+        # Learnable per-channel shift (beta) and scale (gamma).
+        beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
+                           name='beta', trainable=True)
+        gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
+                            name='gamma', trainable=True)
+        batch_mean, batch_var = tf.nn.moments(x=inputs, axes=moments_dims, name='moments')
+        # Default decay of 0.9 when no bn_decay is supplied.
+        decay = bn_decay if bn_decay is not None else 0.9
+        ema = tf.train.ExponentialMovingAverage(decay=decay)
+        # Operator that maintains moving averages of variables;
+        # only applied during training (no-op at inference).
+        ema_apply_op = tf.cond(pred=is_training,
+                               true_fn=lambda: ema.apply([batch_mean, batch_var]),
+                               false_fn=lambda: tf.no_op())
+
+        # Update moving average and return current batch's avg and var.
+        def mean_var_with_update():
+            with tf.control_dependencies([ema_apply_op]):
+                return tf.identity(batch_mean), tf.identity(batch_var)
+
+        # ema.average returns the Variable holding the average of var:
+        # training normalizes with batch statistics (while updating the
+        # EMA), inference normalizes with the accumulated moving averages.
+        mean, var = tf.cond(pred=is_training,
+                            true_fn=mean_var_with_update,
+                            false_fn=lambda: (ema.average(batch_mean), ema.average(batch_var)))
+        normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
+        return normed
+
+
+# Batch normalization for fully connected layers.
+def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on FC data.
+
+    Args:
+        inputs:      Tensor, 2D BxC input
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    # Moments over the batch dimension only.
+    return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
+
+
+# Batch normalization for 1-D convolution layers.
+def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 1D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 3D BLC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    # Moments over batch and length dimensions.
+    return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)
+
+
+# Batch normalization for 2-D convolution layers.
+def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 2D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 4D BHWC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    # Moments over batch, height and width dimensions.
+    return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
+
+
+# Batch normalization for 3-D convolution layers.
+def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 3D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 5D BDHWC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    # Moments over batch, depth, height and width dimensions.
+    return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
+
+
+# Dropout: randomly drops connections to reduce overfitting (default 50%).
+def dropout(inputs,
+            is_training,
+            scope,
+            keep_prob=0.5,
+            noise_shape=None):
+    """ Dropout layer.
+
+    Args:
+        inputs: tensor
+        is_training: boolean tf.Variable
+        scope: string
+        keep_prob: float in [0,1], probability of KEEPING each unit
+        noise_shape: list of ints
+
+    Returns:
+        tensor variable
+    """
+    with tf.compat.v1.variable_scope(scope) as sc:
+        # TF2's tf.nn.dropout takes a drop *rate*, hence 1 - keep_prob.
+        # Dropout is only active during training; inference passes
+        # inputs through unchanged.
+        outputs = tf.cond(pred=is_training,
+                          true_fn=lambda: tf.nn.dropout(inputs, (1 - keep_prob), noise_shape),
+                          false_fn=lambda: inputs)
+        return outputs
--
Gitee
From 778498fdb2d891cfc68173297a2d8b1ebb36e440 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:32:02 +0000
Subject: [PATCH 23/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../test/train_full_1p.sh | 179 +++++++++++++++++
.../test/train_performance_1p.sh | 186 ++++++++++++++++++
2 files changed, 365 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..3cfc94739
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,179 @@
+#!/bin/bash
+
+# Current path; no revision needed.
+cur_path=`pwd`
+
+#ENV
+#this is necessary for lib.so.103
+#export LD_LIBRARY_PATH=/usr/include/h5py/lib:$LD_LIBRARY_PATH
+
+#HCCL params, no revision
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# Dataset path; keep empty here (supplied via --data_path), do not modify.
+data_path=""
+ckpt_path=""
+# Default log level; no modification needed.
+#export ASCEND_GLOBAL_LOG_LEVEL=3
+#export ASCEND_GLOBAL_EVENT_ENABLE=1
+
+# Basic parameters; review and adjust per model.
+# Network name, same as the directory name.
+Network="PointNet_ID2913_for_TensorFlow2.X"
+# Number of training epochs.
+train_epochs=250
+# Training batch size.
+batch_size=32
+# Training steps.
+train_steps=0
+# Learning rate.
+learning_rate=0.0015
+num_point=2048
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below.
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Diagnostic parameters ##############
+
+# Parse command-line overrides of the form --name=value.
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    # NOTE(review): duplicate --data_path branch; the first branch above
+    # already matches, so this one is dead code.
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+# Training start time; no modification needed.
+start_time=$(date +%s)
+
+# Enter the training script directory; review per model.
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no modification needed.
+    echo "Device ID: $RANK_ID"
+    export RANK_ID=$RANK_ID
+
+    # Create the per-device output directory; no modification needed.
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Launch the training script. The pass-through arguments below need no
+    # modification; model-specific arguments should be reviewed per model.
+    #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune
+    # NOTE(review): data_dump_path/profiling_dump_path expand to empty when the
+    # corresponding flags were not passed — confirm train.py tolerates that.
+    nohup python3 ${cur_path}/../train.py \
+        --log_dir=${cur_path}/output/$ASCEND_DEVICE_ID/ckpt \
+        --num_point=2048 \
+        --data_path=${data_path} \
+        --batch_size=32 \
+        --learning_rate=0.0015 \
+        --max_epoch=250 \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --auto_tune=${auto_tune} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+
+done
+wait
+
+# Training end time; no modification needed.
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Result reporting; no modification needed.
+echo "------------------ Final result ------------------"
+# Performance (FPS); review per model.
+FPS=`grep FPS ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+
+# Print; no modification needed.
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review per model.
+train_accuracy=` grep 'eval accuracy' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+# Print; no modification needed.
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Stability/accuracy watchdog summary.
+# Training case info; no modification needed.
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data.
+# Throughput; no modification needed.
+ActualFPS=${FPS}
+# Per-iteration training time; no modification needed.
+# (the grep pattern TOTLE_TIME matches the tag emitted by train.py)
+TrainingTime=`grep TOTLE_TIME ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model.
+grep '^mean loss' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print $NF}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no modification needed.
+ActualLoss=`awk '{print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1`
+
+# Print the key information into ${CaseName}.log; no modification needed.
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..72e349b70
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,186 @@
+#!/bin/bash
+
+# Current path; no revision needed.
+cur_path=`pwd`
+
+#ENV
+#this is necessary for lib.so.103
+#export LD_LIBRARY_PATH=/usr/include/h5py/lib:$LD_LIBRARY_PATH
+
+#HCCL params, no revision
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# Dataset path; keep empty here (supplied via --data_path), do not modify.
+data_path=""
+ckpt_path=""
+# Default log level; no modification needed.
+#export ASCEND_GLOBAL_LOG_LEVEL=3
+#export ASCEND_GLOBAL_EVENT_ENABLE=1
+
+# Basic parameters; review and adjust per model.
+# Network name, same as the directory name.
+Network="PointNet_ID2913_for_TensorFlow2.X"
+# Number of training epochs (short run for performance measurement).
+train_epochs=1
+# Training batch size.
+batch_size=32
+# Training steps.
+train_steps=0
+# Learning rate.
+learning_rate=0.0015
+num_point=2048
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below.
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Diagnostic parameters ##############
+
+# Parse command-line overrides of the form --name=value.
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    # NOTE(review): duplicate --data_path branch; the first branch above
+    # already matches, so this one is dead code.
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+# Training start time; no modification needed.
+start_time=$(date +%s)
+
+# Enter the training script directory; review per model.
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no modification needed.
+    echo "Device ID: $RANK_ID"
+    export RANK_ID=$RANK_ID
+
+    # Create the per-device output directory; no modification needed.
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Launch the training script. The pass-through arguments below need no
+    # modification; model-specific arguments should be reviewed per model.
+    #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune
+    # NOTE(review): data_dump_path/profiling_dump_path expand to empty when the
+    # corresponding flags were not passed — confirm train.py tolerates that.
+    nohup python3 train.py \
+        --log_dir=${cur_path}/output/$ASCEND_DEVICE_ID/ckpt \
+        --num_point=2048 \
+        --data_path=${data_path} \
+        --batch_size=32 \
+        --learning_rate=0.0015 \
+        --max_epoch=10 \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --auto_tune=${auto_tune} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+
+done
+wait
+
+# Training end time; no modification needed.
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Result reporting; no modification needed.
+echo "------------------ Final result ------------------"
+# Performance (FPS); review per model.
+# epoch_sec=`grep -a 'epoch time: ' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $3}'`
+
+# FPS=`awk -v x=1 -v y="$epoch_sec" 'BEGIN{printf "%.2f\n",x/y}'`
+# shapes=`awk -v x=5 -v y=2048 'BEGIN{printf "%.2f\n",x*y}'`
+# FPS=`awk -v x="$shapes" -v y="$FPS" 'BEGIN{printf "%.2f\n",x*y}'`
+FPS=`grep FPS ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+# Print; no modification needed.
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review per model.
+# train_accuracy="null"
+train_accuracy=` grep 'eval accuracy' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+# Print; no modification needed.
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Stability/accuracy watchdog summary.
+# Training case info; no modification needed.
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data.
+# Throughput; no modification needed.
+ActualFPS=${FPS}
+# Per-iteration training time; no modification needed.
+# (the grep pattern TOTLE_TIME matches the tag emitted by train.py)
+TrainingTime=`grep TOTLE_TIME ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'`
+# TrainingTime=`awk -v x=320 -v y="$epoch_sec" 'BEGIN{printf "%.3f\n",y/x}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model.
+# grep "loss:" ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $3}'|sed 's/.$//' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+grep '^mean loss' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print $NF}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no modification needed.
+ActualLoss=`awk '{print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1`
+
+# Print the key information into ${CaseName}.log; no modification needed.
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
--
Gitee
From a76e2fa021a627cf6cd038b21744524b9503c2d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:32:19 +0000
Subject: [PATCH 24/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../sem_seg/README.md | 36 +
.../sem_seg/batch_inference.py | 201 ++++++
.../sem_seg/collect_indoor3d_data.py | 52 ++
.../sem_seg/download_data.sh | 7 +
.../sem_seg/eval_iou_accuracy.py | 68 ++
.../sem_seg/gen_indoor3d_h5.py | 115 ++++
.../sem_seg/indoor3d_util.py | 619 ++++++++++++++++++
.../sem_seg/meta/all_data_label.txt | 272 ++++++++
.../sem_seg/meta/anno_paths.txt | 272 ++++++++
.../sem_seg/meta/area6_data_label.txt | 48 ++
.../sem_seg/meta/class_names.txt | 13 +
.../sem_seg/model.py | 106 +++
.../sem_seg/train.py | 306 +++++++++
13 files changed, 2115 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md
new file mode 100644
index 000000000..bc4b48b33
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md
@@ -0,0 +1,36 @@
+## Semantic Segmentation of Indoor Scenes
+
+### Dataset
+
+Download prepared HDF5 data for training:
+
+ sh download_data.sh
+
+(optional) Download 3D indoor parsing dataset (S3DIS Dataset) for testing and visualization. Version 1.2 of the dataset is used in this work.
+
+
+To prepare your own HDF5 data, you need to firstly download 3D indoor parsing dataset and then use `python collect_indoor3d_data.py` for data re-organization and `python gen_indoor3d_h5.py` to generate HDF5 files.
+
+### Training
+
+Once you have downloaded prepared HDF5 files or prepared them by yourself, to start training:
+
+ python train.py --log_dir log6 --test_area 6
+
+By default, a simple model based on vanilla PointNet is used for training. Area 6 is used as the test set.
+
+### Testing
+
+Testing requires download of 3D indoor parsing data and preprocessing with `collect_indoor3d_data.py`
+
+After training, use `batch_inference.py` command to segment rooms in the test set. In our work we use 6-fold training that trains 6 models. For model1, area2-6 are used as train set, area1 is used as test set. For model2, area1,3-6 are used as train set and area2 is used as test set... Note that the S3DIS dataset paper uses a different 3-fold training, which was not publicly announced at the time of our work.
+
+For example, to test model6, use command:
+
+ python batch_inference.py --model_path log6/model.ckpt --dump_dir log6/dump --output_filelist log6/output_filelist.txt --room_data_filelist meta/area6_data_label.txt --visu
+
+Some OBJ files will be created for prediction visualization in `log6/dump`.
+
+To evaluate overall segmentation accuracy, we evaluate 6 models on their corresponding test areas and use `eval_iou_accuracy.py` to produce point classification accuracy and IoU as reported in the paper.
+
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py
new file mode 100644
index 000000000..67567f4a6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py
@@ -0,0 +1,201 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+from model import *
+import indoor3d_util
+
# Command-line configuration for batch semantic-segmentation inference.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_argument('--dump_dir', required=True, help='dump folder path')
parser.add_argument('--output_filelist', required=True, help='TXT filename, filelist, each line is an output for a room')
parser.add_argument('--room_data_filelist', required=True, help='TXT filename, filelist, each line is a test room data label file.')
parser.add_argument('--no_clutter', action='store_true', help='If true, donot count the clutter class')
parser.add_argument('--visu', action='store_true', help='Whether to output OBJ file for prediction visualization.')
FLAGS = parser.parse_args()

# Frequently used settings pulled out of FLAGS for readability.
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
# Evaluation log; kept open for the lifetime of the script (closed in __main__).
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
# One room data file per line, resolved relative to the repository root.
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(FLAGS.room_data_filelist)]

# Number of semantic classes in the S3DIS dataset.
NUM_CLASSES = 13
+
def log_string(out_str):
    """Echo *out_str* to stdout and append it (with a newline) to the eval log."""
    print(out_str)
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
+
def evaluate():
    """Restore the trained checkpoint and segment every room in ROOM_PATH_LIST.

    Writes one prediction file and one ground-truth file per room into
    DUMP_DIR, records the prediction filenames in FLAGS.output_filelist,
    and logs the overall point accuracy across all rooms.
    """
    is_training = False

    # Build the inference graph on the selected GPU.
    with tf.device('/gpu:'+str(GPU_INDEX)):
        pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
        is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())

        # simple model
        pred = get_model(pointclouds_pl, is_training_pl)
        loss = get_loss(pred, labels_pl)
        pred_softmax = tf.nn.softmax(pred)

        # Add ops to save and restore all the variables.
        saver = tf.compat.v1.train.Saver()

    # Create a session
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = True
    sess = tf.compat.v1.Session(config=config)

    # Restore variables from disk.
    saver.restore(sess, MODEL_PATH)
    log_string("Model restored.")

    # Tensor handles needed by eval_one_epoch for feed_dict / fetches.
    ops = {'pointclouds_pl': pointclouds_pl,
           'labels_pl': labels_pl,
           'is_training_pl': is_training_pl,
           'pred': pred,
           'pred_softmax': pred_softmax,
           'loss': loss}

    total_correct = 0
    total_seen = 0
    # Record every per-room prediction filename for downstream IoU evaluation.
    fout_out_filelist = open(FLAGS.output_filelist, 'w')
    for room_path in ROOM_PATH_LIST:
        out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt'
        out_data_label_filename = os.path.join(DUMP_DIR, out_data_label_filename)
        out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
        out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename)
        print(room_path, out_data_label_filename)
        a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename)
        total_correct += a
        total_seen += b
        fout_out_filelist.write(out_data_label_filename+'\n')
    fout_out_filelist.close()
    log_string('all room eval accuracy: %f'% (total_correct / float(total_seen)))
+
def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename):
    """Run inference over one room and dump per-point predictions.

    Args:
        sess: TF session with restored model weights.
        ops: dict of graph tensor handles built in evaluate().
        room_path: room data file (.npy, one X Y Z R G B L row per point).
        out_data_label_filename: output path for "x y z r g b confidence pred" lines.
        out_gt_label_filename: output path for one ground-truth label per line.
    Returns:
        (total_correct, total_seen): correct and total point counts for this room.
    """
    error_cnt = 0  # NOTE(review): never updated or read below
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    # Per-class tallies: computed here but not reported by this function.
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    if FLAGS.visu:
        # OBJ dumps for visual inspection of predictions vs ground truth.
        fout = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_pred.obj'), 'w')
        fout_gt = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_gt.obj'), 'w')
    fout_data_label = open(out_data_label_filename, 'w')
    fout_gt_label = open(out_gt_label_filename, 'w')

    # Split the room into NUM_POINT-sized blocks with 9-channel normalized points.
    current_data, current_label = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT)
    current_data = current_data[:,0:NUM_POINT,:]
    current_label = np.squeeze(current_label)
    # Get room dimension..
    data_label = np.load(room_path)
    data = data_label[:,0:6]
    max_room_x = max(data[:,0])
    max_room_y = max(data[:,1])
    max_room_z = max(data[:,2])

    file_size = current_data.shape[0]
    # Trailing blocks that do not fill a whole batch are dropped.
    num_batches = file_size // BATCH_SIZE
    print(file_size)


    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training}
        loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']],
                                      feed_dict=feed_dict)

        if FLAGS.no_clutter:
            # Exclude the last (clutter) class from the argmax.
            pred_label = np.argmax(pred_val[:,:,0:12], 2) # BxN
        else:
            pred_label = np.argmax(pred_val, 2) # BxN
        # Save prediction labels to OBJ file
        for b in range(BATCH_SIZE):
            pts = current_data[start_idx+b, :, :]
            l = current_label[start_idx+b,:]
            # Channels 6-8 hold room-normalized XYZ (see indoor3d_util.
            # room2blocks_plus_normalized); scale back to room coordinates.
            # NOTE(review): pts is a view, so this mutates current_data in
            # place — safe only because each block is visited exactly once.
            pts[:,6] *= max_room_x
            pts[:,7] *= max_room_y
            pts[:,8] *= max_room_z
            pts[:,3:6] *= 255.0
            pred = pred_label[b, :]
            for i in range(NUM_POINT):
                color = indoor3d_util.g_label2color[pred[i]]
                color_gt = indoor3d_util.g_label2color[current_label[start_idx+b, i]]
                if FLAGS.visu:
                    fout.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color[0], color[1], color[2]))
                    fout_gt.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color_gt[0], color_gt[1], color_gt[2]))
                fout_data_label.write('%f %f %f %d %d %d %f %d\n' % (pts[i,6], pts[i,7], pts[i,8], pts[i,3], pts[i,4], pts[i,5], pred_val[b,i,pred[i]], pred[i]))
                fout_gt_label.write('%d\n' % (l[i]))
        correct = np.sum(pred_label == current_label[start_idx:end_idx,:])
        total_correct += correct
        total_seen += (cur_batch_size*NUM_POINT)
        loss_sum += (loss_val*BATCH_SIZE)
        for i in range(start_idx, end_idx):
            for j in range(NUM_POINT):
                l = current_label[i, j]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_label[i-start_idx, j] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    fout_data_label.close()
    fout_gt_label.close()
    if FLAGS.visu:
        fout.close()
        fout_gt.close()
    return total_correct, total_seen
+
+
if __name__=='__main__':
    # Build a fresh default graph, run evaluation, then close the shared log.
    with tf.Graph().as_default():
        evaluate()
    LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py
new file mode 100644
index 000000000..08133ce66
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py
@@ -0,0 +1,52 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+import indoor3d_util
+
# Resolve each annotation directory listed in meta/anno_paths.txt against the
# raw S3DIS dataset root, then convert every room to a single .npy file.
anno_paths = [line.rstrip() for line in open(os.path.join(BASE_DIR, 'meta/anno_paths.txt'))]
anno_paths = [os.path.join(indoor3d_util.DATA_PATH, p) for p in anno_paths]

output_folder = os.path.join(ROOT_DIR, 'data/stanford_indoor3d')
if not os.path.exists(output_folder):
    os.mkdir(output_folder)

# Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually.
for anno_path in anno_paths:
    print(anno_path)
    try:
        elements = anno_path.split('/')
        out_filename = elements[-3]+'_'+elements[-2]+'.npy' # Area_1_hallway_1.npy
        indoor3d_util.collect_point_label(anno_path, os.path.join(output_folder, out_filename), 'numpy')
    except Exception as exc:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still abort the
        # run, and the actual error is reported instead of silently dropped.
        # Best-effort behavior is kept: continue converting the other rooms.
        print(anno_path, 'ERROR!!', exc)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh
new file mode 100644
index 000000000..baeb5cf15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh
@@ -0,0 +1,7 @@
#!/bin/bash

# Download prepared HDF5 data for indoor 3D semantic segmentation (~1.6GB),
# extract it into the current directory, then remove the archive.
wget https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip
unzip indoor3d_sem_seg_hdf5_data.zip
rm indoor3d_sem_seg_hdf5_data.zip

diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py
new file mode 100644
index 000000000..03d17e0e4
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py
@@ -0,0 +1,68 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+
# Aggregate point-classification accuracy and per-class IoU over every room
# listed in all_pred_data_label_filelist.txt (one "*_pred.txt" path per line,
# as produced by batch_inference.py).  Each prediction file has the predicted
# label in its last column; the matching "*_gt.txt" has one label per line.
pred_data_label_filenames = [line.rstrip() for line in open('all_pred_data_label_filelist.txt')]
# BUG FIX: the original used f.rstrip('_pred\.txt'), but str.rstrip strips any
# trailing run of characters drawn from that SET (it could also eat trailing
# letters of the room name such as 'd', 'r', 't'), not the literal suffix.
# Replace the exact "_pred.txt" suffix instead.
gt_label_filenames = [f[:-len('_pred.txt')] + '_gt.txt' for f in pred_data_label_filenames]
num_room = len(gt_label_filenames)

NUM_CLASSES = 13  # S3DIS semantic classes

# Per-class tallies: ground-truth count, predicted count, correct count.
gt_classes = [0 for _ in range(NUM_CLASSES)]
positive_classes = [0 for _ in range(NUM_CLASSES)]
true_positive_classes = [0 for _ in range(NUM_CLASSES)]
for i in range(num_room):
    print(i)
    data_label = np.loadtxt(pred_data_label_filenames[i])
    pred_label = data_label[:, -1]  # predicted label is the last column
    gt_label = np.loadtxt(gt_label_filenames[i])
    print(gt_label.shape)
    for j in range(gt_label.shape[0]):
        gt_l = int(gt_label[j])
        pred_l = int(pred_label[j])
        gt_classes[gt_l] += 1
        positive_classes[pred_l] += 1
        true_positive_classes[gt_l] += int(gt_l == pred_l)


print(gt_classes)
print(positive_classes)
print(true_positive_classes)

# Overall accuracy: correctly classified points / total predicted points.
print('Overall accuracy: {0}'.format(sum(true_positive_classes)/float(sum(positive_classes))))

# Per-class IoU = TP / (GT + predicted - TP), then the unweighted mean.
print('IoU:')
iou_list = []
for i in range(NUM_CLASSES):
    iou = true_positive_classes[i]/float(gt_classes[i]+positive_classes[i]-true_positive_classes[i])
    print(iou)
    iou_list.append(iou)

print(sum(iou_list)/13.0)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py
new file mode 100644
index 000000000..03a0c5977
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py
@@ -0,0 +1,115 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import numpy as np
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'utils'))
+import data_prep_util
+import indoor3d_util
+
# Constants
data_dir = os.path.join(ROOT_DIR, 'data')
indoor3d_data_dir = os.path.join(data_dir, 'stanford_indoor3d')
NUM_POINT = 4096        # points per block sample
H5_BATCH_SIZE = 1000    # samples buffered per output .h5 file
data_dim = [NUM_POINT, 9]   # XYZRGB + room-normalized XYZ per point
label_dim = [NUM_POINT]
data_dtype = 'float32'
label_dtype = 'uint8'

# Set paths
filelist = os.path.join(BASE_DIR, 'meta/all_data_label.txt')
data_label_files = [os.path.join(indoor3d_data_dir, line.rstrip()) for line in open(filelist)]
output_dir = os.path.join(data_dir, 'indoor3d_sem_seg_hdf5_data')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
output_filename_prefix = os.path.join(output_dir, 'ply_data_all')
# room_filelist.txt records, per generated sample, the room it came from.
output_room_filelist = os.path.join(output_dir, 'room_filelist.txt')
fout_room = open(output_room_filelist, 'w')

# --------------------------------------
# ----- BATCH WRITE TO HDF5 -----
# --------------------------------------
batch_data_dim = [H5_BATCH_SIZE] + data_dim
batch_label_dim = [H5_BATCH_SIZE] + label_dim
h5_batch_data = np.zeros(batch_data_dim, dtype = np.float32)
h5_batch_label = np.zeros(batch_label_dim, dtype = np.uint8)
buffer_size = 0  # state: record how many samples are currently in buffer
h5_index = 0 # state: the next h5 file to save
+
def insert_batch(data, label, last_batch=False):
    """Buffer (data, label) samples and flush full buffers to numbered .h5 files.

    Args:
        data: K x NUM_POINT x 9 float array of block samples.
        label: K x NUM_POINT uint8 label array.
        last_batch: when True, also flush whatever remains in the buffer.
    Note:
        Uses module-level h5_batch_data / h5_batch_label as the staging buffer
        and buffer_size / h5_index as persistent state across calls.
    """
    global h5_batch_data, h5_batch_label
    global buffer_size, h5_index
    data_size = data.shape[0]
    # If there is enough space, just insert
    if buffer_size + data_size <= h5_batch_data.shape[0]:
        h5_batch_data[buffer_size:buffer_size+data_size, ...] = data
        h5_batch_label[buffer_size:buffer_size+data_size] = label
        buffer_size += data_size
    else: # not enough space
        capacity = h5_batch_data.shape[0] - buffer_size
        assert(capacity>=0)
        if capacity > 0:
            h5_batch_data[buffer_size:buffer_size+capacity, ...] = data[0:capacity, ...]
            h5_batch_label[buffer_size:buffer_size+capacity, ...] = label[0:capacity, ...]
        # Save batch data and label to h5 file, reset buffer_size
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0]))
        h5_index += 1
        buffer_size = 0
        # recursive call: insert the overflow that did not fit in this buffer
        insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
    if last_batch and buffer_size > 0:
        # Flush the final, possibly partial, buffer.
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...], data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
        h5_index += 1
        buffer_size = 0
    return
+
+
# Convert every room into normalized block samples and stream them into HDF5.
sample_cnt = 0
for i, data_label_filename in enumerate(data_label_files):
    print(data_label_filename)
    data, label = indoor3d_util.room2blocks_wrapper_normalized(data_label_filename, NUM_POINT, block_size=1.0, stride=0.5,
                                                               random_sample=False, sample_num=None)
    print('{0}, {1}'.format(data.shape, label.shape))
    # One room-name line per generated sample, aligned with the h5 contents.
    for _ in range(data.shape[0]):
        fout_room.write(os.path.basename(data_label_filename)[0:-4]+'\n')

    sample_cnt += data.shape[0]
    # Flush any remaining buffered samples when processing the last room.
    insert_batch(data, label, i == len(data_label_files)-1)

fout_room.close()
print("Total samples: {0}".format(sample_cnt))
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py
new file mode 100644
index 000000000..ea349d241
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py
@@ -0,0 +1,619 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+import glob
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+
+# -----------------------------------------------------------------------------
+# CONSTANTS
+# -----------------------------------------------------------------------------
+
# Root of the raw S3DIS (Stanford 3D Indoor Scenes) dataset.
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'Stanford3dDataset_v1.2_Aligned_Version')
# The 13 semantic class names, one per line in meta/class_names.txt;
# label ids follow the file's line order.
g_classes = [x.rstrip() for x in open(os.path.join(BASE_DIR, 'meta/class_names.txt'))]
g_class2label = {cls: i for i,cls in enumerate(g_classes)}
# RGB color used to visualize each class in exported OBJ files.
g_class2color = {'ceiling': [0,255,0],
                 'floor': [0,0,255],
                 'wall': [0,255,255],
                 'beam': [255,255,0],
                 'column': [255,0,255],
                 'window': [100,100,255],
                 'door': [200,200,100],
                 'table': [170,120,200],
                 'chair': [255,0,0],
                 'sofa': [200,100,100],
                 'bookcase': [10,200,100],
                 'board': [200,200,200],
                 'clutter': [50,50,50]}
# Labels kept in "easy view" mode (furniture and floor).
g_easy_view_labels = [7,8,9,10,11,1]
g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes}
+
+
+# -----------------------------------------------------------------------------
+# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES
+# -----------------------------------------------------------------------------
+
def collect_point_label(anno_path, out_filename, file_format='txt'):
    """ Convert original dataset files to data_label file (each line is XYZRGBL).
        We aggregated all the points from each instance in the room.

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save collected points and labels (each line is XYZRGBL)
        file_format: txt or numpy, determines what file format to save.
    Returns:
        None
    Note:
        the points are shifted before save, the most negative point is now at origin.
    """
    points_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        labels = np.ones((points.shape[0],1)) * g_class2label[cls]
        points_list.append(np.concatenate([points, labels], 1)) # Nx7

    data_label = np.concatenate(points_list, 0)
    # Shift so the most negative corner of the room sits at the origin.
    xyz_min = np.amin(data_label, axis=0)[0:3]
    data_label[:, 0:3] -= xyz_min

    if file_format=='txt':
        # Context manager guarantees the file is closed even if a write fails
        # (the original leaked the open handle on error).
        with open(out_filename, 'w') as fout:
            for i in range(data_label.shape[0]):
                fout.write('%f %f %f %d %d %d %d\n' % \
                              (data_label[i,0], data_label[i,1], data_label[i,2],
                               data_label[i,3], data_label[i,4], data_label[i,5],
                               data_label[i,6]))
    elif file_format=='numpy':
        np.save(out_filename, data_label)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
            (file_format))
        exit()
+
def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False):
    """ For visualization of a room from data_label file,
        input_filename: each line is X Y Z R G B L
        out_filename: OBJ filename,
            visualize input file by coloring point with label color
        easy_view: only visualize furnitures and floor
        no_wall: additionally skip points with labels 0 and 2
            (presumably ceiling/wall per meta/class_names.txt order — confirm)
        label_color: color by class label if True, else use the point's own RGB
    """
    data_label = np.loadtxt(input_filename)
    data = data_label[:, 0:6]
    label = data_label[:, -1].astype(int)
    # Context manager ensures the OBJ file is closed even if a write fails
    # (the original left the handle open on error).
    with open(out_filename, 'w') as fout:
        for i in range(data.shape[0]):
            color = g_label2color[label[i]]
            if easy_view and (label[i] not in g_easy_view_labels):
                continue
            if no_wall and ((label[i] == 2) or (label[i]==0)):
                continue
            if label_color:
                fout.write('v %f %f %f %d %d %d\n' % \
                    (data[i,0], data[i,1], data[i,2], color[0], color[1], color[2]))
            else:
                fout.write('v %f %f %f %d %d %d\n' % \
                    (data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], data[i,5]))
+
+
+
+# -----------------------------------------------------------------------------
+# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING
+# -----------------------------------------------------------------------------
+
def sample_data(data, num_sample):
    """ Resample `data` (N x ...) to exactly `num_sample` rows.

    if N == num_sample, data is returned unchanged.
    if N > num_sample, randomly keep num_sample of them.
    if N < num_sample, randomly duplicate samples to pad up.

    Returns:
        (resampled data, indices into the original data for each output row)
    """
    N = data.shape[0]
    if (N == num_sample):
        return data, range(N)
    elif (N > num_sample):
        sample = np.random.choice(N, num_sample)
        return data[sample, ...], sample
    else:
        sample = np.random.choice(N, num_sample-N)
        dup_data = data[sample, ...]
        # BUG FIX: the original `range(N)+list(sample)` raises TypeError on
        # Python 3 (range objects do not support +); build the list explicitly.
        return np.concatenate([data, dup_data], 0), list(range(N))+list(sample)
+
def sample_data_label(data, label, num_sample):
    """Resample `data` to `num_sample` rows and gather the matching labels."""
    sampled, indices = sample_data(data, num_sample)
    return sampled, label[indices]
+
def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
                random_sample=False, sample_num=None, sample_aug=1):
    """ Prepare block training data.
    Args:
        data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
            assumes the data is shifted (min point is origin) and aligned
            (aligned with XYZ axis)
        label: N size uint8 numpy array from 0-12
        num_point: int, how many points to sample in each block
        block_size: float, physical size of the block in meters
        stride: float, stride for block sweeping
        random_sample: bool, if True, we will randomly sample blocks in the room
        sample_num: int, if random sample, how many blocks to sample
            [default: room area]
        sample_aug: if random sample, how much aug
    Returns:
        block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]
        block_labels: K x num_point x 1 np array of uint8 labels

    TODO: for this version, blocking is in fixed, non-overlapping pattern.
    """
    assert(stride<=block_size)

    # Room extent along x, y, z (data is assumed shifted to the origin).
    limit = np.amax(data, 0)[0:3]

    # Get the corner location for our sampling blocks
    xbeg_list = []
    ybeg_list = []
    if not random_sample:
        # Regular grid of block corners swept over the xy footprint.
        num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1
        num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1
        for i in range(num_block_x):
            for j in range(num_block_y):
                xbeg_list.append(i*stride)
                ybeg_list.append(j*stride)
    else:
        # Uniformly random block corners; count defaults to room area x aug.
        num_block_x = int(np.ceil(limit[0] / block_size))
        num_block_y = int(np.ceil(limit[1] / block_size))
        if sample_num is None:
            sample_num = num_block_x * num_block_y * sample_aug
        for _ in range(sample_num):
            xbeg = np.random.uniform(-block_size, limit[0])
            ybeg = np.random.uniform(-block_size, limit[1])
            xbeg_list.append(xbeg)
            ybeg_list.append(ybeg)

    # Collect blocks
    block_data_list = []
    block_label_list = []
    idx = 0  # NOTE(review): redundant — idx is rebound by the loop below
    for idx in range(len(xbeg_list)):
        xbeg = xbeg_list[idx]
        ybeg = ybeg_list[idx]
        # Points whose xy falls inside this block's footprint.
        xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg)
        ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg)
        cond = xcond & ycond
        if np.sum(cond) < 100: # discard block if there are less than 100 pts.
            continue

        block_data = data[cond, :]
        block_label = label[cond]

        # randomly subsample data
        block_data_sampled, block_label_sampled = \
            sample_data_label(block_data, block_label, num_point)
        block_data_list.append(np.expand_dims(block_data_sampled, 0))
        block_label_list.append(np.expand_dims(block_label_sampled, 0))

    # NOTE(review): if every candidate block was discarded (<100 pts each),
    # np.concatenate raises on the empty lists — confirm callers never hit this.
    return np.concatenate(block_data_list, 0), \
           np.concatenate(block_label_list, 0)
+
+
def room2blocks_plus(data_label, num_point, block_size, stride,
                     random_sample, sample_num, sample_aug):
    """Split a combined data+label array into blocks, scaling RGB to [0,1].

    NOTE(review): the division below operates on a view of data_label, so the
    caller's RGB columns are modified in place — presumably intentional.
    """
    xyz_rgb = data_label[:,0:6]
    xyz_rgb[:,3:6] /= 255.0
    labels = data_label[:,-1].astype(np.uint8)

    return room2blocks(xyz_rgb, labels, num_point, block_size, stride,
                       random_sample, sample_num, sample_aug)
+
def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0,
                        random_sample=False, sample_num=None, sample_aug=1):
    """Load a room data_label file (.txt or .npy) and split it into blocks."""
    suffix = data_label_filename[-3:]
    if suffix == 'txt':
        data_label = np.loadtxt(data_label_filename)
    elif suffix == 'npy':
        data_label = np.load(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2blocks_plus(data_label, num_point, block_size, stride,
                            random_sample, sample_num, sample_aug)
+
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with input filename and RGB preprocessing.
        for each block centralize XYZ, add normalized XYZ as 678 channels

    Output per point is 9 channels: block-centered XY + original Z + RGB in
    [0,1] (channels 0-5) plus XYZ divided by the room extent (channels 6-8).
    NOTE(review): `data[:,3:6] /= 255.0` mutates data_label in place (the
    slice is a view) — confirm callers do not reuse the array afterwards.
    """
    data = data_label[:,0:6]
    data[:,3:6] /= 255.0
    label = data_label[:,-1].astype(np.uint8)
    # Room extents used to normalize XYZ into roughly [0,1].
    max_room_x = max(data[:,0])
    max_room_y = max(data[:,1])
    max_room_z = max(data[:,2])

    data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
                                          random_sample, sample_num, sample_aug)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        # Channels 6-8: coordinates normalized by the room's max extent.
        new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
        # Center each block's XY on the block midpoint (Z left unchanged).
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx+block_size/2)
        data_batch[b, :, 1] -= (miny+block_size/2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
+
+
def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,
                                   random_sample=False, sample_num=None, sample_aug=1):
    """Load a room file (.txt or .npy) and split into blocks with
    room-normalized XYZ appended as channels 6-8.

    Any other extension prints a message and terminates the process.
    """
    suffix = data_label_filename[-3:]
    if suffix == 'txt':
        data_label = np.loadtxt(data_label_filename)
    elif suffix == 'npy':
        data_label = np.load(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                       random_sample, sample_num, sample_aug)
+
def room2samples(data, label, sample_num_point):
    """Chop a whole (shuffled) room into consecutive fixed-size samples.

    Args:
        data: N x 6 array, columns 0-2 XYZ in meters (assumed shifted so
            the minimum point is at the origin and axis-aligned),
            columns 3-5 RGB in [0, 1].
        label: length-N uint8 array of class ids in 0-12.
        sample_num_point: number of points per output sample.
    Returns:
        sample_datas: K x sample_num_point x 6 array,
            K = ceil(N / sample_num_point).
        sample_labels: K x sample_num_point x 1 array.
    The last sample, if short, is padded with points drawn uniformly at
    random from the whole room.
    """
    total = data.shape[0]
    perm = np.arange(total)
    np.random.shuffle(perm)
    data = data[perm, :]
    label = label[perm]

    num_batches = int(np.ceil(total / float(sample_num_point)))
    sample_datas = np.zeros((num_batches, sample_num_point, 6))
    sample_labels = np.zeros((num_batches, sample_num_point, 1))

    for batch_idx in range(num_batches):
        start = batch_idx * sample_num_point
        stop = min(start + sample_num_point, total)
        count = stop - start
        sample_datas[batch_idx, 0:count, :] = data[start:stop, :]
        sample_labels[batch_idx, 0:count, 0] = label[start:stop]
        if count < sample_num_point:
            # Pad the final short sample with random points from the room.
            fill = np.random.choice(total, sample_num_point - count)
            sample_datas[batch_idx, count:, :] = data[fill, :]
            sample_labels[batch_idx, count:, 0] = label[fill]
    return sample_datas, sample_labels
+
def room2samples_plus_normalized(data_label, num_point):
    """Whole-room sampling with RGB scaling and normalized-XYZ channels.

    Scales RGB (columns 3-5 of data_label) into [0, 1] IN PLACE, chops the
    room into samples with room2samples, and appends XYZ divided by the
    room's max extent as channels 6-8 of every point.
    Returns (new_data_batch, label_batch); new_data_batch is K x num_point x 9.
    """
    xyz_rgb = data_label[:, 0:6]
    xyz_rgb[:, 3:6] /= 255.0
    seg = data_label[:, -1].astype(np.uint8)
    # Per-axis room extent used as the normalization denominator.
    room_max = [max(xyz_rgb[:, axis]) for axis in range(3)]

    data_batch, label_batch = room2samples(xyz_rgb, seg, num_point)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for axis in range(3):
        new_data_batch[:, :, 6 + axis] = data_batch[:, :, axis] / room_max[axis]
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
+
+
def room2samples_wrapper_normalized(data_label_filename, num_point):
    """Load a room file (.txt or .npy) and produce whole-room samples with
    room-normalized XYZ channels appended.

    Any other extension prints a message and terminates the process.
    """
    suffix = data_label_filename[-3:]
    if suffix == 'txt':
        data_label = np.loadtxt(data_label_filename)
    elif suffix == 'npy':
        data_label = np.load(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2samples_plus_normalized(data_label, num_point)
+
+
+# -----------------------------------------------------------------------------
+# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation)
+# -----------------------------------------------------------------------------
+
def collect_bounding_box(anno_path, out_filename):
    """Compute one axis-aligned bounding box per instance in a room.

    Args:
        anno_path: path to annotations, e.g. Area_1/office_2/Annotations/.
            Each *.txt file inside is one instance named <class>_<idx>.txt.
        out_filename: output text file; each line is
            "x1 y1 z1 x2 y2 z2 label" with (x1,y1,z1) the corner closer
            to the origin.
    Returns:
        None (side effect: writes out_filename).
    Note:
        Boxes are shifted so the room's most negative corner is the origin.
    """
    bbox_label_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Tightest axis-aligned bounding box of this instance.
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)

    bbox_label = np.concatenate(bbox_label_list, 0)
    # Shift min AND max corners so the room's most negative point is origin.
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min
    bbox_label[:, 3:6] -= room_xyz_min

    # FIX: use a context manager so the file is closed even if a write
    # raises (the original open()/close() pair leaked the handle on error).
    with open(out_filename, 'w') as fout:
        for i in range(bbox_label.shape[0]):
            fout.write('%f %f %f %f %f %f %d\n' % \
                (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],
                 bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],
                 bbox_label[i,6]))
+
def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False):
    """Write one OBJ/MTL file pair per bounding box for visualization.

    Args:
        input_filename: text file, each line "x1 y1 z1 x2 y2 z2 label".
        out_filename_prefix: prefix for output files; each kept box becomes
            <prefix>_<classname>_<i>.obj and .mtl, colored by g_label2color.
        easy_view: if True, only boxes whose label is in g_easy_view_labels
            are written.
    Returns:
        None (side effect: OBJ/MTL files on disk).
    """
    bbox_label = np.loadtxt(input_filename)
    bbox = bbox_label[:, 0:6]
    label = bbox_label[:, -1].astype(int)
    v_cnt = 0 # count vertex
    ins_cnt = 0 # count instance
    for i in range(bbox.shape[0]):
        if easy_view and (label[i] not in g_easy_view_labels):
            continue
        # One OBJ/MTL pair per instance, named by class and running index.
        obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj'
        mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl'
        fout_obj = open(obj_filename, 'w')
        fout_mtl = open(mtl_filename, 'w')
        fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))

        # Box extents (a, b, c) and min corner (x, y, z).
        length = bbox[i, 3:6] - bbox[i, 0:3]
        a = length[0]
        b = length[1]
        c = length[2]
        x = bbox[i, 0]
        y = bbox[i, 1]
        z = bbox[i, 2]
        color = np.array(g_label2color[label[i]], dtype=float) / 255.0

        material = 'material%d' % (ins_cnt)
        fout_obj.write('usemtl %s\n' % (material))
        # Eight cuboid corners: vertices 1-4 are the top face (z+c),
        # vertices 5-8 the bottom face.
        fout_obj.write('v %f %f %f\n' % (x,y,z+c))
        fout_obj.write('v %f %f %f\n' % (x,y+b,z+c))
        fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c))
        fout_obj.write('v %f %f %f\n' % (x+a,y,z+c))
        fout_obj.write('v %f %f %f\n' % (x,y,z))
        fout_obj.write('v %f %f %f\n' % (x,y+b,z))
        fout_obj.write('v %f %f %f\n' % (x+a,y+b,z))
        fout_obj.write('v %f %f %f\n' % (x+a,y,z))
        fout_obj.write('g default\n')
        # Each instance has its own file, so face indices restart at 1;
        # v_cnt is reset here and the "+= 8" at the bottom never takes effect.
        v_cnt = 0 # for individual box
        # Six quad faces of the cuboid, referencing the 8 vertices above.
        fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))
        fout_obj.write('\n')

        # Material definition (diffuse color only) in the paired MTL file.
        fout_mtl.write('newmtl %s\n' % (material))
        fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
        fout_mtl.write('\n')
        fout_obj.close()
        fout_mtl.close()

        v_cnt += 8
        ins_cnt += 1
+
def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False):
    """Write ALL bounding boxes of a room into a single OBJ/MTL file pair.

    Args:
        input_filename: text file, each line "x1 y1 z1 x2 y2 z2 label".
        out_filename_prefix: OBJ/MTL filename prefix; boxes are colored
            by g_label2color.
        easy_view: if True, only boxes whose label is in g_easy_view_labels
            are written.
        permute: if not None, a length-3 axis permutation applied to XYZ
            for rendering, e.g. [0, 2, 1].
        center: if True, shift boxes so the room midpoint is the origin AND
            rescale the whole room by its largest half-extent.
        exclude_table: if True, skip boxes labeled 'table'.
    Returns:
        None (side effect: one OBJ and one MTL file on disk).
    """
    bbox_label = np.loadtxt(input_filename)
    bbox = bbox_label[:, 0:6]
    if permute is not None:
        assert(len(permute)==3)
        permute = np.array(permute)
        # Apply the same axis permutation to both box corners.
        bbox[:,0:3] = bbox[:,permute]
        bbox[:,3:6] = bbox[:,permute+3]
    if center:
        # Shift so the room midpoint is at the origin, then normalize by
        # the largest half-extent (boxes end up roughly within [-1, 1]).
        xyz_max = np.amax(bbox[:,3:6], 0)
        bbox[:,0:3] -= (xyz_max/2.0)
        bbox[:,3:6] -= (xyz_max/2.0)
        bbox /= np.max(xyz_max/2.0)
    label = bbox_label[:, -1].astype(int)
    obj_filename = out_filename_prefix+'.obj'
    mtl_filename = out_filename_prefix+'.mtl'

    fout_obj = open(obj_filename, 'w')
    fout_mtl = open(mtl_filename, 'w')
    fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))
    v_cnt = 0 # count vertex
    ins_cnt = 0 # count instance
    for i in range(bbox.shape[0]):
        if easy_view and (label[i] not in g_easy_view_labels):
            continue
        if exclude_table and label[i] == g_classes.index('table'):
            continue

        # Box extents (a, b, c) and min corner (x, y, z).
        length = bbox[i, 3:6] - bbox[i, 0:3]
        a = length[0]
        b = length[1]
        c = length[2]
        x = bbox[i, 0]
        y = bbox[i, 1]
        z = bbox[i, 2]
        color = np.array(g_label2color[label[i]], dtype=float) / 255.0

        material = 'material%d' % (ins_cnt)
        fout_obj.write('usemtl %s\n' % (material))
        # Eight cuboid corners: vertices 1-4 are the top face (z+c),
        # vertices 5-8 the bottom face.
        fout_obj.write('v %f %f %f\n' % (x,y,z+c))
        fout_obj.write('v %f %f %f\n' % (x,y+b,z+c))
        fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c))
        fout_obj.write('v %f %f %f\n' % (x+a,y,z+c))
        fout_obj.write('v %f %f %f\n' % (x,y,z))
        fout_obj.write('v %f %f %f\n' % (x,y+b,z))
        fout_obj.write('v %f %f %f\n' % (x+a,y+b,z))
        fout_obj.write('v %f %f %f\n' % (x+a,y,z))
        fout_obj.write('g default\n')
        # All boxes share one OBJ file, so faces are offset by v_cnt
        # (8 vertices per box written so far).
        fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))
        fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))
        fout_obj.write('\n')

        # One material (diffuse color only) per instance.
        fout_mtl.write('newmtl %s\n' % (material))
        fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
        fout_mtl.write('\n')

        v_cnt += 8
        ins_cnt += 1

    fout_obj.close()
    fout_mtl.close()
+
+
def collect_point_bounding_box(anno_path, out_filename, file_format):
    """Save each point's XYZRGBL plus the bounding box of its parent instance.

    Bounding boxes are axis-aligned.  For every point the offset to the box
    center and the box half-dimensions are stored.

    Args:
        anno_path: path to annotations, e.g. Area_1/office_2/Annotations/.
        out_filename: output path; each row is
            "X Y Z R G B L offsetX offsetY offsetZ a b c" where
            center = point + offset and a, b, c are half-extents
            (x1 = cx - a, x2 = cx + a, etc.).
        file_format: 'txt' or 'numpy'; anything else prints an error and
            terminates the process.
    Returns:
        None (side effect: writes out_filename).
    Note:
        Points are shifted so the room's most negative point is the origin.
    """
    point_bbox_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f) # Nx6
        label = g_class2label[cls] # scalar class id for this instance
        # Tightest axis-aligned bounding box of this instance.
        xyz_min = np.amin(points[:, 0:3], axis=0) # 3,
        xyz_max = np.amax(points[:, 0:3], axis=0) # 3,
        xyz_center = (xyz_min + xyz_max) / 2
        dimension = (xyz_max - xyz_min) / 2   # half-extents a, b, c

        # Per-point offset to the box center; box dims/label broadcast to N rows.
        xyz_offsets = xyz_center - points[:,0:3] # Nx3
        dimensions = np.ones((points.shape[0],3)) * dimension # Nx3
        labels = np.ones((points.shape[0],1)) * label # Nx1
        point_bbox_list.append(np.concatenate([points, labels,
            xyz_offsets, dimensions], 1)) # Nx13

    point_bbox = np.concatenate(point_bbox_list, 0) # (sum of all N_i) x 13
    # Shift XYZ so the room's most negative point is the origin.
    room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0)
    point_bbox[:, 0:3] -= room_xyz_min

    if file_format == 'txt':
        fout = open(out_filename, 'w')
        for i in range(point_bbox.shape[0]):
            fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' % \
                (point_bbox[i,0], point_bbox[i,1], point_bbox[i,2],
                 point_bbox[i,3], point_bbox[i,4], point_bbox[i,5],
                 point_bbox[i,6],
                 point_bbox[i,7], point_bbox[i,8], point_bbox[i,9],
                 point_bbox[i,10], point_bbox[i,11], point_bbox[i,12]))

        fout.close()
    elif file_format == 'numpy':
        np.save(out_filename, point_bbox)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
            (file_format))
        exit()
+
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt
new file mode 100644
index 000000000..636e686a5
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt
@@ -0,0 +1,272 @@
+Area_1_conferenceRoom_1.npy
+Area_1_conferenceRoom_2.npy
+Area_1_copyRoom_1.npy
+Area_1_hallway_1.npy
+Area_1_hallway_2.npy
+Area_1_hallway_3.npy
+Area_1_hallway_4.npy
+Area_1_hallway_5.npy
+Area_1_hallway_6.npy
+Area_1_hallway_7.npy
+Area_1_hallway_8.npy
+Area_1_office_10.npy
+Area_1_office_11.npy
+Area_1_office_12.npy
+Area_1_office_13.npy
+Area_1_office_14.npy
+Area_1_office_15.npy
+Area_1_office_16.npy
+Area_1_office_17.npy
+Area_1_office_18.npy
+Area_1_office_19.npy
+Area_1_office_1.npy
+Area_1_office_20.npy
+Area_1_office_21.npy
+Area_1_office_22.npy
+Area_1_office_23.npy
+Area_1_office_24.npy
+Area_1_office_25.npy
+Area_1_office_26.npy
+Area_1_office_27.npy
+Area_1_office_28.npy
+Area_1_office_29.npy
+Area_1_office_2.npy
+Area_1_office_30.npy
+Area_1_office_31.npy
+Area_1_office_3.npy
+Area_1_office_4.npy
+Area_1_office_5.npy
+Area_1_office_6.npy
+Area_1_office_7.npy
+Area_1_office_8.npy
+Area_1_office_9.npy
+Area_1_pantry_1.npy
+Area_1_WC_1.npy
+Area_2_auditorium_1.npy
+Area_2_auditorium_2.npy
+Area_2_conferenceRoom_1.npy
+Area_2_hallway_10.npy
+Area_2_hallway_11.npy
+Area_2_hallway_12.npy
+Area_2_hallway_1.npy
+Area_2_hallway_2.npy
+Area_2_hallway_3.npy
+Area_2_hallway_4.npy
+Area_2_hallway_5.npy
+Area_2_hallway_6.npy
+Area_2_hallway_7.npy
+Area_2_hallway_8.npy
+Area_2_hallway_9.npy
+Area_2_office_10.npy
+Area_2_office_11.npy
+Area_2_office_12.npy
+Area_2_office_13.npy
+Area_2_office_14.npy
+Area_2_office_1.npy
+Area_2_office_2.npy
+Area_2_office_3.npy
+Area_2_office_4.npy
+Area_2_office_5.npy
+Area_2_office_6.npy
+Area_2_office_7.npy
+Area_2_office_8.npy
+Area_2_office_9.npy
+Area_2_storage_1.npy
+Area_2_storage_2.npy
+Area_2_storage_3.npy
+Area_2_storage_4.npy
+Area_2_storage_5.npy
+Area_2_storage_6.npy
+Area_2_storage_7.npy
+Area_2_storage_8.npy
+Area_2_storage_9.npy
+Area_2_WC_1.npy
+Area_2_WC_2.npy
+Area_3_conferenceRoom_1.npy
+Area_3_hallway_1.npy
+Area_3_hallway_2.npy
+Area_3_hallway_3.npy
+Area_3_hallway_4.npy
+Area_3_hallway_5.npy
+Area_3_hallway_6.npy
+Area_3_lounge_1.npy
+Area_3_lounge_2.npy
+Area_3_office_10.npy
+Area_3_office_1.npy
+Area_3_office_2.npy
+Area_3_office_3.npy
+Area_3_office_4.npy
+Area_3_office_5.npy
+Area_3_office_6.npy
+Area_3_office_7.npy
+Area_3_office_8.npy
+Area_3_office_9.npy
+Area_3_storage_1.npy
+Area_3_storage_2.npy
+Area_3_WC_1.npy
+Area_3_WC_2.npy
+Area_4_conferenceRoom_1.npy
+Area_4_conferenceRoom_2.npy
+Area_4_conferenceRoom_3.npy
+Area_4_hallway_10.npy
+Area_4_hallway_11.npy
+Area_4_hallway_12.npy
+Area_4_hallway_13.npy
+Area_4_hallway_14.npy
+Area_4_hallway_1.npy
+Area_4_hallway_2.npy
+Area_4_hallway_3.npy
+Area_4_hallway_4.npy
+Area_4_hallway_5.npy
+Area_4_hallway_6.npy
+Area_4_hallway_7.npy
+Area_4_hallway_8.npy
+Area_4_hallway_9.npy
+Area_4_lobby_1.npy
+Area_4_lobby_2.npy
+Area_4_office_10.npy
+Area_4_office_11.npy
+Area_4_office_12.npy
+Area_4_office_13.npy
+Area_4_office_14.npy
+Area_4_office_15.npy
+Area_4_office_16.npy
+Area_4_office_17.npy
+Area_4_office_18.npy
+Area_4_office_19.npy
+Area_4_office_1.npy
+Area_4_office_20.npy
+Area_4_office_21.npy
+Area_4_office_22.npy
+Area_4_office_2.npy
+Area_4_office_3.npy
+Area_4_office_4.npy
+Area_4_office_5.npy
+Area_4_office_6.npy
+Area_4_office_7.npy
+Area_4_office_8.npy
+Area_4_office_9.npy
+Area_4_storage_1.npy
+Area_4_storage_2.npy
+Area_4_storage_3.npy
+Area_4_storage_4.npy
+Area_4_WC_1.npy
+Area_4_WC_2.npy
+Area_4_WC_3.npy
+Area_4_WC_4.npy
+Area_5_conferenceRoom_1.npy
+Area_5_conferenceRoom_2.npy
+Area_5_conferenceRoom_3.npy
+Area_5_hallway_10.npy
+Area_5_hallway_11.npy
+Area_5_hallway_12.npy
+Area_5_hallway_13.npy
+Area_5_hallway_14.npy
+Area_5_hallway_15.npy
+Area_5_hallway_1.npy
+Area_5_hallway_2.npy
+Area_5_hallway_3.npy
+Area_5_hallway_4.npy
+Area_5_hallway_5.npy
+Area_5_hallway_6.npy
+Area_5_hallway_7.npy
+Area_5_hallway_8.npy
+Area_5_hallway_9.npy
+Area_5_lobby_1.npy
+Area_5_office_10.npy
+Area_5_office_11.npy
+Area_5_office_12.npy
+Area_5_office_13.npy
+Area_5_office_14.npy
+Area_5_office_15.npy
+Area_5_office_16.npy
+Area_5_office_17.npy
+Area_5_office_18.npy
+Area_5_office_19.npy
+Area_5_office_1.npy
+Area_5_office_20.npy
+Area_5_office_21.npy
+Area_5_office_22.npy
+Area_5_office_23.npy
+Area_5_office_24.npy
+Area_5_office_25.npy
+Area_5_office_26.npy
+Area_5_office_27.npy
+Area_5_office_28.npy
+Area_5_office_29.npy
+Area_5_office_2.npy
+Area_5_office_30.npy
+Area_5_office_31.npy
+Area_5_office_32.npy
+Area_5_office_33.npy
+Area_5_office_34.npy
+Area_5_office_35.npy
+Area_5_office_36.npy
+Area_5_office_37.npy
+Area_5_office_38.npy
+Area_5_office_39.npy
+Area_5_office_3.npy
+Area_5_office_40.npy
+Area_5_office_41.npy
+Area_5_office_42.npy
+Area_5_office_4.npy
+Area_5_office_5.npy
+Area_5_office_6.npy
+Area_5_office_7.npy
+Area_5_office_8.npy
+Area_5_office_9.npy
+Area_5_pantry_1.npy
+Area_5_storage_1.npy
+Area_5_storage_2.npy
+Area_5_storage_3.npy
+Area_5_storage_4.npy
+Area_5_WC_1.npy
+Area_5_WC_2.npy
+Area_6_conferenceRoom_1.npy
+Area_6_copyRoom_1.npy
+Area_6_hallway_1.npy
+Area_6_hallway_2.npy
+Area_6_hallway_3.npy
+Area_6_hallway_4.npy
+Area_6_hallway_5.npy
+Area_6_hallway_6.npy
+Area_6_lounge_1.npy
+Area_6_office_10.npy
+Area_6_office_11.npy
+Area_6_office_12.npy
+Area_6_office_13.npy
+Area_6_office_14.npy
+Area_6_office_15.npy
+Area_6_office_16.npy
+Area_6_office_17.npy
+Area_6_office_18.npy
+Area_6_office_19.npy
+Area_6_office_1.npy
+Area_6_office_20.npy
+Area_6_office_21.npy
+Area_6_office_22.npy
+Area_6_office_23.npy
+Area_6_office_24.npy
+Area_6_office_25.npy
+Area_6_office_26.npy
+Area_6_office_27.npy
+Area_6_office_28.npy
+Area_6_office_29.npy
+Area_6_office_2.npy
+Area_6_office_30.npy
+Area_6_office_31.npy
+Area_6_office_32.npy
+Area_6_office_33.npy
+Area_6_office_34.npy
+Area_6_office_35.npy
+Area_6_office_36.npy
+Area_6_office_37.npy
+Area_6_office_3.npy
+Area_6_office_4.npy
+Area_6_office_5.npy
+Area_6_office_6.npy
+Area_6_office_7.npy
+Area_6_office_8.npy
+Area_6_office_9.npy
+Area_6_openspace_1.npy
+Area_6_pantry_1.npy
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt
new file mode 100644
index 000000000..0ad2f2599
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt
@@ -0,0 +1,272 @@
+Area_1/conferenceRoom_1/Annotations
+Area_1/conferenceRoom_2/Annotations
+Area_1/copyRoom_1/Annotations
+Area_1/hallway_1/Annotations
+Area_1/hallway_2/Annotations
+Area_1/hallway_3/Annotations
+Area_1/hallway_4/Annotations
+Area_1/hallway_5/Annotations
+Area_1/hallway_6/Annotations
+Area_1/hallway_7/Annotations
+Area_1/hallway_8/Annotations
+Area_1/office_10/Annotations
+Area_1/office_11/Annotations
+Area_1/office_12/Annotations
+Area_1/office_13/Annotations
+Area_1/office_14/Annotations
+Area_1/office_15/Annotations
+Area_1/office_16/Annotations
+Area_1/office_17/Annotations
+Area_1/office_18/Annotations
+Area_1/office_19/Annotations
+Area_1/office_1/Annotations
+Area_1/office_20/Annotations
+Area_1/office_21/Annotations
+Area_1/office_22/Annotations
+Area_1/office_23/Annotations
+Area_1/office_24/Annotations
+Area_1/office_25/Annotations
+Area_1/office_26/Annotations
+Area_1/office_27/Annotations
+Area_1/office_28/Annotations
+Area_1/office_29/Annotations
+Area_1/office_2/Annotations
+Area_1/office_30/Annotations
+Area_1/office_31/Annotations
+Area_1/office_3/Annotations
+Area_1/office_4/Annotations
+Area_1/office_5/Annotations
+Area_1/office_6/Annotations
+Area_1/office_7/Annotations
+Area_1/office_8/Annotations
+Area_1/office_9/Annotations
+Area_1/pantry_1/Annotations
+Area_1/WC_1/Annotations
+Area_2/auditorium_1/Annotations
+Area_2/auditorium_2/Annotations
+Area_2/conferenceRoom_1/Annotations
+Area_2/hallway_10/Annotations
+Area_2/hallway_11/Annotations
+Area_2/hallway_12/Annotations
+Area_2/hallway_1/Annotations
+Area_2/hallway_2/Annotations
+Area_2/hallway_3/Annotations
+Area_2/hallway_4/Annotations
+Area_2/hallway_5/Annotations
+Area_2/hallway_6/Annotations
+Area_2/hallway_7/Annotations
+Area_2/hallway_8/Annotations
+Area_2/hallway_9/Annotations
+Area_2/office_10/Annotations
+Area_2/office_11/Annotations
+Area_2/office_12/Annotations
+Area_2/office_13/Annotations
+Area_2/office_14/Annotations
+Area_2/office_1/Annotations
+Area_2/office_2/Annotations
+Area_2/office_3/Annotations
+Area_2/office_4/Annotations
+Area_2/office_5/Annotations
+Area_2/office_6/Annotations
+Area_2/office_7/Annotations
+Area_2/office_8/Annotations
+Area_2/office_9/Annotations
+Area_2/storage_1/Annotations
+Area_2/storage_2/Annotations
+Area_2/storage_3/Annotations
+Area_2/storage_4/Annotations
+Area_2/storage_5/Annotations
+Area_2/storage_6/Annotations
+Area_2/storage_7/Annotations
+Area_2/storage_8/Annotations
+Area_2/storage_9/Annotations
+Area_2/WC_1/Annotations
+Area_2/WC_2/Annotations
+Area_3/conferenceRoom_1/Annotations
+Area_3/hallway_1/Annotations
+Area_3/hallway_2/Annotations
+Area_3/hallway_3/Annotations
+Area_3/hallway_4/Annotations
+Area_3/hallway_5/Annotations
+Area_3/hallway_6/Annotations
+Area_3/lounge_1/Annotations
+Area_3/lounge_2/Annotations
+Area_3/office_10/Annotations
+Area_3/office_1/Annotations
+Area_3/office_2/Annotations
+Area_3/office_3/Annotations
+Area_3/office_4/Annotations
+Area_3/office_5/Annotations
+Area_3/office_6/Annotations
+Area_3/office_7/Annotations
+Area_3/office_8/Annotations
+Area_3/office_9/Annotations
+Area_3/storage_1/Annotations
+Area_3/storage_2/Annotations
+Area_3/WC_1/Annotations
+Area_3/WC_2/Annotations
+Area_4/conferenceRoom_1/Annotations
+Area_4/conferenceRoom_2/Annotations
+Area_4/conferenceRoom_3/Annotations
+Area_4/hallway_10/Annotations
+Area_4/hallway_11/Annotations
+Area_4/hallway_12/Annotations
+Area_4/hallway_13/Annotations
+Area_4/hallway_14/Annotations
+Area_4/hallway_1/Annotations
+Area_4/hallway_2/Annotations
+Area_4/hallway_3/Annotations
+Area_4/hallway_4/Annotations
+Area_4/hallway_5/Annotations
+Area_4/hallway_6/Annotations
+Area_4/hallway_7/Annotations
+Area_4/hallway_8/Annotations
+Area_4/hallway_9/Annotations
+Area_4/lobby_1/Annotations
+Area_4/lobby_2/Annotations
+Area_4/office_10/Annotations
+Area_4/office_11/Annotations
+Area_4/office_12/Annotations
+Area_4/office_13/Annotations
+Area_4/office_14/Annotations
+Area_4/office_15/Annotations
+Area_4/office_16/Annotations
+Area_4/office_17/Annotations
+Area_4/office_18/Annotations
+Area_4/office_19/Annotations
+Area_4/office_1/Annotations
+Area_4/office_20/Annotations
+Area_4/office_21/Annotations
+Area_4/office_22/Annotations
+Area_4/office_2/Annotations
+Area_4/office_3/Annotations
+Area_4/office_4/Annotations
+Area_4/office_5/Annotations
+Area_4/office_6/Annotations
+Area_4/office_7/Annotations
+Area_4/office_8/Annotations
+Area_4/office_9/Annotations
+Area_4/storage_1/Annotations
+Area_4/storage_2/Annotations
+Area_4/storage_3/Annotations
+Area_4/storage_4/Annotations
+Area_4/WC_1/Annotations
+Area_4/WC_2/Annotations
+Area_4/WC_3/Annotations
+Area_4/WC_4/Annotations
+Area_5/conferenceRoom_1/Annotations
+Area_5/conferenceRoom_2/Annotations
+Area_5/conferenceRoom_3/Annotations
+Area_5/hallway_10/Annotations
+Area_5/hallway_11/Annotations
+Area_5/hallway_12/Annotations
+Area_5/hallway_13/Annotations
+Area_5/hallway_14/Annotations
+Area_5/hallway_15/Annotations
+Area_5/hallway_1/Annotations
+Area_5/hallway_2/Annotations
+Area_5/hallway_3/Annotations
+Area_5/hallway_4/Annotations
+Area_5/hallway_5/Annotations
+Area_5/hallway_6/Annotations
+Area_5/hallway_7/Annotations
+Area_5/hallway_8/Annotations
+Area_5/hallway_9/Annotations
+Area_5/lobby_1/Annotations
+Area_5/office_10/Annotations
+Area_5/office_11/Annotations
+Area_5/office_12/Annotations
+Area_5/office_13/Annotations
+Area_5/office_14/Annotations
+Area_5/office_15/Annotations
+Area_5/office_16/Annotations
+Area_5/office_17/Annotations
+Area_5/office_18/Annotations
+Area_5/office_19/Annotations
+Area_5/office_1/Annotations
+Area_5/office_20/Annotations
+Area_5/office_21/Annotations
+Area_5/office_22/Annotations
+Area_5/office_23/Annotations
+Area_5/office_24/Annotations
+Area_5/office_25/Annotations
+Area_5/office_26/Annotations
+Area_5/office_27/Annotations
+Area_5/office_28/Annotations
+Area_5/office_29/Annotations
+Area_5/office_2/Annotations
+Area_5/office_30/Annotations
+Area_5/office_31/Annotations
+Area_5/office_32/Annotations
+Area_5/office_33/Annotations
+Area_5/office_34/Annotations
+Area_5/office_35/Annotations
+Area_5/office_36/Annotations
+Area_5/office_37/Annotations
+Area_5/office_38/Annotations
+Area_5/office_39/Annotations
+Area_5/office_3/Annotations
+Area_5/office_40/Annotations
+Area_5/office_41/Annotations
+Area_5/office_42/Annotations
+Area_5/office_4/Annotations
+Area_5/office_5/Annotations
+Area_5/office_6/Annotations
+Area_5/office_7/Annotations
+Area_5/office_8/Annotations
+Area_5/office_9/Annotations
+Area_5/pantry_1/Annotations
+Area_5/storage_1/Annotations
+Area_5/storage_2/Annotations
+Area_5/storage_3/Annotations
+Area_5/storage_4/Annotations
+Area_5/WC_1/Annotations
+Area_5/WC_2/Annotations
+Area_6/conferenceRoom_1/Annotations
+Area_6/copyRoom_1/Annotations
+Area_6/hallway_1/Annotations
+Area_6/hallway_2/Annotations
+Area_6/hallway_3/Annotations
+Area_6/hallway_4/Annotations
+Area_6/hallway_5/Annotations
+Area_6/hallway_6/Annotations
+Area_6/lounge_1/Annotations
+Area_6/office_10/Annotations
+Area_6/office_11/Annotations
+Area_6/office_12/Annotations
+Area_6/office_13/Annotations
+Area_6/office_14/Annotations
+Area_6/office_15/Annotations
+Area_6/office_16/Annotations
+Area_6/office_17/Annotations
+Area_6/office_18/Annotations
+Area_6/office_19/Annotations
+Area_6/office_1/Annotations
+Area_6/office_20/Annotations
+Area_6/office_21/Annotations
+Area_6/office_22/Annotations
+Area_6/office_23/Annotations
+Area_6/office_24/Annotations
+Area_6/office_25/Annotations
+Area_6/office_26/Annotations
+Area_6/office_27/Annotations
+Area_6/office_28/Annotations
+Area_6/office_29/Annotations
+Area_6/office_2/Annotations
+Area_6/office_30/Annotations
+Area_6/office_31/Annotations
+Area_6/office_32/Annotations
+Area_6/office_33/Annotations
+Area_6/office_34/Annotations
+Area_6/office_35/Annotations
+Area_6/office_36/Annotations
+Area_6/office_37/Annotations
+Area_6/office_3/Annotations
+Area_6/office_4/Annotations
+Area_6/office_5/Annotations
+Area_6/office_6/Annotations
+Area_6/office_7/Annotations
+Area_6/office_8/Annotations
+Area_6/office_9/Annotations
+Area_6/openspace_1/Annotations
+Area_6/pantry_1/Annotations
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt
new file mode 100644
index 000000000..d067baa09
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt
@@ -0,0 +1,48 @@
+data/stanford_indoor3d/Area_6_conferenceRoom_1.npy
+data/stanford_indoor3d/Area_6_copyRoom_1.npy
+data/stanford_indoor3d/Area_6_hallway_1.npy
+data/stanford_indoor3d/Area_6_hallway_2.npy
+data/stanford_indoor3d/Area_6_hallway_3.npy
+data/stanford_indoor3d/Area_6_hallway_4.npy
+data/stanford_indoor3d/Area_6_hallway_5.npy
+data/stanford_indoor3d/Area_6_hallway_6.npy
+data/stanford_indoor3d/Area_6_lounge_1.npy
+data/stanford_indoor3d/Area_6_office_10.npy
+data/stanford_indoor3d/Area_6_office_11.npy
+data/stanford_indoor3d/Area_6_office_12.npy
+data/stanford_indoor3d/Area_6_office_13.npy
+data/stanford_indoor3d/Area_6_office_14.npy
+data/stanford_indoor3d/Area_6_office_15.npy
+data/stanford_indoor3d/Area_6_office_16.npy
+data/stanford_indoor3d/Area_6_office_17.npy
+data/stanford_indoor3d/Area_6_office_18.npy
+data/stanford_indoor3d/Area_6_office_19.npy
+data/stanford_indoor3d/Area_6_office_1.npy
+data/stanford_indoor3d/Area_6_office_20.npy
+data/stanford_indoor3d/Area_6_office_21.npy
+data/stanford_indoor3d/Area_6_office_22.npy
+data/stanford_indoor3d/Area_6_office_23.npy
+data/stanford_indoor3d/Area_6_office_24.npy
+data/stanford_indoor3d/Area_6_office_25.npy
+data/stanford_indoor3d/Area_6_office_26.npy
+data/stanford_indoor3d/Area_6_office_27.npy
+data/stanford_indoor3d/Area_6_office_28.npy
+data/stanford_indoor3d/Area_6_office_29.npy
+data/stanford_indoor3d/Area_6_office_2.npy
+data/stanford_indoor3d/Area_6_office_30.npy
+data/stanford_indoor3d/Area_6_office_31.npy
+data/stanford_indoor3d/Area_6_office_32.npy
+data/stanford_indoor3d/Area_6_office_33.npy
+data/stanford_indoor3d/Area_6_office_34.npy
+data/stanford_indoor3d/Area_6_office_35.npy
+data/stanford_indoor3d/Area_6_office_36.npy
+data/stanford_indoor3d/Area_6_office_37.npy
+data/stanford_indoor3d/Area_6_office_3.npy
+data/stanford_indoor3d/Area_6_office_4.npy
+data/stanford_indoor3d/Area_6_office_5.npy
+data/stanford_indoor3d/Area_6_office_6.npy
+data/stanford_indoor3d/Area_6_office_7.npy
+data/stanford_indoor3d/Area_6_office_8.npy
+data/stanford_indoor3d/Area_6_office_9.npy
+data/stanford_indoor3d/Area_6_openspace_1.npy
+data/stanford_indoor3d/Area_6_pantry_1.npy
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt
new file mode 100644
index 000000000..ca1d17882
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt
@@ -0,0 +1,13 @@
+ceiling
+floor
+wall
+beam
+column
+window
+door
+table
+chair
+sofa
+bookcase
+board
+clutter
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py
new file mode 100644
index 000000000..9efdce3c0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py
@@ -0,0 +1,106 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import math
+import time
+import numpy as np
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'utils'))
+import tf_util
+
def placeholder_inputs(batch_size, num_point):
    """Create graph inputs for semantic segmentation.

    Returns a pair of placeholders: (B, N, 9) float32 point features and
    (B, N) int32 per-point class labels.
    """
    points = tf.compat.v1.placeholder(
        tf.float32, shape=(batch_size, num_point, 9))
    labels = tf.compat.v1.placeholder(
        tf.int32, shape=(batch_size, num_point))
    return points, labels
+
def get_model(point_cloud, is_training, bn_decay=None):
    """Semantic-segmentation PointNet baseline.

    Args:
        point_cloud: float32 tensor of shape (B, N, 9) — 9 features per point
            (presumably XYZ + RGB + normalized XYZ; confirm against the data
            pipeline that produced the HDF5 files).
        is_training: bool tensor toggling batch-norm / dropout behavior.
        bn_decay: optional batch-norm decay schedule tensor.

    Returns:
        Per-point logits tensor of shape (B, N, 13).
    """
    # Fix: in TF2, get_shape() dimensions are plain Python ints; the TF1-era
    # `.value` accessor was removed and raised AttributeError here. This also
    # matches pointnet_cls.py in this model set, which already dropped it.
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]

    input_image = tf.expand_dims(point_cloud, -1)
    # Per-point shared MLP, implemented as 1xK convolutions.
    net = tf_util.conv2d(input_image, 64, [1,9], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                                  bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    # Symmetric max-pool over all points -> global feature.
    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point,1], padding='VALID', scope='maxpool1')
    # Compress the global feature with fully-connected layers.
    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
    pc_feat1 = tf_util.fully_connected(pc_feat1, 256, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    pc_feat1 = tf_util.fully_connected(pc_feat1, 128, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    print(pc_feat1)

    # Broadcast the global feature to every point and concatenate with the
    # per-point features.
    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
    points_feat1_concat = tf.concat(axis=3, values=[points_feat1, pc_feat1_expand])

    # Per-point classification head (13 semantic classes).
    net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv6')
    net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv7')
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
                         activation_fn=None, scope='conv8')
    net = tf.squeeze(net, [2])

    return net
+
def get_loss(pred, label):
    """Mean softmax cross-entropy over all points.

    pred: (B, N, 13) logits; label: (B, N) int class ids.
    """
    per_point_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pred, labels=label)
    return tf.reduce_mean(input_tensor=per_point_loss)
+
if __name__ == "__main__":
    # Smoke test / micro-benchmark: build the model on a (32, 4096, 9)
    # placeholder and time 100 forward passes on random data.
    with tf.Graph().as_default():
        a = tf.compat.v1.placeholder(tf.float32, shape=(32,4096,9))
        net = get_model(a, tf.constant(True))
        with tf.compat.v1.Session() as sess:
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)
            start = time.time()
            for i in range(100):
                print(i)
                sess.run(net, feed_dict={a:np.random.rand(32,4096,9)})
            # Total wall-clock time for 100 inference-only runs.
            print(time.time() - start)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py
new file mode 100644
index 000000000..dace8d03b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py
@@ -0,0 +1,306 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import math
+import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+sys.path.append(ROOT_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'utils'))
+import provider
+import tf_util
+from model import *
+
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--batch_size', type=int, default=24, help='Batch Size during training [default: 24]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')
FLAGS = parser.parse_args()


BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
# NOTE(review): duplicate assignment of NUM_POINT (also set two lines above);
# harmless but redundant.
NUM_POINT = FLAGS.num_point
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

# ---- Run directory and log file -------------------------------------------
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
# Back up the model and training script into the run directory so the run
# is reproducible. NOTE(review): `cp` is POSIX-only.
os.system('cp model.py %s' % (LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 13

# Batch-norm decay schedule constants (see get_bn_decay()).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# ---- Load the entire S3DIS HDF5 dataset into memory ------------------------
ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data/all_files.txt')
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')]

# Load ALL data
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
    data_batch, label_batch = provider.loadDataFile(h5_filename)
    data_batch_list.append(data_batch)
    label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
print(data_batches.shape)
print(label_batches.shape)

# ---- Train/test split: hold out one building area (k-fold style) ----------
test_area = 'Area_'+str(FLAGS.test_area)
train_idxs = []
test_idxs = []
for i,room_name in enumerate(room_filelist):
    if test_area in room_name:
        test_idxs.append(i)
    else:
        train_idxs.append(i)

train_data = data_batches[train_idxs,...]
train_label = label_batches[train_idxs]
test_data = data_batches[test_idxs,...]
test_label = label_batches[test_idxs]
print(train_data.shape, train_label.shape)
print(test_data.shape, test_label.shape)
+
+
+
+
def log_string(out_str):
    """Record a message in the run log file (flushed immediately) and echo it."""
    line = out_str + '\n'
    LOG_FOUT.write(line)
    LOG_FOUT.flush()
    print(out_str)
+
+
def get_learning_rate(batch):
    """Staircase-decayed learning rate as a function of samples seen.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP samples,
    floored at 1e-5 so it never decays to (numerically) zero.
    """
    decayed = tf.compat.v1.train.exponential_decay(
        BASE_LEARNING_RATE,
        batch * BATCH_SIZE,  # number of samples processed so far
        DECAY_STEP,
        DECAY_RATE,
        staircase=True)
    # Clip the learning rate from below.
    return tf.maximum(decayed, 0.00001)
+
def get_bn_decay(batch):
    """Batch-norm decay schedule: 1 - decaying momentum, capped at BN_DECAY_CLIP."""
    momentum = tf.compat.v1.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,  # samples processed so far
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    # As momentum decays toward 0, bn_decay rises toward the clip value.
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
+
def train():
    """Build the segmentation graph, then run MAX_EPOCH train/eval epochs,
    checkpointing every 10 epochs into LOG_DIR."""
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch'
            # parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.compat.v1.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = get_loss(pred, labels_pl)
            tf.compat.v1.summary.scalar('loss', loss)

            # Per-point accuracy over the whole batch.
            correct = tf.equal(tf.argmax(input=pred, axis=2), tf.cast(labels_pl, dtype=tf.int64))
            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
            tf.compat.v1.summary.scalar('accuracy', accuracy)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.compat.v1.train.Saver()

        # Create a session
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # NOTE(review): logs every op placement — very verbose; presumably
        # intentional for debugging device assignment.
        config.log_device_placement = True
        sess = tf.compat.v1.Session(config=config)

        # Add summary writers
        merged = tf.compat.v1.summary.merge_all()
        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                                       sess.graph)
        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Init variables
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init, {is_training_pl:True})

        # Handles needed by the per-epoch loops below.
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch}

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
+
+
+
def train_one_epoch(sess, ops, train_writer):
    """Run one training pass over the (shuffled) training set.

    ops: dict mapping from string to tf ops (built in train()).
    """
    is_training = True

    log_string('----')
    # Shuffle sample order each epoch; the returned permutation is unused.
    current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label)

    file_size = current_data.shape[0]
    # Trailing partial batch is dropped.
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training,}
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        # Convert per-point logits to hard class predictions for accuracy.
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
def eval_one_epoch(sess, ops, test_writer):
    """Run one full pass over the held-out area and log loss plus overall and
    class-averaged accuracy.

    Args:
        sess: active tf.compat.v1.Session holding the trained graph.
        ops: dict mapping names to graph tensors/ops (built in train()).
        test_writer: summary FileWriter for the eval split.
    """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    log_string('----')
    current_data = test_data[:,0:NUM_POINT,:]
    current_label = np.squeeze(test_label)

    file_size = current_data.shape[0]
    # Trailing partial batch is dropped.
    num_batches = file_size // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training}
        summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']],
                                                     feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += (loss_val*BATCH_SIZE)
        # Accumulate per-class totals for the class-averaged accuracy metric.
        for i in range(start_idx, end_idx):
            for j in range(NUM_POINT):
                l = current_label[i, j]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx, j] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    # Fix: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit np.float64 dtype (same precision as the old alias).
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
+
+
+
if __name__ == "__main__":
    train()
    # Release the log file opened at module import time.
    LOG_FOUT.close()
--
Gitee
From d7c08c3e64117685ad13688958b27311de93d7bb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:33:16 +0000
Subject: [PATCH 25/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../models/pointnet_cls.py | 136 +++++++++++++++++
.../models/pointnet_cls_basic.py | 105 +++++++++++++
.../models/pointnet_seg.py | 143 ++++++++++++++++++
.../models/transform_nets.py | 140 +++++++++++++++++
4 files changed, 524 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
new file mode 100644
index 000000000..0922464e0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
@@ -0,0 +1,136 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+from transform_nets import input_transform_net, feature_transform_net
+
def placeholder_inputs(batch_size, num_point):
    """Create classification graph inputs.

    Returns a pair of placeholders: (B, N, 3) float32 XYZ point clouds and
    (B,) int32 class labels. Placeholders must be fed at session run time.
    """
    clouds = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.compat.v1.placeholder(tf.int32, shape=(batch_size))
    return clouds, labels
+
+
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    # Read the static shape configured by placeholder_inputs().
    # (TF2: dims are plain ints, so no `.value` accessor.)
    # batch_size = point_cloud.get_shape()[0].value
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    end_points = {}

    # Learn a 3x3 input transform that canonicalizes the point cloud pose.
    with tf.compat.v1.variable_scope('transform_net1') as sc:
        # input_transform_net consumes the (B, N, 3) cloud and the bn_decay
        # schedule and returns a (B, K, K) transform matrix (K=3 here).
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
        # Apply the learned transform: batched matrix multiply.
        point_cloud_transformed = tf.matmul(point_cloud, transform)
        # Add a trailing singleton dim so the cloud can be fed to conv2d.
        input_image = tf.expand_dims(point_cloud_transformed, -1)

    # Per-point shared MLP implemented as 1xK convolutions.
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    # Second learned transform (64x64) applied in feature space; kept in
    # end_points so get_loss() can regularize it toward orthogonality.
    with tf.compat.v1.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    # Classifier head on the global feature (40 ModelNet classes).
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
+
+
def get_loss(pred, label, end_points, reg_weight=0.001):
    """Classification loss plus an orthogonality regularizer.

    pred: (B, NUM_CLASSES) logits; label: (B,) int class ids.
    end_points['transform'] holds the (B, K, K) feature transform; it is
    penalized toward orthogonality via || A A^T - I ||^2, scaled by reg_weight.
    """
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(input_tensor=xent)
    tf.compat.v1.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1]
    residual = tf.matmul(transform, tf.transpose(a=transform, perm=[0, 2, 1]))
    residual -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(residual)
    tf.compat.v1.summary.scalar('mat loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
+
+
if __name__=='__main__':
    # Smoke test: build the classifier on a zero-filled (32, 1024, 3) input
    # and print the resulting (logits, end_points) tuple.
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py
new file mode 100644
index 000000000..de16d003e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py
@@ -0,0 +1,105 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+
def placeholder_inputs(batch_size, num_point):
    """Graph inputs: (B, N, 3) float32 point clouds and (B,) int32 labels."""
    clouds = tf.compat.v1.placeholder(
        tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.compat.v1.placeholder(
        tf.int32, shape=(batch_size))
    return clouds, labels
+
+
def get_model(point_cloud, is_training, bn_decay=None):
    """Basic classification PointNet (no transform nets).

    Args:
        point_cloud: float32 tensor of shape (B, N, 3) — XYZ points.
        is_training: bool tensor toggling batch-norm / dropout behavior.
        bn_decay: optional batch-norm decay schedule tensor.

    Returns:
        (logits, end_points): (B, 40) class logits and an (empty) dict kept
        for interface parity with pointnet_cls.get_model.
    """
    # Fix: in TF2, get_shape() dimensions are plain ints; the TF1-era
    # `.value` accessor was removed and raised AttributeError here (the
    # full model in pointnet_cls.py already dropped it).
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)

    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
+
+
def get_loss(pred, label, end_points):
    """Mean softmax cross-entropy classification loss.

    pred: (B, NUM_CLASSES) logits; label: (B,) int class ids.
    end_points is unused here; it is accepted for interface parity with
    the full pointnet_cls.get_loss.
    """
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pred, labels=label)
    classify_loss = tf.reduce_mean(input_tensor=xent)
    tf.compat.v1.summary.scalar('classify loss', classify_loss)
    return classify_loss
+
+
if __name__=='__main__':
    # Smoke test: build the basic classifier on a zero-filled (32, 1024, 3)
    # input and print the resulting (logits, end_points) tuple.
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
new file mode 100644
index 000000000..a872aeb87
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
@@ -0,0 +1,143 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+from transform_nets import input_transform_net, feature_transform_net
+
def placeholder_inputs(batch_size, num_point):
    """Segmentation graph inputs: (B, N, 3) float32 clouds, (B, N) int32 labels."""
    clouds = tf.compat.v1.placeholder(
        tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.compat.v1.placeholder(
        tf.int32, shape=(batch_size, num_point))
    return clouds, labels
+
+
def get_model(point_cloud, is_training, bn_decay=None):
    """Part-segmentation PointNet.

    Args:
        point_cloud: float32 tensor of shape (B, N, 3) — XYZ points.
        is_training: bool tensor toggling batch-norm / dropout behavior.
        bn_decay: optional batch-norm decay schedule tensor.

    Returns:
        (logits, end_points): (B, N, 50) per-point part logits, and a dict
        holding the 64x64 feature transform for the loss regularizer.
    """
    # Fix: in TF2, get_shape() dimensions are plain ints; the TF1-era
    # `.value` accessor was removed and raised AttributeError here.
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    end_points = {}

    # Learned 3x3 input transform canonicalizing the cloud pose.
    with tf.compat.v1.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    # Per-point shared MLP implemented as 1xK convolutions.
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    # Learned 64x64 feature-space transform; kept for the loss regularizer.
    with tf.compat.v1.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Symmetric max-pool over points -> global feature.
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    # Broadcast the global feature to every point and concatenate with the
    # per-point features.
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    # Fix: tf.concat(3, [...]) is the long-removed TF0.x argument order and
    # fails in TF2 (3 is not a valid `values` argument); use keyword form,
    # consistent with sem_seg/model.py in this model set.
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    # Per-point segmentation head (50 part classes).
    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 50, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC

    return net, end_points
+
+
def get_loss(pred, label, end_points, reg_weight=0.001):
    """Segmentation loss plus an orthogonality regularizer.

    Args:
        pred: (B, N, C) per-point logits.
        label: (B, N) int part labels.
        end_points: dict with 'transform', the (B, K, K) feature transform.
        reg_weight: weight of the orthogonality penalty.

    Returns:
        Scalar total loss: mean cross-entropy + reg_weight * ||A A^T - I||^2.
    """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(input_tensor=loss)
    # Fix: tf.scalar_summary was removed in TF 1.0; use the compat.v1
    # summary API, matching get_loss in pointnet_cls.py.
    tf.compat.v1.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform'] # BxKxK
    # Fix: drop the TF1-era `.value` accessor (removed in TF2; dims are ints).
    K = transform.get_shape()[1]
    mat_diff = tf.matmul(transform, tf.transpose(a=transform, perm=[0,2,1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.compat.v1.summary.scalar('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
+
+
if __name__=='__main__':
    # Smoke test: build the segmentation net on a zero-filled (32, 1024, 3)
    # input and print the resulting (logits, end_points) tuple.
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py
new file mode 100644
index 000000000..6b1c560bd
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py
@@ -0,0 +1,140 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+
+def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
+    """ Input (XYZ) Transform Net, input is BxNx3 gray image
+        Return:
+            Transformation matrix of size 3xK """
+    # Read the input's static shape: batch_size (e.g. 32) and num_point (e.g. 1024).
+    batch_size = point_cloud.get_shape()[0]
+    num_point = point_cloud.get_shape()[1]
+
+    # Append a trailing channel dim so the cloud becomes a BxNx3x1 "image".
+    input_image = tf.expand_dims(point_cloud, -1)
+
+    # Per-point MLP built from tf_util.conv2d layers.
+    # First layer 'tconv1': output shape [B, N, 1, 64]
+    net = tf_util.conv2d(input_image, 64, [1,3],
+                         padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training,
+                         scope='tconv1', bn_decay=bn_decay)
+    # Second layer 'tconv2': output shape [B, N, 1, 128]
+    net = tf_util.conv2d(net, 128, [1,1],
+                         padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training,
+                         scope='tconv2', bn_decay=bn_decay)
+    # Third layer 'tconv3': output shape [B, N, 1, 1024]
+    net = tf_util.conv2d(net, 1024, [1,1],
+                         padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training,
+                         scope='tconv3', bn_decay=bn_decay)
+    # Symmetric max pooling 'tmaxpool' over all points: output [B, 1, 1, 1024].
+    # Since height and width are 1 after pooling, each batch element is reduced
+    # to a single 1024-channel global feature.
+    net = tf_util.max_pool2d(net, [num_point,1],
+                             padding='VALID', scope='tmaxpool')
+
+    net = tf.reshape(net, [batch_size, -1])
+    # Two fully-connected layers shrink the global feature (512 then 256).
+    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
+                                  scope='tfc1', bn_decay=bn_decay)
+    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
+                                  scope='tfc2', bn_decay=bn_decay)
+
+    # Regress the 3xK transform from the 256-d feature: multiply by a
+    # [256, 3*K] weight matrix and add a bias initialized to the flattened
+    # 3x3 identity [1,0,0,0,1,0,0,0,1], so the initial transform is a no-op.
+    # The result is reshaped to [B, 3, K] for rotating the point cloud later.
+    with tf.compat.v1.variable_scope('transform_XYZ') as sc:
+        assert(K==3)
+        weights = tf.compat.v1.get_variable('weights', [256, 3*K],
+                                            initializer=tf.compat.v1.constant_initializer(0.0),
+                                            dtype=tf.float32)
+        biases = tf.compat.v1.get_variable('biases', [3*K],
+                                           initializer=tf.compat.v1.constant_initializer(0.0),
+                                           dtype=tf.float32)
+        biases = biases + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
+        transform = tf.matmul(net, weights)
+        transform = tf.nn.bias_add(transform, biases)
+
+    transform = tf.reshape(transform, [batch_size, 3, K])
+    return transform
+
+
+def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
+ """ Feature Transform Net, input is BxNx1xK
+ Return:
+ Transformation matrix of size KxK """
+ batch_size = inputs.get_shape()[0]
+ num_point = inputs.get_shape()[1]
+
+ net = tf_util.conv2d(inputs, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv2', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv3', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
+ scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
+ scope='tfc2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_feat') as sc:
+ weights = tf.compat.v1.get_variable('weights', [256, K*K],
+ initializer=tf.compat.v1.constant_initializer(0.0),
+ dtype=tf.float32)
+ biases = tf.compat.v1.get_variable('biases', [K*K],
+ initializer=tf.compat.v1.constant_initializer(0.0),
+ dtype=tf.float32)
+ biases = biases + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
+ transform = tf.matmul(net, weights)
+ transform = tf.nn.bias_add(transform, biases)
+
+ transform = tf.reshape(transform, [batch_size, K, K])
+ return transform
--
Gitee
From b7266bd2ab76d249f868ecfe85760f9fcaa44eb4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:34:05 +0000
Subject: [PATCH 26/54] PointNet_ID2913_for_TensorFlow2.X
---
.../part_seg/download_data.sh | 12 +
.../part_seg/pointnet_part_seg.py | 189 ++++++++
.../part_seg/test.py | 299 +++++++++++++
.../part_seg/train.py | 422 ++++++++++++++++++
4 files changed, 922 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh
new file mode 100644
index 000000000..70f78dbf2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Download original ShapeNetPart dataset (around 1GB)
+wget https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_v0.zip
+unzip shapenetcore_partanno_v0.zip
+rm shapenetcore_partanno_v0.zip
+
+# Download HDF5 for ShapeNet Part segmentation (around 346MB)
+wget https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip
+unzip shapenet_part_seg_hdf5_data.zip
+rm shapenet_part_seg_hdf5_data.zip
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py
new file mode 100644
index 000000000..ed475cddf
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py
@@ -0,0 +1,189 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.dirname(BASE_DIR))
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+
+
+def get_transform_K(inputs, is_training, bn_decay=None, K = 3):
+ """ Transform Net, input is BxNx1xK gray image
+ Return:
+ Transformation matrix of size KxK """
+ batch_size = inputs.get_shape()[0].value
+ num_point = inputs.get_shape()[1].value
+
+ net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_feat') as sc:
+ weights = tf.compat.v1.get_variable('weights', [256, K*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32)
+ biases = tf.compat.v1.get_variable('biases', [K*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
+ transform = tf.matmul(net, weights)
+ transform = tf.nn.bias_add(transform, biases)
+
+ #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
+ transform = tf.reshape(transform, [batch_size, K, K])
+ return transform
+
+
+
+
+
+def get_transform(point_cloud, is_training, bn_decay=None, K = 3):
+ """ Transform Net, input is BxNx3 gray image
+ Return:
+ Transformation matrix of size 3xK """
+ batch_size = point_cloud.get_shape()[0].value
+ num_point = point_cloud.get_shape()[1].value
+
+ input_image = tf.expand_dims(point_cloud, -1)
+ net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_XYZ') as sc:
+ assert(K==3)
+ weights = tf.compat.v1.get_variable('weights', [128, 3*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32)
+ biases = tf.compat.v1.get_variable('biases', [3*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
+ transform = tf.matmul(net, weights)
+ transform = tf.nn.bias_add(transform, biases)
+
+ #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
+ transform = tf.reshape(transform, [batch_size, 3, K])
+ return transform
+
+
+def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
+        batch_size, num_point, weight_decay, bn_decay=None):
+    """ ConvNet baseline, input is BxNx3 gray image """
+    end_points = {}
+
+    # Stage 1: learn a 3x3 input transform and align the raw point cloud.
+    with tf.compat.v1.variable_scope('transform_net1') as sc:
+        K = 3
+        transform = get_transform(point_cloud, is_training, bn_decay, K = 3)
+    point_cloud_transformed = tf.matmul(point_cloud, transform)
+
+    # Stage 2: per-point feature extraction (64 -> 128 -> 128 channels).
+    input_image = tf.expand_dims(point_cloud_transformed, -1)
+    out1 = tf_util.conv2d(input_image, 64, [1,K], padding='VALID', stride=[1,1],
+                          bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
+    out2 = tf_util.conv2d(out1, 128, [1,1], padding='VALID', stride=[1,1],
+                          bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
+    out3 = tf_util.conv2d(out2, 128, [1,1], padding='VALID', stride=[1,1],
+                          bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
+
+
+    # Stage 3: learn a 128x128 feature-space transform and apply it.
+    with tf.compat.v1.variable_scope('transform_net2') as sc:
+        K = 128
+        transform = get_transform_K(out3, is_training, bn_decay, K)
+
+    # Exposed for the orthogonality regularizer in get_loss.
+    end_points['transform'] = transform
+
+    squeezed_out3 = tf.reshape(out3, [batch_size, num_point, 128])
+    net_transformed = tf.matmul(squeezed_out3, transform)
+    # NOTE(review): axis is passed as a list ([2]); tf.expand_dims expects a
+    # scalar axis — confirm this still works on the target TF2 version.
+    net_transformed = tf.expand_dims(net_transformed, [2])
+
+    # Stage 4: widen features (512 -> 2048) and max-pool into a global feature.
+    out4 = tf_util.conv2d(net_transformed, 512, [1,1], padding='VALID', stride=[1,1],
+                          bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
+    out5 = tf_util.conv2d(out4, 2048, [1,1], padding='VALID', stride=[1,1],
+                          bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
+    out_max = tf_util.max_pool2d(out5, [num_point,1], padding='VALID', scope='maxpool')
+
+    # classification network: FC head on the global feature -> cat_num logits.
+    net = tf.reshape(out_max, [batch_size, -1])
+    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc1', bn_decay=bn_decay)
+    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc2', bn_decay=bn_decay)
+    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='cla/dp1')
+    net = tf_util.fully_connected(net, cat_num, activation_fn=None, scope='cla/fc3')
+
+    # segmentation network: concatenate the global feature (plus the one-hot
+    # object label) with every per-point feature map, then a per-point MLP.
+    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
+    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
+
+    expand = tf.tile(out_max, [1, num_point, 1, 1])
+    concat = tf.concat(axis=3, values=[expand, out1, out2, out3, out4, out5])
+
+    net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
+                        bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay)
+    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp1')
+    net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
+                        bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay)
+    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp2')
+    net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
+                        bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay)
+    net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
+                        bn=False, scope='seg/conv4', weight_decay=weight_decay)
+
+    net2 = tf.reshape(net2, [batch_size, num_point, part_num])
+
+    # Returns: classification logits, per-point segmentation logits, end_points.
+    return net, net2, end_points
+
+def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
+ per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
+ label_loss = tf.reduce_mean(input_tensor=per_instance_label_loss)
+
+ # size of seg_pred is batch_size x point_num x part_cat_num
+ # size of seg is batch_size x point_num
+ per_instance_seg_loss = tf.reduce_mean(input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
+ seg_loss = tf.reduce_mean(input_tensor=per_instance_seg_loss)
+
+ per_instance_seg_pred_res = tf.argmax(input=seg_pred, axis=2)
+
+ # Enforce the transformation as orthogonal matrix
+ transform = end_points['transform'] # BxKxK
+ K = transform.get_shape()[1].value
+ mat_diff = tf.matmul(transform, tf.transpose(a=transform, perm=[0,2,1])) - tf.constant(np.eye(K), dtype=tf.float32)
+ mat_diff_loss = tf.nn.l2_loss(mat_diff)
+
+
+ total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3
+
+ return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py
new file mode 100644
index 000000000..5c2d5f35c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py
@@ -0,0 +1,299 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import tensorflow as tf
+import json
+import numpy as np
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.dirname(BASE_DIR))
+import provider
+import pointnet_part_seg as model
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--model_path', default='train_results/trained_models/epoch_190.ckpt', help='Model checkpoint path')
+FLAGS = parser.parse_args()
+
+
+# DEFAULT SETTINGS
+pretrained_model_path = FLAGS.model_path # os.path.join(BASE_DIR, './pretrained_model/model.ckpt')
+hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data')
+ply_data_dir = os.path.join(BASE_DIR, './PartAnnotation')
+gpu_to_use = 0
+output_dir = os.path.join(BASE_DIR, './test_results')
+output_verbose = True # If true, output all color-coded part segmentation obj files
+
+# MAIN SCRIPT
+point_num = 3000 # the max number of points in the all testing data shapes
+batch_size = 1
+
+test_file_list = os.path.join(BASE_DIR, 'testing_ply_file_list.txt')
+
+# List mapping overall part id -> (category id, part id), loaded from JSON.
+oid2cpid = json.load(open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r'))
+
+# Invert the list: category id -> list of overall part ids belonging to it.
+object2setofoid = {}
+for idx in range(len(oid2cpid)):
+    objid, pid = oid2cpid[idx]
+    if not objid in object2setofoid.keys():
+        object2setofoid[objid] = []
+    object2setofoid[objid].append(idx)
+
+# Category list file: each line is "<name> <catid>"; build name/id lookups.
+all_obj_cat_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt')
+fin = open(all_obj_cat_file, 'r')
+lines = [line.rstrip() for line in fin.readlines()]
+objcats = [line.split()[1] for line in lines]
+objnames = [line.split()[0] for line in lines]
+on2oid = {objcats[i]:i for i in range(len(objcats))}
+fin.close()
+
+# Per-part RGB colors used when writing the colored .obj outputs.
+color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json')
+color_map = json.load(open(color_map_file, 'r'))
+
+NUM_OBJ_CATS = 16
+NUM_PART_CATS = 50
+
+# Reverse mapping: "<catid>_<partid>" string -> overall part id.
+cpid2oid = json.load(open(os.path.join(hdf5_data_dir, 'catid_partid_to_overallid.json'), 'r'))
+
+def printout(flog, data):
+    """Echo *data* to stdout and append it, newline-terminated, to *flog*."""
+    print(data)
+    flog.write(data + '\n')
+
+def output_color_point_cloud(data, seg, out_file):
+    """Write points as 'v x y z r g b' OBJ lines, colored per-label via the
+    module-level color_map."""
+    with open(out_file, 'w') as f:
+        l = len(seg)
+        for i in range(l):
+            color = color_map[seg[i]]
+            f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))
+
+def output_color_point_cloud_red_blue(data, seg, out_file):
+ with open(out_file, 'w') as f:
+ l = len(seg)
+ for i in range(l):
+ if seg[i] == 1:
+ color = [0, 0, 1]
+ elif seg[i] == 0:
+ color = [1, 0, 0]
+ else:
+ color = [0, 0, 0]
+
+ f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))
+
+
+def pc_normalize(pc):
+ l = pc.shape[0]
+ centroid = np.mean(pc, axis=0)
+ pc = pc - centroid
+ m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
+ pc = pc / m
+ return pc
+
+def placeholder_inputs():
+    """Create the graph input placeholders.
+
+    Returns:
+        (pointclouds_ph, input_label_ph): float32 placeholders of shapes
+        (batch_size, point_num, 3) and (batch_size, NUM_OBJ_CATS), sized by
+        the module-level constants.
+    """
+    pointclouds_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, point_num, 3))
+    input_label_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, NUM_OBJ_CATS))
+    return pointclouds_ph, input_label_ph
+
+# NOTE(review): this is an exact duplicate of output_color_point_cloud defined
+# earlier in this file; this second definition silently shadows the first.
+# Consider removing one copy.
+def output_color_point_cloud(data, seg, out_file):
+    """Write points as 'v x y z r g b' OBJ lines, colored per-label via the
+    module-level color_map."""
+    with open(out_file, 'w') as f:
+        l = len(seg)
+        for i in range(l):
+            color = color_map[seg[i]]
+            f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))
+
+def load_pts_seg_files(pts_file, seg_file, catid):
+ with open(pts_file, 'r') as f:
+ pts_str = [item.rstrip() for item in f.readlines()]
+ pts = np.array([np.float32(s.split()) for s in pts_str], dtype=np.float32)
+ with open(seg_file, 'r') as f:
+ part_ids = np.array([int(item.rstrip()) for item in f.readlines()], dtype=np.uint8)
+ seg = np.array([cpid2oid[catid+'_'+str(x)] for x in part_ids])
+ return pts, seg
+
+def pc_augment_to_point_num(pts, pn):
+ assert(pts.shape[0] <= pn)
+ cur_len = pts.shape[0]
+ res = np.array(pts)
+ while cur_len < pn:
+ res = np.concatenate((res, pts))
+ cur_len += pts.shape[0]
+ return res[:pn, :]
+
+def convert_label_to_one_hot(labels):
+ label_one_hot = np.zeros((labels.shape[0], NUM_OBJ_CATS))
+ for idx in range(labels.shape[0]):
+ label_one_hot[idx, labels[idx]] = 1
+ return label_one_hot
+
+def predict():
+    """Run part-segmentation evaluation over the test file list.
+
+    Restores the checkpoint at pretrained_model_path, evaluates every shape
+    in test_file_list one at a time (batch_size == 1), and logs per-category
+    accuracy and IoU to <output_dir>/log.txt. If output_verbose is set, also
+    writes colored .obj/.log files per shape.
+    """
+    is_training = False
+
+    with tf.device('/gpu:'+str(gpu_to_use)):
+        pointclouds_ph, input_label_ph = placeholder_inputs()
+        is_training_ph = tf.compat.v1.placeholder(tf.bool, shape=())
+
+        # simple model
+        pred, seg_pred, end_points = model.get_model(pointclouds_ph, input_label_ph, \
+                cat_num=NUM_OBJ_CATS, part_num=NUM_PART_CATS, is_training=is_training_ph, \
+                batch_size=batch_size, num_point=point_num, weight_decay=0.0, bn_decay=None)
+
+        # Add ops to save and restore all the variables.
+        saver = tf.compat.v1.train.Saver()
+
+    # Later, launch the model, use the saver to restore variables from disk, and
+    # do some work with the model.
+
+    config = tf.compat.v1.ConfigProto()
+    config.gpu_options.allow_growth = True
+    config.allow_soft_placement = True
+
+    with tf.compat.v1.Session(config=config) as sess:
+        if not os.path.exists(output_dir):
+            os.mkdir(output_dir)
+
+        flog = open(os.path.join(output_dir, 'log.txt'), 'w')
+
+        # Restore variables from disk.
+        printout(flog, 'Loading model %s' % pretrained_model_path)
+        saver.restore(sess, pretrained_model_path)
+        printout(flog, 'Model restored.')
+
+        # Note: the evaluation for the model with BN has to have some statistics
+        # Using some test datas as the statistics
+        batch_data = np.zeros([batch_size, point_num, 3]).astype(np.float32)
+
+        # Running totals across all evaluated shapes.
+        total_acc = 0.0
+        total_seen = 0
+        total_acc_iou = 0.0
+
+        total_per_cat_acc = np.zeros((NUM_OBJ_CATS)).astype(np.float32)
+        total_per_cat_iou = np.zeros((NUM_OBJ_CATS)).astype(np.float32)
+        total_per_cat_seen = np.zeros((NUM_OBJ_CATS)).astype(np.int32)
+
+        # Each line of the test list: "<pts path> <seg path> <category id>".
+        ffiles = open(test_file_list, 'r')
+        lines = [line.rstrip() for line in ffiles.readlines()]
+        pts_files = [line.split()[0] for line in lines]
+        seg_files = [line.split()[1] for line in lines]
+        labels = [line.split()[2] for line in lines]
+        ffiles.close()
+
+        len_pts_files = len(pts_files)
+        for shape_idx in range(len_pts_files):
+            if shape_idx % 100 == 0:
+                printout(flog, '%d/%d ...' % (shape_idx, len_pts_files))
+
+            # Ground-truth object category and its one-hot encoding.
+            cur_gt_label = on2oid[labels[shape_idx]]
+
+            cur_label_one_hot = np.zeros((1, NUM_OBJ_CATS), dtype=np.float32)
+            cur_label_one_hot[0, cur_gt_label] = 1
+
+            pts_file_to_load = os.path.join(ply_data_dir, pts_files[shape_idx])
+            seg_file_to_load = os.path.join(ply_data_dir, seg_files[shape_idx])
+
+            pts, seg = load_pts_seg_files(pts_file_to_load, seg_file_to_load, objcats[cur_gt_label])
+            ori_point_num = len(seg)
+
+            # Normalize and pad/tile the cloud up to the fixed point_num.
+            batch_data[0, ...] = pc_augment_to_point_num(pc_normalize(pts), point_num)
+
+            label_pred_val, seg_pred_res = sess.run([pred, seg_pred], feed_dict={
+                        pointclouds_ph: batch_data,
+                        input_label_ph: cur_label_one_hot,
+                        is_training_ph: is_training,
+                    })
+
+            label_pred_val = np.argmax(label_pred_val[0, :])
+
+            seg_pred_res = seg_pred_res[0, ...]
+
+            # Mask out part logits that do not belong to this object category
+            # by pushing them far below the minimum logit.
+            iou_oids = object2setofoid[objcats[cur_gt_label]]
+            non_cat_labels = list(set(np.arange(NUM_PART_CATS)).difference(set(iou_oids)))
+
+            mini = np.min(seg_pred_res)
+            seg_pred_res[:, non_cat_labels] = mini - 1000
+
+            # Crop back to the original (pre-padding) number of points.
+            seg_pred_val = np.argmax(seg_pred_res, axis=1)[:ori_point_num]
+
+            seg_acc = np.mean(seg_pred_val == seg)
+
+            total_acc += seg_acc
+            total_seen += 1
+
+            total_per_cat_seen[cur_gt_label] += 1
+            total_per_cat_acc[cur_gt_label] += seg_acc
+
+            mask = np.int32(seg_pred_val == seg)
+
+            # Per-part IoU; a part absent from both prediction and ground
+            # truth (empty union) counts as IoU 1.
+            total_iou = 0.0
+            iou_log = ''
+            for oid in iou_oids:
+                n_pred = np.sum(seg_pred_val == oid)
+                n_gt = np.sum(seg == oid)
+                n_intersect = np.sum(np.int32(seg == oid) * mask)
+                n_union = n_pred + n_gt - n_intersect
+                iou_log += '_' + str(n_pred)+'_'+str(n_gt)+'_'+str(n_intersect)+'_'+str(n_union)+'_'
+                if n_union == 0:
+                    total_iou += 1
+                    iou_log += '_1\n'
+                else:
+                    total_iou += n_intersect * 1.0 / n_union
+                    iou_log += '_'+str(n_intersect * 1.0 / n_union)+'\n'
+
+            avg_iou = total_iou / len(iou_oids)
+            total_acc_iou += avg_iou
+            total_per_cat_iou[cur_gt_label] += avg_iou
+
+            if output_verbose:
+                output_color_point_cloud(pts, seg, os.path.join(output_dir, str(shape_idx)+'_gt.obj'))
+                output_color_point_cloud(pts, seg_pred_val, os.path.join(output_dir, str(shape_idx)+'_pred.obj'))
+                output_color_point_cloud_red_blue(pts, np.int32(seg == seg_pred_val),
+                        os.path.join(output_dir, str(shape_idx)+'_diff.obj'))
+
+                with open(os.path.join(output_dir, str(shape_idx)+'.log'), 'w') as fout:
+                    fout.write('Total Point: %d\n\n' % ori_point_num)
+                    fout.write('Ground Truth: %s\n' % objnames[cur_gt_label])
+                    fout.write('Predict: %s\n\n' % objnames[label_pred_val])
+                    fout.write('Accuracy: %f\n' % seg_acc)
+                    fout.write('IoU: %f\n\n' % avg_iou)
+                    fout.write('IoU details: %s\n' % iou_log)
+
+        printout(flog, 'Accuracy: %f' % (total_acc / total_seen))
+        printout(flog, 'IoU: %f' % (total_acc_iou / total_seen))
+
+        for cat_idx in range(NUM_OBJ_CATS):
+            printout(flog, '\t ' + objcats[cat_idx] + ' Total Number: ' + str(total_per_cat_seen[cat_idx]))
+            if total_per_cat_seen[cat_idx] > 0:
+                printout(flog, '\t ' + objcats[cat_idx] + ' Accuracy: ' + \
+                        str(total_per_cat_acc[cat_idx] / total_per_cat_seen[cat_idx]))
+                printout(flog, '\t ' + objcats[cat_idx] + ' IoU: '+ \
+                        str(total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx]))
+
+
+# Script entry point: build a fresh graph and run the evaluation.
+with tf.Graph().as_default():
+    predict()
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py
new file mode 100644
index 000000000..abff0ead3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py
@@ -0,0 +1,422 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import subprocess
+import tensorflow as tf
+import numpy as np
+from datetime import datetime
+import json
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.dirname(BASE_DIR))
+import provider
+import pointnet_part_seg as model
+
# DEFAULT SETTINGS
parser = argparse.ArgumentParser()
# NOTE(review): the help strings previously contradicted the real defaults
# (--gpu defaults to 1, --epoch to 200); they now match the code.
parser.add_argument('--gpu', type=int, default=1, help='GPU to use [default: GPU 1]')
parser.add_argument('--batch', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--epoch', type=int, default=200, help='Epoch to run [default: 200]')
parser.add_argument('--point_num', type=int, default=2048, help='Point Number [256/512/1024/2048]')
parser.add_argument('--output_dir', type=str, default='train_results', help='Directory that stores all training logs and trained models')
parser.add_argument('--wd', type=float, default=0, help='Weight Decay [Default: 0.0]')
FLAGS = parser.parse_args()

hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data')

# MAIN SCRIPT
point_num = FLAGS.point_num
batch_size = FLAGS.batch
output_dir = FLAGS.output_dir

# exist_ok avoids the check-then-create race of the old exists()/mkdir pair.
os.makedirs(output_dir, exist_ok=True)

# Mapping from part id to display color; context managers close the files
# promptly instead of leaking the handles from json.load(open(...)).
color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json')
with open(color_map_file, 'r') as f:
    color_map = json.load(f)

# One "<catid> <catname>" pair per line.
all_obj_cats_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt')
with open(all_obj_cats_file, 'r') as fin:
    lines = [line.rstrip() for line in fin.readlines()]
all_obj_cats = [(line.split()[0], line.split()[1]) for line in lines]

with open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r') as f:
    all_cats = json.load(f)
NUM_CATEGORIES = 16
NUM_PART_CATS = len(all_cats)

print('#### Batch Size: {0}'.format(batch_size))
print('#### Point Number: {0}'.format(point_num))
print('#### Training using GPU: {0}'.format(FLAGS.gpu))

# Learning-rate schedule: halve every DECAY_STEP samples seen.
DECAY_STEP = 16881 * 20
DECAY_RATE = 0.5

LEARNING_RATE_CLIP = 1e-5

# Batch-norm momentum schedule (decays on twice the LR step).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_CLIP = 0.99

BASE_LEARNING_RATE = 0.001
MOMENTUM = 0.9
TRAINING_EPOCHES = FLAGS.epoch
print('### Training epoch: {0}'.format(TRAINING_EPOCHES))

TRAINING_FILE_LIST = os.path.join(hdf5_data_dir, 'train_hdf5_file_list.txt')
TESTING_FILE_LIST = os.path.join(hdf5_data_dir, 'val_hdf5_file_list.txt')

MODEL_STORAGE_PATH = os.path.join(output_dir, 'trained_models')
os.makedirs(MODEL_STORAGE_PATH, exist_ok=True)

LOG_STORAGE_PATH = os.path.join(output_dir, 'logs')
os.makedirs(LOG_STORAGE_PATH, exist_ok=True)

SUMMARIES_FOLDER = os.path.join(output_dir, 'summaries')
os.makedirs(SUMMARIES_FOLDER, exist_ok=True)
+
def printout(flog, data):
    """Echo *data* to stdout and append it, newline-terminated, to *flog*."""
    print(data)
    flog.write('{0}\n'.format(data))
+
def placeholder_inputs():
    """Create the four graph inputs.

    Returns a tuple of TF1-style placeholders:
    (points (B, N, 3) float32, one-hot object label (B, NUM_CATEGORIES) float32,
     object class label (B,) int32, per-point seg label (B, N) int32),
    where B/N come from the module-level batch_size/point_num.
    """
    v1 = tf.compat.v1
    pts = v1.placeholder(tf.float32, shape=(batch_size, point_num, 3))
    onehot_label = v1.placeholder(tf.float32, shape=(batch_size, NUM_CATEGORIES))
    cls_label = v1.placeholder(tf.int32, shape=(batch_size))
    seg_label = v1.placeholder(tf.int32, shape=(batch_size, point_num))
    return pts, onehot_label, cls_label, seg_label
+
def convert_label_to_one_hot(labels, num_categories=None):
    """Convert integer class labels to a dense one-hot matrix.

    Args:
        labels: 1-D integer numpy array of class ids, shape (N,).
        num_categories: width of each one-hot row. Defaults to the
            module-level NUM_CATEGORIES (backward compatible with the
            original single-argument call sites).

    Returns:
        float numpy array of shape (N, num_categories) with exactly one
        1 per row (for N > 0); handles the empty-input case cleanly.
    """
    if num_categories is None:
        num_categories = NUM_CATEGORIES
    label_one_hot = np.zeros((labels.shape[0], num_categories))
    # Vectorized scatter replaces the original per-row Python loop.
    label_one_hot[np.arange(labels.shape[0]), labels] = 1
    return label_one_hot
+
def train():
    """Build the PointNet part-segmentation graph and run the train/eval loop.

    Reads module-level FLAGS, file lists and output paths. Each epoch first
    evaluates on the test split (so epoch 0 logs the untrained baseline),
    then trains over the shuffled train files; every 10 epochs a checkpoint
    is saved to MODEL_STORAGE_PATH. Summaries go to SUMMARIES_FOLDER and a
    text log to LOG_STORAGE_PATH/log.txt.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(FLAGS.gpu)):
            pointclouds_ph, input_label_ph, labels_ph, seg_ph = placeholder_inputs()
            is_training_ph = tf.compat.v1.placeholder(tf.bool, shape=())

            # Global step counter; advanced by the optimizer via global_step=batch.
            batch = tf.Variable(0, trainable=False)
            learning_rate = tf.compat.v1.train.exponential_decay(
                BASE_LEARNING_RATE,     # base learning rate
                batch * batch_size,     # global_var indicating the number of steps
                DECAY_STEP,             # step size
                DECAY_RATE,             # decay rate
                staircase=True          # Stair-case or continuous decreasing
            )
            # Never let the learning rate decay below the clip value.
            learning_rate = tf.maximum(learning_rate, LEARNING_RATE_CLIP)

            # Batch-norm momentum follows its own (clipped) decay schedule.
            bn_momentum = tf.compat.v1.train.exponential_decay(
                BN_INIT_DECAY,
                batch*batch_size,
                BN_DECAY_DECAY_STEP,
                BN_DECAY_DECAY_RATE,
                staircase=True)
            bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)

            lr_op = tf.compat.v1.summary.scalar('learning_rate', learning_rate)
            batch_op = tf.compat.v1.summary.scalar('batch_number', batch)
            bn_decay_op = tf.compat.v1.summary.scalar('bn_decay', bn_decay)

            labels_pred, seg_pred, end_points = model.get_model(pointclouds_ph, input_label_ph, \
                    is_training=is_training_ph, bn_decay=bn_decay, cat_num=NUM_CATEGORIES, \
                    part_num=NUM_PART_CATS, batch_size=batch_size, num_point=point_num, weight_decay=FLAGS.wd)

            # model.py defines both classification net and segmentation net, which share the common global feature extractor network.
            # In model.get_loss, we define the total loss to be weighted sum of the classification and segmentation losses.
            # Here, we only train for segmentation network. Thus, we set weight to be 1.0.
            loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res \
                = model.get_loss(labels_pred, seg_pred, labels_ph, seg_ph, 1.0, end_points)

            # Scalar placeholders used only to feed per-epoch aggregates into summaries.
            total_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            total_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())

            label_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            label_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())

            seg_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            seg_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=())

            label_training_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            label_testing_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            label_testing_acc_avg_cat_ph = tf.compat.v1.placeholder(tf.float32, shape=())

            seg_training_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            seg_testing_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=())
            seg_testing_acc_avg_cat_ph = tf.compat.v1.placeholder(tf.float32, shape=())

            total_train_loss_sum_op = tf.compat.v1.summary.scalar('total_training_loss', total_training_loss_ph)
            total_test_loss_sum_op = tf.compat.v1.summary.scalar('total_testing_loss', total_testing_loss_ph)

            label_train_loss_sum_op = tf.compat.v1.summary.scalar('label_training_loss', label_training_loss_ph)
            label_test_loss_sum_op = tf.compat.v1.summary.scalar('label_testing_loss', label_testing_loss_ph)

            seg_train_loss_sum_op = tf.compat.v1.summary.scalar('seg_training_loss', seg_training_loss_ph)
            seg_test_loss_sum_op = tf.compat.v1.summary.scalar('seg_testing_loss', seg_testing_loss_ph)

            label_train_acc_sum_op = tf.compat.v1.summary.scalar('label_training_acc', label_training_acc_ph)
            label_test_acc_sum_op = tf.compat.v1.summary.scalar('label_testing_acc', label_testing_acc_ph)
            label_test_acc_avg_cat_op = tf.compat.v1.summary.scalar('label_testing_acc_avg_cat', label_testing_acc_avg_cat_ph)

            seg_train_acc_sum_op = tf.compat.v1.summary.scalar('seg_training_acc', seg_training_acc_ph)
            seg_test_acc_sum_op = tf.compat.v1.summary.scalar('seg_testing_acc', seg_testing_acc_ph)
            seg_test_acc_avg_cat_op = tf.compat.v1.summary.scalar('seg_testing_acc_avg_cat', seg_testing_acc_avg_cat_ph)

            train_variables = tf.compat.v1.trainable_variables()

            trainer = tf.compat.v1.train.AdamOptimizer(learning_rate)
            train_op = trainer.minimize(loss, var_list=train_variables, global_step=batch)

        saver = tf.compat.v1.train.Saver()

        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.compat.v1.Session(config=config)

        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)

        train_writer = tf.compat.v1.summary.FileWriter(SUMMARIES_FOLDER + '/train', sess.graph)
        test_writer = tf.compat.v1.summary.FileWriter(SUMMARIES_FOLDER + '/test')

        train_file_list = provider.getDataFiles(TRAINING_FILE_LIST)
        num_train_file = len(train_file_list)
        test_file_list = provider.getDataFiles(TESTING_FILE_LIST)
        num_test_file = len(test_file_list)

        # Record the exact command-line flags used for this run.
        fcmd = open(os.path.join(LOG_STORAGE_PATH, 'cmd.txt'), 'w')
        fcmd.write(str(FLAGS))
        fcmd.close()

        # write logs to the disk
        flog = open(os.path.join(LOG_STORAGE_PATH, 'log.txt'), 'w')

        def train_one_epoch(train_file_idx, epoch_num):
            """Run one training pass over every train file in the given order."""
            is_training = True

            for i in range(num_train_file):
                cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[train_file_idx[i]])
                printout(flog, 'Loading train file ' + cur_train_filename)

                cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_train_filename)
                # Shuffle points/labels together, then apply the same order to seg labels.
                cur_data, cur_labels, order = provider.shuffle_data(cur_data, np.squeeze(cur_labels))
                cur_seg = cur_seg[order, ...]

                cur_labels_one_hot = convert_label_to_one_hot(cur_labels)

                num_data = len(cur_labels)
                num_batch = num_data // batch_size  # NOTE: trailing partial batch is dropped

                total_loss = 0.0
                total_label_loss = 0.0
                total_seg_loss = 0.0
                total_label_acc = 0.0
                total_seg_acc = 0.0

                for j in range(num_batch):
                    begidx = j * batch_size
                    endidx = (j + 1) * batch_size

                    feed_dict = {
                        pointclouds_ph: cur_data[begidx: endidx, ...],
                        labels_ph: cur_labels[begidx: endidx, ...],
                        input_label_ph: cur_labels_one_hot[begidx: endidx, ...],
                        seg_ph: cur_seg[begidx: endidx, ...],
                        is_training_ph: is_training,
                    }

                    _, loss_val, label_loss_val, seg_loss_val, per_instance_label_loss_val, \
                        per_instance_seg_loss_val, label_pred_val, seg_pred_val, pred_seg_res \
                        = sess.run([train_op, loss, label_loss, seg_loss, per_instance_label_loss, \
                            per_instance_seg_loss, labels_pred, seg_pred, per_instance_seg_pred_res], \
                            feed_dict=feed_dict)

                    # Per-shape fraction of correctly labelled points, then batch mean.
                    per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx: endidx, ...], axis=1)
                    average_part_acc = np.mean(per_instance_part_acc)

                    total_loss += loss_val
                    total_label_loss += label_loss_val
                    total_seg_loss += seg_loss_val

                    per_instance_label_pred = np.argmax(label_pred_val, axis=1)
                    total_label_acc += np.mean(np.float32(per_instance_label_pred == cur_labels[begidx: endidx, ...]))
                    total_seg_acc += average_part_acc

                # Convert per-batch sums into per-file means before logging.
                total_loss = total_loss * 1.0 / num_batch
                total_label_loss = total_label_loss * 1.0 / num_batch
                total_seg_loss = total_seg_loss * 1.0 / num_batch
                total_label_acc = total_label_acc * 1.0 / num_batch
                total_seg_acc = total_seg_acc * 1.0 / num_batch

                lr_sum, bn_decay_sum, batch_sum, train_loss_sum, train_label_acc_sum, \
                    train_label_loss_sum, train_seg_loss_sum, train_seg_acc_sum = sess.run(\
                    [lr_op, bn_decay_op, batch_op, total_train_loss_sum_op, label_train_acc_sum_op, \
                        label_train_loss_sum_op, seg_train_loss_sum_op, seg_train_acc_sum_op], \
                    feed_dict={total_training_loss_ph: total_loss, label_training_loss_ph: total_label_loss, \
                        seg_training_loss_ph: total_seg_loss, label_training_acc_ph: total_label_acc, \
                        seg_training_acc_ph: total_seg_acc})

                # One summary point per train file (global step = file index across epochs).
                train_writer.add_summary(train_loss_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(train_label_loss_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(train_seg_loss_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(lr_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(bn_decay_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(train_label_acc_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(train_seg_acc_sum, i + epoch_num * num_train_file)
                train_writer.add_summary(batch_sum, i + epoch_num * num_train_file)

                printout(flog, '\tTraining Total Mean_loss: %f' % total_loss)
                printout(flog, '\t\tTraining Label Mean_loss: %f' % total_label_loss)
                printout(flog, '\t\tTraining Label Accuracy: %f' % total_label_acc)
                printout(flog, '\t\tTraining Seg Mean_loss: %f' % total_seg_loss)
                printout(flog, '\t\tTraining Seg Accuracy: %f' % total_seg_acc)

        def eval_one_epoch(epoch_num):
            """Evaluate on every test file; log overall and per-category metrics."""
            is_training = False

            total_loss = 0.0
            total_label_loss = 0.0
            total_seg_loss = 0.0
            total_label_acc = 0.0
            total_seg_acc = 0.0
            total_seen = 0

            total_label_acc_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.float32)
            total_seg_acc_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.float32)
            total_seen_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.int32)

            for i in range(num_test_file):
                cur_test_filename = os.path.join(hdf5_data_dir, test_file_list[i])
                printout(flog, 'Loading test file ' + cur_test_filename)

                cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_test_filename)
                cur_labels = np.squeeze(cur_labels)

                cur_labels_one_hot = convert_label_to_one_hot(cur_labels)

                num_data = len(cur_labels)
                num_batch = num_data // batch_size  # NOTE: trailing partial batch is dropped

                for j in range(num_batch):
                    begidx = j * batch_size
                    endidx = (j + 1) * batch_size
                    feed_dict = {
                        pointclouds_ph: cur_data[begidx: endidx, ...],
                        labels_ph: cur_labels[begidx: endidx, ...],
                        input_label_ph: cur_labels_one_hot[begidx: endidx, ...],
                        seg_ph: cur_seg[begidx: endidx, ...],
                        is_training_ph: is_training,
                    }

                    loss_val, label_loss_val, seg_loss_val, per_instance_label_loss_val, \
                        per_instance_seg_loss_val, label_pred_val, seg_pred_val, pred_seg_res \
                        = sess.run([loss, label_loss, seg_loss, per_instance_label_loss, \
                            per_instance_seg_loss, labels_pred, seg_pred, per_instance_seg_pred_res], \
                            feed_dict=feed_dict)

                    per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx: endidx, ...], axis=1)
                    average_part_acc = np.mean(per_instance_part_acc)

                    total_seen += 1
                    total_loss += loss_val
                    total_label_loss += label_loss_val
                    total_seg_loss += seg_loss_val

                    per_instance_label_pred = np.argmax(label_pred_val, axis=1)
                    total_label_acc += np.mean(np.float32(per_instance_label_pred == cur_labels[begidx: endidx, ...]))
                    total_seg_acc += average_part_acc

                    # Accumulate per-category counts/accuracies for the report below.
                    for shape_idx in range(begidx, endidx):
                        total_seen_per_cat[cur_labels[shape_idx]] += 1
                        total_label_acc_per_cat[cur_labels[shape_idx]] += np.int32(per_instance_label_pred[shape_idx-begidx] == cur_labels[shape_idx])
                        total_seg_acc_per_cat[cur_labels[shape_idx]] += per_instance_part_acc[shape_idx - begidx]

            # Means over all evaluated batches (total_seen counts batches, not shapes).
            total_loss = total_loss * 1.0 / total_seen
            total_label_loss = total_label_loss * 1.0 / total_seen
            total_seg_loss = total_seg_loss * 1.0 / total_seen
            total_label_acc = total_label_acc * 1.0 / total_seen
            total_seg_acc = total_seg_acc * 1.0 / total_seen

            test_loss_sum, test_label_acc_sum, test_label_loss_sum, test_seg_loss_sum, test_seg_acc_sum = sess.run(\
                [total_test_loss_sum_op, label_test_acc_sum_op, label_test_loss_sum_op, seg_test_loss_sum_op, seg_test_acc_sum_op], \
                feed_dict={total_testing_loss_ph: total_loss, label_testing_loss_ph: total_label_loss, \
                    seg_testing_loss_ph: total_seg_loss, label_testing_acc_ph: total_label_acc, seg_testing_acc_ph: total_seg_acc})

            # Align the test point with the last train step of this epoch.
            test_writer.add_summary(test_loss_sum, (epoch_num+1) * num_train_file-1)
            test_writer.add_summary(test_label_loss_sum, (epoch_num+1) * num_train_file-1)
            test_writer.add_summary(test_seg_loss_sum, (epoch_num+1) * num_train_file-1)
            test_writer.add_summary(test_label_acc_sum, (epoch_num+1) * num_train_file-1)
            test_writer.add_summary(test_seg_acc_sum, (epoch_num+1) * num_train_file-1)

            printout(flog, '\tTesting Total Mean_loss: %f' % total_loss)
            printout(flog, '\t\tTesting Label Mean_loss: %f' % total_label_loss)
            printout(flog, '\t\tTesting Label Accuracy: %f' % total_label_acc)
            printout(flog, '\t\tTesting Seg Mean_loss: %f' % total_seg_loss)
            printout(flog, '\t\tTesting Seg Accuracy: %f' % total_seg_acc)

            for cat_idx in range(NUM_CATEGORIES):
                if total_seen_per_cat[cat_idx] > 0:
                    printout(flog, '\n\t\tCategory %s Object Number: %d' % (all_obj_cats[cat_idx][0], total_seen_per_cat[cat_idx]))
                    printout(flog, '\t\tCategory %s Label Accuracy: %f' % (all_obj_cats[cat_idx][0], total_label_acc_per_cat[cat_idx]/total_seen_per_cat[cat_idx]))
                    printout(flog, '\t\tCategory %s Seg Accuracy: %f' % (all_obj_cats[cat_idx][0], total_seg_acc_per_cat[cat_idx]/total_seen_per_cat[cat_idx]))

        if not os.path.exists(MODEL_STORAGE_PATH):
            os.mkdir(MODEL_STORAGE_PATH)

        for epoch in range(TRAINING_EPOCHES):
            # Evaluate first, so epoch 0 records the untrained baseline.
            printout(flog, '\n<<< Testing on the test dataset ...')
            eval_one_epoch(epoch)

            printout(flog, '\n>>> Training for the epoch %d/%d ...' % (epoch, TRAINING_EPOCHES))

            train_file_idx = np.arange(0, len(train_file_list))
            np.random.shuffle(train_file_idx)

            train_one_epoch(train_file_idx, epoch)

            # Checkpoint every 10 epochs.
            if (epoch+1) % 10 == 0:
                cp_filename = saver.save(sess, os.path.join(MODEL_STORAGE_PATH, 'epoch_' + str(epoch+1)+'.ckpt'))
                printout(flog, 'Successfully store the checkpoint model into ' + cp_filename)

            flog.flush()

        flog.close()
+
if __name__ == "__main__":
    # Kick off training only when executed as a script, not on import.
    train()
--
Gitee
From b6ab2c80ef64d462216b4d3031db45a9af0f5b43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:47:05 +0000
Subject: [PATCH 27/54] add
TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt.
---
.../PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt | 1 +
1 file changed, 1 insertion(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt
new file mode 100644
index 000000000..56a6051ca
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt
@@ -0,0 +1 @@
+1
\ No newline at end of file
--
Gitee
From 176aacdf28b2c2fd1ecfa7228b9357cf758307c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:54:53 +0000
Subject: [PATCH 28/54] add part_seg/testing_ply_file_list.txt.
---
.../part_seg/testing_ply_file_list.txt | 2874 +++++++++++++++++
1 file changed, 2874 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt
new file mode 100644
index 000000000..b1ff7f478
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt
@@ -0,0 +1,2874 @@
+03001627/points/355fa0f35b61fdd7aa74a6b5ee13e775.pts 03001627/expert_verified/points_label/355fa0f35b61fdd7aa74a6b5ee13e775.seg 03001627
+04379243/points/408c3db9b4ee6be2e9f3e9c758fef992.pts 04379243/expert_verified/points_label/408c3db9b4ee6be2e9f3e9c758fef992.seg 04379243
+02691156/points/a1708ad923f3b51abbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a1708ad923f3b51abbf3143b1cb6076a.seg 02691156
+03001627/points/2783a969fa42cdecbe31379a5751d820.pts 03001627/expert_verified/points_label/2783a969fa42cdecbe31379a5751d820.seg 03001627
+03001627/points/ed56af61297594bf1c4300651205adf3.pts 03001627/expert_verified/points_label/ed56af61297594bf1c4300651205adf3.seg 03001627
+03001627/points/c0857de5101f704f3c5e1addd9922bf2.pts 03001627/expert_verified/points_label/c0857de5101f704f3c5e1addd9922bf2.seg 03001627
+02691156/points/b72804a8bd3dbbaca8607f540cc62ba.pts 02691156/expert_verified/points_label/b72804a8bd3dbbaca8607f540cc62ba.seg 02691156
+03001627/points/df609533cd186278398c7598b0d2e5d5.pts 03001627/expert_verified/points_label/df609533cd186278398c7598b0d2e5d5.seg 03001627
+04379243/points/c24b7a315dbf2f3178ab7c8b395efbfe.pts 04379243/expert_verified/points_label/c24b7a315dbf2f3178ab7c8b395efbfe.seg 04379243
+03636649/points/b8c87ad9d4930983a8d82fc8a3e54728.pts 03636649/expert_verified/points_label/b8c87ad9d4930983a8d82fc8a3e54728.seg 03636649
+02691156/points/8add45a11c9fcb446eb5821e78d8898a.pts 02691156/expert_verified/points_label/8add45a11c9fcb446eb5821e78d8898a.seg 02691156
+04379243/points/94d6518cf1e00eaac013a7bed5288654.pts 04379243/expert_verified/points_label/94d6518cf1e00eaac013a7bed5288654.seg 04379243
+04379243/points/1dbb8fd083f96ad279b3e1be3524f72f.pts 04379243/expert_verified/points_label/1dbb8fd083f96ad279b3e1be3524f72f.seg 04379243
+03001627/points/452115e132539be4daaaeef365d8f6e5.pts 03001627/expert_verified/points_label/452115e132539be4daaaeef365d8f6e5.seg 03001627
+04379243/points/bd25dfa62c3c2cf772bd03149507655d.pts 04379243/expert_verified/points_label/bd25dfa62c3c2cf772bd03149507655d.seg 04379243
+03948459/points/b1bbe535a833635d91f9af3df5b0c8fc.pts 03948459/expert_verified/points_label/b1bbe535a833635d91f9af3df5b0c8fc.seg 03948459
+04379243/points/d41c8af82fe98a019fb4103277a6b93.pts 04379243/expert_verified/points_label/d41c8af82fe98a019fb4103277a6b93.seg 04379243
+03001627/points/3109a0b9f9bc5fecb4cd1bd556007aed.pts 03001627/expert_verified/points_label/3109a0b9f9bc5fecb4cd1bd556007aed.seg 03001627
+03001627/points/d38129a3301d31350b1fc43ca5e85e.pts 03001627/expert_verified/points_label/d38129a3301d31350b1fc43ca5e85e.seg 03001627
+03636649/points/495af808806f1727a753b1b88fff4abb.pts 03636649/expert_verified/points_label/495af808806f1727a753b1b88fff4abb.seg 03636649
+04379243/points/4d3cc502d4444c848cbb8bac2032149c.pts 04379243/expert_verified/points_label/4d3cc502d4444c848cbb8bac2032149c.seg 04379243
+02691156/points/ed7e1a38fe33830b87697d3904b168b.pts 02691156/expert_verified/points_label/ed7e1a38fe33830b87697d3904b168b.seg 02691156
+04379243/points/cf076ced8264a480cce90f0d61ed7a70.pts 04379243/expert_verified/points_label/cf076ced8264a480cce90f0d61ed7a70.seg 04379243
+04379243/points/c04b363fd824528bd42b9650f19dd425.pts 04379243/expert_verified/points_label/c04b363fd824528bd42b9650f19dd425.seg 04379243
+04379243/points/9705c2610980d0fdb2d0500bdfc28f70.pts 04379243/expert_verified/points_label/9705c2610980d0fdb2d0500bdfc28f70.seg 04379243
+02691156/points/de29a1335c332a5ef7bc9a344bb7bae5.pts 02691156/expert_verified/points_label/de29a1335c332a5ef7bc9a344bb7bae5.seg 02691156
+03001627/points/75d0664363f418efe461a9a9741d9415.pts 03001627/expert_verified/points_label/75d0664363f418efe461a9a9741d9415.seg 03001627
+03001627/points/3421ad5a45b85f7a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/3421ad5a45b85f7a4b3c42e318f3affc.seg 03001627
+03001627/points/c67a255a26e30abb6b9f3980da0b1dff.pts 03001627/expert_verified/points_label/c67a255a26e30abb6b9f3980da0b1dff.seg 03001627
+04379243/points/6791c92944c99c029f1deb04fb8ae481.pts 04379243/expert_verified/points_label/6791c92944c99c029f1deb04fb8ae481.seg 04379243
+04379243/points/4b5536d2e9c5b9b7febad4f49b26ec52.pts 04379243/expert_verified/points_label/4b5536d2e9c5b9b7febad4f49b26ec52.seg 04379243
+04379243/points/c5fc6c1e0d446d37acce1c6e70b58979.pts 04379243/expert_verified/points_label/c5fc6c1e0d446d37acce1c6e70b58979.seg 04379243
+03001627/points/9c8d3c5779871705d22218517e73100.pts 03001627/expert_verified/points_label/9c8d3c5779871705d22218517e73100.seg 03001627
+04379243/points/4f70d14dc276a9539a83764a2641fc5c.pts 04379243/expert_verified/points_label/4f70d14dc276a9539a83764a2641fc5c.seg 04379243
+04379243/points/9d8f0444a8c09adff0d4c8f4dd125299.pts 04379243/expert_verified/points_label/9d8f0444a8c09adff0d4c8f4dd125299.seg 04379243
+04379243/points/57fbb082f660c4f7716b680dedf77108.pts 04379243/expert_verified/points_label/57fbb082f660c4f7716b680dedf77108.seg 04379243
+02958343/points/cb19594e73992a3d51008e496c6cfd2e.pts 02958343/expert_verified/points_label/cb19594e73992a3d51008e496c6cfd2e.seg 02958343
+03624134/points/9d424831d05d363d870906b5178d97bd.pts 03624134/expert_verified/points_label/9d424831d05d363d870906b5178d97bd.seg 03624134
+03001627/points/b884ff155c4117a7508dd48e67ad44bc.pts 03001627/expert_verified/points_label/b884ff155c4117a7508dd48e67ad44bc.seg 03001627
+02958343/points/7a5eba46ba4cfac35aa429db266f0c30.pts 02958343/expert_verified/points_label/7a5eba46ba4cfac35aa429db266f0c30.seg 02958343
+02691156/points/4def53f149137451b0009f08a96f38a9.pts 02691156/expert_verified/points_label/4def53f149137451b0009f08a96f38a9.seg 02691156
+03001627/points/fa8f7c225d3b9f1def4a09e7eb872bd9.pts 03001627/expert_verified/points_label/fa8f7c225d3b9f1def4a09e7eb872bd9.seg 03001627
+04225987/points/f5d7698b5a57d61226e0640b67de606.pts 04225987/expert_verified/points_label/f5d7698b5a57d61226e0640b67de606.seg 04225987
+03001627/points/9aece6c6436cde6fd9ac1bf1eddffd24.pts 03001627/expert_verified/points_label/9aece6c6436cde6fd9ac1bf1eddffd24.seg 03001627
+04099429/points/15474cf9caa757a528eba1f0b7744e9.pts 04099429/expert_verified/points_label/15474cf9caa757a528eba1f0b7744e9.seg 04099429
+02691156/points/571cfb1da3d5b3704b5910188444efc8.pts 02691156/expert_verified/points_label/571cfb1da3d5b3704b5910188444efc8.seg 02691156
+03636649/points/5d97be0e2414bfe0a8930422448288ea.pts 03636649/expert_verified/points_label/5d97be0e2414bfe0a8930422448288ea.seg 03636649
+02958343/points/648ceaad362345518a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/648ceaad362345518a6cf8c6b92417f2.seg 02958343
+03001627/points/8a845bb67ee8486d6199d6fe090be061.pts 03001627/expert_verified/points_label/8a845bb67ee8486d6199d6fe090be061.seg 03001627
+04379243/points/3645a90e02d16f0584aa8fa8b66ba302.pts 04379243/expert_verified/points_label/3645a90e02d16f0584aa8fa8b66ba302.seg 04379243
+04379243/points/ecf3d40b14300d3c0c26b04b6b8e17a.pts 04379243/expert_verified/points_label/ecf3d40b14300d3c0c26b04b6b8e17a.seg 04379243
+04379243/points/a860e5edcaec268e615bcf72f8385966.pts 04379243/expert_verified/points_label/a860e5edcaec268e615bcf72f8385966.seg 04379243
+03001627/points/5edfec789343e0c3319f1c1eee46f332.pts 03001627/expert_verified/points_label/5edfec789343e0c3319f1c1eee46f332.seg 03001627
+02691156/points/92fb0d6a866fe7aca8607f540cc62ba.pts 02691156/expert_verified/points_label/92fb0d6a866fe7aca8607f540cc62ba.seg 02691156
+02958343/points/e4886a4d0c6ea960fe21694bd5f519d1.pts 02958343/expert_verified/points_label/e4886a4d0c6ea960fe21694bd5f519d1.seg 02958343
+03636649/points/e3ee6b31e54e95b7d42b9650f19dd425.pts 03636649/expert_verified/points_label/e3ee6b31e54e95b7d42b9650f19dd425.seg 03636649
+03467517/points/d546e034a6c659a425cd348738a8052a.pts 03467517/expert_verified/points_label/d546e034a6c659a425cd348738a8052a.seg 03467517
+03001627/points/26a6ce644504c5fa22963ea1e168015d.pts 03001627/expert_verified/points_label/26a6ce644504c5fa22963ea1e168015d.seg 03001627
+02691156/points/b2b1c1d5c757af8a7209009cfb89d4bd.pts 02691156/expert_verified/points_label/b2b1c1d5c757af8a7209009cfb89d4bd.seg 02691156
+03467517/points/4bd2492d56d6b8c537b5646da91e9ed0.pts 03467517/expert_verified/points_label/4bd2492d56d6b8c537b5646da91e9ed0.seg 03467517
+04379243/points/92ed9344484dd026dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/92ed9344484dd026dfd21203bf8b4b46.seg 04379243
+04379243/points/2d1d8a2f976387bd3145205f02ff9fc5.pts 04379243/expert_verified/points_label/2d1d8a2f976387bd3145205f02ff9fc5.seg 04379243
+03467517/points/5b7fcd85ce6fd1931377689fa4e4b2d6.pts 03467517/expert_verified/points_label/5b7fcd85ce6fd1931377689fa4e4b2d6.seg 03467517
+02691156/points/4cee36a2e8dd3b24b87697d3904b168b.pts 02691156/expert_verified/points_label/4cee36a2e8dd3b24b87697d3904b168b.seg 02691156
+03001627/points/f23c1bb951fa8909bc01640b1b5116e7.pts 03001627/expert_verified/points_label/f23c1bb951fa8909bc01640b1b5116e7.seg 03001627
+04379243/points/370b45eeeb9b11416f04d49e4de95b59.pts 04379243/expert_verified/points_label/370b45eeeb9b11416f04d49e4de95b59.seg 04379243
+03001627/points/3885255ca5d75e69da2260dc4a1fc2c6.pts 03001627/expert_verified/points_label/3885255ca5d75e69da2260dc4a1fc2c6.seg 03001627
+02691156/points/452c18f8997c53741adbb4c4e06ad649.pts 02691156/expert_verified/points_label/452c18f8997c53741adbb4c4e06ad649.seg 02691156
+03001627/points/8b39b501c9fa4d349b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/8b39b501c9fa4d349b9f2eb77f5e247e.seg 03001627
+04379243/points/94966aa8a7a6f540f6807434c358ea12.pts 04379243/expert_verified/points_label/94966aa8a7a6f540f6807434c358ea12.seg 04379243
+03001627/points/9b6f17ce2db29c4c9ae35d137ece64f9.pts 03001627/expert_verified/points_label/9b6f17ce2db29c4c9ae35d137ece64f9.seg 03001627
+03467517/points/85bef84a26a91bff9ce363b13bdd195d.pts 03467517/expert_verified/points_label/85bef84a26a91bff9ce363b13bdd195d.seg 03467517
+03624134/points/e98bc872371c852e15b040d25222e627.pts 03624134/expert_verified/points_label/e98bc872371c852e15b040d25222e627.seg 03624134
+04379243/points/5dff67091a2f7ef1ab988fe471b1bd06.pts 04379243/expert_verified/points_label/5dff67091a2f7ef1ab988fe471b1bd06.seg 04379243
+03001627/points/e6f37dff25ec4ca4f815ebdb2df45512.pts 03001627/expert_verified/points_label/e6f37dff25ec4ca4f815ebdb2df45512.seg 03001627
+02691156/points/85a15c26a6e9921ae008cc4902bfe3cd.pts 02691156/expert_verified/points_label/85a15c26a6e9921ae008cc4902bfe3cd.seg 02691156
+03001627/points/94371ddd6d62f7b762ec387b772e9e1.pts 03001627/expert_verified/points_label/94371ddd6d62f7b762ec387b772e9e1.seg 03001627
+02691156/points/4374a3b4b98e247b398db3ebdf468ed7.pts 02691156/expert_verified/points_label/4374a3b4b98e247b398db3ebdf468ed7.seg 02691156
+03948459/points/8fa02aab7237289667fdfbdf64f19325.pts 03948459/expert_verified/points_label/8fa02aab7237289667fdfbdf64f19325.seg 03948459
+04379243/points/9f1fcee83cacf964f4b6538438a0b930.pts 04379243/expert_verified/points_label/9f1fcee83cacf964f4b6538438a0b930.seg 04379243
+04225987/points/f5643778dbcd653655a834a7aafb0236.pts 04225987/expert_verified/points_label/f5643778dbcd653655a834a7aafb0236.seg 04225987
+03636649/points/cdbe11124dbf418167ac0fa90111fad0.pts 03636649/expert_verified/points_label/cdbe11124dbf418167ac0fa90111fad0.seg 03636649
+03001627/points/e3d23dc47ddd9620c9be65dfbd21428b.pts 03001627/expert_verified/points_label/e3d23dc47ddd9620c9be65dfbd21428b.seg 03001627
+03001627/points/efd0411eaf2396c4de7ed732f5aeea4.pts 03001627/expert_verified/points_label/efd0411eaf2396c4de7ed732f5aeea4.seg 03001627
+03636649/points/7ad15667f654fc08664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/7ad15667f654fc08664b3b9b23ddfcbc.seg 03636649
+04379243/points/55d5fce641343449d42b9650f19dd425.pts 04379243/expert_verified/points_label/55d5fce641343449d42b9650f19dd425.seg 04379243
+03467517/points/a31ef3a8c70b789b93f0194265a9746c.pts 03467517/expert_verified/points_label/a31ef3a8c70b789b93f0194265a9746c.seg 03467517
+03001627/points/ccfc857f35c138ede785b88cc9024b2a.pts 03001627/expert_verified/points_label/ccfc857f35c138ede785b88cc9024b2a.seg 03001627
+02691156/points/e3fd510add7b1aa3c19eb6ab3736de88.pts 02691156/expert_verified/points_label/e3fd510add7b1aa3c19eb6ab3736de88.seg 02691156
+03636649/points/213d911cc489c352b5db3f95d706a0c9.pts 03636649/expert_verified/points_label/213d911cc489c352b5db3f95d706a0c9.seg 03636649
+04225987/points/c171d90db4c4ba56cdb1768065dafd0c.pts 04225987/expert_verified/points_label/c171d90db4c4ba56cdb1768065dafd0c.seg 04225987
+03797390/points/10f6e09036350e92b3f21f1137c3c347.pts 03797390/expert_verified/points_label/10f6e09036350e92b3f21f1137c3c347.seg 03797390
+02691156/points/a374b0448461438ef3d4cc10d9776c62.pts 02691156/expert_verified/points_label/a374b0448461438ef3d4cc10d9776c62.seg 02691156
+03001627/points/b6457a76f24de9f67aa6f8353fce2005.pts 03001627/expert_verified/points_label/b6457a76f24de9f67aa6f8353fce2005.seg 03001627
+03001627/points/7fe08cd7a9b76c1dcbde89e0c48a01bf.pts 03001627/expert_verified/points_label/7fe08cd7a9b76c1dcbde89e0c48a01bf.seg 03001627
+03001627/points/58867a00409c47c0813a1237d2827540.pts 03001627/expert_verified/points_label/58867a00409c47c0813a1237d2827540.seg 03001627
+02958343/points/65e3e2893669a09cc7b48e36e31209b9.pts 02958343/expert_verified/points_label/65e3e2893669a09cc7b48e36e31209b9.seg 02958343
+03948459/points/edec08542b9312b712b38b1d99376c0b.pts 03948459/expert_verified/points_label/edec08542b9312b712b38b1d99376c0b.seg 03948459
+03636649/points/cd80cc92cf732e8d8a17805dbfb751e2.pts 03636649/expert_verified/points_label/cd80cc92cf732e8d8a17805dbfb751e2.seg 03636649
+03467517/points/87650e8ff3d85672381b7fbf79296afb.pts 03467517/expert_verified/points_label/87650e8ff3d85672381b7fbf79296afb.seg 03467517
+03636649/points/1e91664763d371937dd73da65dc0e6a7.pts 03636649/expert_verified/points_label/1e91664763d371937dd73da65dc0e6a7.seg 03636649
+04379243/points/104c8e90ecf0e5351ed672982b7954af.pts 04379243/expert_verified/points_label/104c8e90ecf0e5351ed672982b7954af.seg 04379243
+04379243/points/1834fac2f46a26f91933ffef19678834.pts 04379243/expert_verified/points_label/1834fac2f46a26f91933ffef19678834.seg 04379243
+04379243/points/ed0be8928caab4bdab610b0c94236463.pts 04379243/expert_verified/points_label/ed0be8928caab4bdab610b0c94236463.seg 04379243
+04379243/points/105f53a6471f3ceb4a420e3c1b966720.pts 04379243/expert_verified/points_label/105f53a6471f3ceb4a420e3c1b966720.seg 04379243
+04379243/points/7bf5f689da285153583ff8a5fc7c1869.pts 04379243/expert_verified/points_label/7bf5f689da285153583ff8a5fc7c1869.seg 04379243
+02958343/points/eface8341d001e9ceb01ae4a4788bd4f.pts 02958343/expert_verified/points_label/eface8341d001e9ceb01ae4a4788bd4f.seg 02958343
+03001627/points/517880899d26080471a782a4379556c7.pts 03001627/expert_verified/points_label/517880899d26080471a782a4379556c7.seg 03001627
+03001627/points/5ef3e4abd4386c8871bc6030acc85f1e.pts 03001627/expert_verified/points_label/5ef3e4abd4386c8871bc6030acc85f1e.seg 03001627
+03001627/points/3eb60e6679d1df1dde7eedbb2790491b.pts 03001627/expert_verified/points_label/3eb60e6679d1df1dde7eedbb2790491b.seg 03001627
+03001627/points/4702e6196503ff84f1c0e03f321d0b20.pts 03001627/expert_verified/points_label/4702e6196503ff84f1c0e03f321d0b20.seg 03001627
+02958343/points/b0a7789537663f7ba1ff2929b2f5cf19.pts 02958343/expert_verified/points_label/b0a7789537663f7ba1ff2929b2f5cf19.seg 02958343
+03636649/points/2ce7732982343c1d9792f6094a78f8d5.pts 03636649/expert_verified/points_label/2ce7732982343c1d9792f6094a78f8d5.seg 03636649
+03467517/points/78a75ce8dc8dc197dc2b574e941c815b.pts 03467517/expert_verified/points_label/78a75ce8dc8dc197dc2b574e941c815b.seg 03467517
+03636649/points/348d6ddf9e02cbddf647dc544bb0ab61.pts 03636649/expert_verified/points_label/348d6ddf9e02cbddf647dc544bb0ab61.seg 03636649
+03001627/points/e56087cd55cce8b4f41a4361d0ca9bc8.pts 03001627/expert_verified/points_label/e56087cd55cce8b4f41a4361d0ca9bc8.seg 03001627
+03642806/points/4d3dde22f529195bc887d5d9a11f3155.pts 03642806/expert_verified/points_label/4d3dde22f529195bc887d5d9a11f3155.seg 03642806
+03001627/points/78e1977bc5f0f4041552c6ecbda964b.pts 03001627/expert_verified/points_label/78e1977bc5f0f4041552c6ecbda964b.seg 03001627
+04379243/points/44360c91a7e91098d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/44360c91a7e91098d93768e7b9b1eabf.seg 04379243
+02691156/points/52ca6970fb09b561f9f7510373841dd9.pts 02691156/expert_verified/points_label/52ca6970fb09b561f9f7510373841dd9.seg 02691156
+02958343/points/383f8d508b6f25f565d21723f535417.pts 02958343/expert_verified/points_label/383f8d508b6f25f565d21723f535417.seg 02958343
+03001627/points/d6da5457b0682e24696b74614952b2d0.pts 03001627/expert_verified/points_label/d6da5457b0682e24696b74614952b2d0.seg 03001627
+02691156/points/9f5dda6f01bbe29bf810506e9ae2dcc2.pts 02691156/expert_verified/points_label/9f5dda6f01bbe29bf810506e9ae2dcc2.seg 02691156
+03467517/points/35e77edd3ae6ad4993f0194265a9746c.pts 03467517/expert_verified/points_label/35e77edd3ae6ad4993f0194265a9746c.seg 03467517
+03001627/points/590d04438aeffbb58f447453fccbd9d3.pts 03001627/expert_verified/points_label/590d04438aeffbb58f447453fccbd9d3.seg 03001627
+03001627/points/cdfa898eadf316122056b4bd5d870b47.pts 03001627/expert_verified/points_label/cdfa898eadf316122056b4bd5d870b47.seg 03001627
+03001627/points/8e678a54f2ee4e5e492d9da2668ec34c.pts 03001627/expert_verified/points_label/8e678a54f2ee4e5e492d9da2668ec34c.seg 03001627
+04379243/points/1804dd6f5c827c1a4bf8d5f43e57b138.pts 04379243/expert_verified/points_label/1804dd6f5c827c1a4bf8d5f43e57b138.seg 04379243
+02691156/points/23eed87ac79f1b152f9c405cf0817830.pts 02691156/expert_verified/points_label/23eed87ac79f1b152f9c405cf0817830.seg 02691156
+02691156/points/97bc5fffde64178f43afdb9c81ff2967.pts 02691156/expert_verified/points_label/97bc5fffde64178f43afdb9c81ff2967.seg 02691156
+03001627/points/3b1f1913f2bc0dc171dbe96559c7bcae.pts 03001627/expert_verified/points_label/3b1f1913f2bc0dc171dbe96559c7bcae.seg 03001627
+04379243/points/82e1c0b874b0a9e035cd53a06b1d2317.pts 04379243/expert_verified/points_label/82e1c0b874b0a9e035cd53a06b1d2317.seg 04379243
+03001627/points/e0a0d5c2ba6fdca215b55266697a17be.pts 03001627/expert_verified/points_label/e0a0d5c2ba6fdca215b55266697a17be.seg 03001627
+03636649/points/9b558be5e2b60e3eb09f0ca9c143fdfd.pts 03636649/expert_verified/points_label/9b558be5e2b60e3eb09f0ca9c143fdfd.seg 03636649
+03001627/points/813be9a8485050571563f0911e3e5fc0.pts 03001627/expert_verified/points_label/813be9a8485050571563f0911e3e5fc0.seg 03001627
+02958343/points/6ca9967adcf862a461c6c61410fc904b.pts 02958343/expert_verified/points_label/6ca9967adcf862a461c6c61410fc904b.seg 02958343
+03624134/points/5663637633c938d1395331ebe4786cd.pts 03624134/expert_verified/points_label/5663637633c938d1395331ebe4786cd.seg 03624134
+03636649/points/ec8dc2311d381a9e3d39d8012919dd25.pts 03636649/expert_verified/points_label/ec8dc2311d381a9e3d39d8012919dd25.seg 03636649
+04379243/points/b685208ccf38786a6f1e07a56c129dfc.pts 04379243/expert_verified/points_label/b685208ccf38786a6f1e07a56c129dfc.seg 04379243
+03636649/points/ce621e6df1ab9ae35d2cdb96c1afe34.pts 03636649/expert_verified/points_label/ce621e6df1ab9ae35d2cdb96c1afe34.seg 03636649
+02691156/points/b092d523bdd320e4ca8607f540cc62ba.pts 02691156/expert_verified/points_label/b092d523bdd320e4ca8607f540cc62ba.seg 02691156
+04379243/points/401fe961ec7b0cb5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/401fe961ec7b0cb5dcfcef693e7ec696.seg 04379243
+04225987/points/1e5fd1de723cc66cbb1ed6d4d8526a19.pts 04225987/expert_verified/points_label/1e5fd1de723cc66cbb1ed6d4d8526a19.seg 04225987
+03001627/points/b987a2ca54c6ddecb74697ced5978572.pts 03001627/expert_verified/points_label/b987a2ca54c6ddecb74697ced5978572.seg 03001627
+04379243/points/3e42e3386f4aea9277cf3bb06f394ad.pts 04379243/expert_verified/points_label/3e42e3386f4aea9277cf3bb06f394ad.seg 04379243
+02958343/points/1198255e3d20d2f323f3ca54768fe2ee.pts 02958343/expert_verified/points_label/1198255e3d20d2f323f3ca54768fe2ee.seg 02958343
+04379243/points/2b564ff0989caf58ab610b0c94236463.pts 04379243/expert_verified/points_label/2b564ff0989caf58ab610b0c94236463.seg 04379243
+03636649/points/941271c5d9b192eaccd8f9b9403fd602.pts 03636649/expert_verified/points_label/941271c5d9b192eaccd8f9b9403fd602.seg 03636649
+02691156/points/6aeae52e38f892a7e0091ae06332b2d5.pts 02691156/expert_verified/points_label/6aeae52e38f892a7e0091ae06332b2d5.seg 02691156
+04379243/points/4cdfd605352adcb0da13974b3533fb59.pts 04379243/expert_verified/points_label/4cdfd605352adcb0da13974b3533fb59.seg 04379243
+04379243/points/7c24e4f8778e224799a5e8f6c5baa224.pts 04379243/expert_verified/points_label/7c24e4f8778e224799a5e8f6c5baa224.seg 04379243
+03001627/points/6272c21e439e0205c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/6272c21e439e0205c8687ff9b0b4e4ac.seg 03001627
+02691156/points/acd8f367c36a3d84fc7a6d75b3d807ff.pts 02691156/expert_verified/points_label/acd8f367c36a3d84fc7a6d75b3d807ff.seg 02691156
+04379243/points/d58bdda16e6bba6f796740c80be6053.pts 04379243/expert_verified/points_label/d58bdda16e6bba6f796740c80be6053.seg 04379243
+03636649/points/f97506704760741b460fa882e24b7e4a.pts 03636649/expert_verified/points_label/f97506704760741b460fa882e24b7e4a.seg 03636649
+03636649/points/9f5c3ea9f8254b8bd42b9650f19dd425.pts 03636649/expert_verified/points_label/9f5c3ea9f8254b8bd42b9650f19dd425.seg 03636649
+03797390/points/79e673336e836d1333becb3a9550cbb1.pts 03797390/expert_verified/points_label/79e673336e836d1333becb3a9550cbb1.seg 03797390
+03948459/points/2d573d37cce5b48b9f433921788191f3.pts 03948459/expert_verified/points_label/2d573d37cce5b48b9f433921788191f3.seg 03948459
+04379243/points/7aaad1c5c2be8c24a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/7aaad1c5c2be8c24a9ed7bb5b55809f8.seg 04379243
+04379243/points/c6c412c771ab0ae015a34fa27bdf3d03.pts 04379243/expert_verified/points_label/c6c412c771ab0ae015a34fa27bdf3d03.seg 04379243
+03467517/points/819251e11b46438ff6ff9bebca919581.pts 03467517/expert_verified/points_label/819251e11b46438ff6ff9bebca919581.seg 03467517
+03001627/points/51f4ea68be319fe8990e5087098e19c.pts 03001627/expert_verified/points_label/51f4ea68be319fe8990e5087098e19c.seg 03001627
+03467517/points/66b24797480ba515d57700c05b1862d8.pts 03467517/expert_verified/points_label/66b24797480ba515d57700c05b1862d8.seg 03467517
+03790512/points/9d3b07f4475d501e8249f134aca4c817.pts 03790512/expert_verified/points_label/9d3b07f4475d501e8249f134aca4c817.seg 03790512
+04379243/points/72cfb60a075369ab7252c133a7e17d94.pts 04379243/expert_verified/points_label/72cfb60a075369ab7252c133a7e17d94.seg 04379243
+04379243/points/12a2733fc5f6b31ef8574543281e850f.pts 04379243/expert_verified/points_label/12a2733fc5f6b31ef8574543281e850f.seg 04379243
+03636649/points/aed950102f1e9c7a659dda512294c744.pts 03636649/expert_verified/points_label/aed950102f1e9c7a659dda512294c744.seg 03636649
+03001627/points/3126c6e9277b775b245ac1812a4e4d0c.pts 03001627/expert_verified/points_label/3126c6e9277b775b245ac1812a4e4d0c.seg 03001627
+02958343/points/8decf42b145f98d148d2ba4615e03b21.pts 02958343/expert_verified/points_label/8decf42b145f98d148d2ba4615e03b21.seg 02958343
+03467517/points/2f9bd6e61e038d8fd4b4ae2ff4c58b57.pts 03467517/expert_verified/points_label/2f9bd6e61e038d8fd4b4ae2ff4c58b57.seg 03467517
+03467517/points/6a983b2ff1b8a42e1285d7bfa3e922e4.pts 03467517/expert_verified/points_label/6a983b2ff1b8a42e1285d7bfa3e922e4.seg 03467517
+03261776/points/e33d6e8e39a75268957b6a4f3924d982.pts 03261776/expert_verified/points_label/e33d6e8e39a75268957b6a4f3924d982.seg 03261776
+04379243/points/fe2f2b120d84ed909b896cf832106977.pts 04379243/expert_verified/points_label/fe2f2b120d84ed909b896cf832106977.seg 04379243
+02958343/points/1328a95d69cefe32f200a72c9245aee7.pts 02958343/expert_verified/points_label/1328a95d69cefe32f200a72c9245aee7.seg 02958343
+03001627/points/58409b308683d908ca2bec46a3b47519.pts 03001627/expert_verified/points_label/58409b308683d908ca2bec46a3b47519.seg 03001627
+03001627/points/507a5070cde81fd867936ca58e67cec6.pts 03001627/expert_verified/points_label/507a5070cde81fd867936ca58e67cec6.seg 03001627
+04379243/points/ec68e1edbb7e9bc7e93cebb6ba9ca43e.pts 04379243/expert_verified/points_label/ec68e1edbb7e9bc7e93cebb6ba9ca43e.seg 04379243
+03001627/points/7facccfa81369078a8930422448288ea.pts 03001627/expert_verified/points_label/7facccfa81369078a8930422448288ea.seg 03001627
+03001627/points/be0c5a0e91c99e804e1a714ee619465a.pts 03001627/expert_verified/points_label/be0c5a0e91c99e804e1a714ee619465a.seg 03001627
+03001627/points/d73e46e07bdb3fe75fe4ecea39e8bd40.pts 03001627/expert_verified/points_label/d73e46e07bdb3fe75fe4ecea39e8bd40.seg 03001627
+03636649/points/122fb7bfa09c184ca249f8489bc060dd.pts 03636649/expert_verified/points_label/122fb7bfa09c184ca249f8489bc060dd.seg 03636649
+03001627/points/9ef3323c6ced7dfef313a0fb5fd4d79.pts 03001627/expert_verified/points_label/9ef3323c6ced7dfef313a0fb5fd4d79.seg 03001627
+02691156/points/d8452d4fe51f2bab3554ccf8c30febe7.pts 02691156/expert_verified/points_label/d8452d4fe51f2bab3554ccf8c30febe7.seg 02691156
+02691156/points/d59d75f52ac9b241ae0d772a1c85134a.pts 02691156/expert_verified/points_label/d59d75f52ac9b241ae0d772a1c85134a.seg 02691156
+02691156/points/f9e80ce23d9536623fddedb0bf24c68a.pts 02691156/expert_verified/points_label/f9e80ce23d9536623fddedb0bf24c68a.seg 02691156
+02691156/points/e69631d34410f99ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/e69631d34410f99ac4f72bf08dc79a6.seg 02691156
+04379243/points/f7196ec7d732af5166decb1b3cdc5557.pts 04379243/expert_verified/points_label/f7196ec7d732af5166decb1b3cdc5557.seg 04379243
+03261776/points/c5e47b627cb7818f17e22b7299bb7bc6.pts 03261776/expert_verified/points_label/c5e47b627cb7818f17e22b7299bb7bc6.seg 03261776
+03001627/points/5a60c649a221293d72ed554eb3baedcc.pts 03001627/expert_verified/points_label/5a60c649a221293d72ed554eb3baedcc.seg 03001627
+04379243/points/b117aac2e13630bb5d23c9bbb429abf9.pts 04379243/expert_verified/points_label/b117aac2e13630bb5d23c9bbb429abf9.seg 04379243
+03642806/points/e4c34c87ed1bc2191ef7a71d6e01357e.pts 03642806/expert_verified/points_label/e4c34c87ed1bc2191ef7a71d6e01357e.seg 03642806
+02691156/points/3fb7ceab42d7b17219ba010ddb4974fe.pts 02691156/expert_verified/points_label/3fb7ceab42d7b17219ba010ddb4974fe.seg 02691156
+04379243/points/fc472163ea149f8e19fb4103277a6b93.pts 04379243/expert_verified/points_label/fc472163ea149f8e19fb4103277a6b93.seg 04379243
+03001627/points/5ef73c9bee1b4adcd019a8a03d4a2a3.pts 03001627/expert_verified/points_label/5ef73c9bee1b4adcd019a8a03d4a2a3.seg 03001627
+02691156/points/384e72f69e6f24404cb288947cda4a2c.pts 02691156/expert_verified/points_label/384e72f69e6f24404cb288947cda4a2c.seg 02691156
+03636649/points/3fca250636e2b47a8d0fc77aab7a8d33.pts 03636649/expert_verified/points_label/3fca250636e2b47a8d0fc77aab7a8d33.seg 03636649
+04379243/points/46957ba752c3554bd42b9650f19dd425.pts 04379243/expert_verified/points_label/46957ba752c3554bd42b9650f19dd425.seg 04379243
+03001627/points/bce7ff621a5440bb34ee5c94ebdf7f1d.pts 03001627/expert_verified/points_label/bce7ff621a5440bb34ee5c94ebdf7f1d.seg 03001627
+02691156/points/66ae19841350ac2d4ba2821676102936.pts 02691156/expert_verified/points_label/66ae19841350ac2d4ba2821676102936.seg 02691156
+03001627/points/e53b07b648e8d041107a17cfae0b6df6.pts 03001627/expert_verified/points_label/e53b07b648e8d041107a17cfae0b6df6.seg 03001627
+03624134/points/d1c757548ead4a4d8d03ca4865da5b6.pts 03624134/expert_verified/points_label/d1c757548ead4a4d8d03ca4865da5b6.seg 03624134
+04379243/points/d19b4bde0766723c9b3bb0ef2a08be04.pts 04379243/expert_verified/points_label/d19b4bde0766723c9b3bb0ef2a08be04.seg 04379243
+03001627/points/6ecec258a1b6fe2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/6ecec258a1b6fe2a6fee8e2140acec9.seg 03001627
+02691156/points/ab95a4e7f2d3cf9ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ab95a4e7f2d3cf9ca8607f540cc62ba.seg 02691156
+03624134/points/b61c9b5f29ad581c860a45e027159a9a.pts 03624134/expert_verified/points_label/b61c9b5f29ad581c860a45e027159a9a.seg 03624134
+03001627/points/c7da2d72f9927f1881dff5c2e57ad46e.pts 03001627/expert_verified/points_label/c7da2d72f9927f1881dff5c2e57ad46e.seg 03001627
+04379243/points/b9886dd3c4a651f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/b9886dd3c4a651f3664b3b9b23ddfcbc.seg 04379243
+02691156/points/abc465975af79827dfb86dddee1d6ac3.pts 02691156/expert_verified/points_label/abc465975af79827dfb86dddee1d6ac3.seg 02691156
+03636649/points/7be01530bf43f2ed8a83637b92bdc7.pts 03636649/expert_verified/points_label/7be01530bf43f2ed8a83637b92bdc7.seg 03636649
+02691156/points/b81339a2f1dbc0de9598ceb95c7f0752.pts 02691156/expert_verified/points_label/b81339a2f1dbc0de9598ceb95c7f0752.seg 02691156
+03001627/points/69709cb300ae3784ee72e5c46412e9a7.pts 03001627/expert_verified/points_label/69709cb300ae3784ee72e5c46412e9a7.seg 03001627
+03001627/points/ec25a41ca233ed096e5a467428553af2.pts 03001627/expert_verified/points_label/ec25a41ca233ed096e5a467428553af2.seg 03001627
+04379243/points/4e9394f9f64859aef4ef86403cccc399.pts 04379243/expert_verified/points_label/4e9394f9f64859aef4ef86403cccc399.seg 04379243
+04379243/points/c477235c02413bfc44d2ca62bee212a0.pts 04379243/expert_verified/points_label/c477235c02413bfc44d2ca62bee212a0.seg 04379243
+04379243/points/41b0491fdb14d41bd25ca1a27cf9bdec.pts 04379243/expert_verified/points_label/41b0491fdb14d41bd25ca1a27cf9bdec.seg 04379243
+02691156/points/59eecc0a983a27a8130cc35407fba74a.pts 02691156/expert_verified/points_label/59eecc0a983a27a8130cc35407fba74a.seg 02691156
+03467517/points/22129fab1497437cc3f912172873d52f.pts 03467517/expert_verified/points_label/22129fab1497437cc3f912172873d52f.seg 03467517
+04379243/points/6365205d2324234fc8a1efeb4b91d393.pts 04379243/expert_verified/points_label/6365205d2324234fc8a1efeb4b91d393.seg 04379243
+03001627/points/2a75b2bb82d7f77c3f9d6e0ade5188b0.pts 03001627/expert_verified/points_label/2a75b2bb82d7f77c3f9d6e0ade5188b0.seg 03001627
+03001627/points/8f226d6b3089d3b7bca860dd9b04c52c.pts 03001627/expert_verified/points_label/8f226d6b3089d3b7bca860dd9b04c52c.seg 03001627
+03624134/points/5e515b18ed17a418b056c98b2e5e5e4e.pts 03624134/expert_verified/points_label/5e515b18ed17a418b056c98b2e5e5e4e.seg 03624134
+02691156/points/5bc41589eba11a4e15477d594f1fbd99.pts 02691156/expert_verified/points_label/5bc41589eba11a4e15477d594f1fbd99.seg 02691156
+03001627/points/2bbf00f0c583fd8a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/2bbf00f0c583fd8a4b3c42e318f3affc.seg 03001627
+03790512/points/9e9300a6e1caec217395d58407f193ba.pts 03790512/expert_verified/points_label/9e9300a6e1caec217395d58407f193ba.seg 03790512
+03636649/points/81894e0739e3fea9d49b2e04785f8492.pts 03636649/expert_verified/points_label/81894e0739e3fea9d49b2e04785f8492.seg 03636649
+02958343/points/cdc8453c63ffc13e20f29d4da2b76f7a.pts 02958343/expert_verified/points_label/cdc8453c63ffc13e20f29d4da2b76f7a.seg 02958343
+04379243/points/7a0b6685a30298fb8ae8d7de284e7d2.pts 04379243/expert_verified/points_label/7a0b6685a30298fb8ae8d7de284e7d2.seg 04379243
+03001627/points/c5ee6b77f9f84adeed52100e321c9f3e.pts 03001627/expert_verified/points_label/c5ee6b77f9f84adeed52100e321c9f3e.seg 03001627
+04379243/points/4e87db85d5dab96822339a4b4aacca6b.pts 04379243/expert_verified/points_label/4e87db85d5dab96822339a4b4aacca6b.seg 04379243
+02958343/points/6dbae14e481e8fb9333e0bf0b765fa12.pts 02958343/expert_verified/points_label/6dbae14e481e8fb9333e0bf0b765fa12.seg 02958343
+03467517/points/bad8978268948ea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/bad8978268948ea3d3eb77b119df6d.seg 03467517
+03001627/points/c552529c54b0612e53041c49040be3d5.pts 03001627/expert_verified/points_label/c552529c54b0612e53041c49040be3d5.seg 03001627
+02958343/points/dca8ed788347b28c171cf359a50c99bc.pts 02958343/expert_verified/points_label/dca8ed788347b28c171cf359a50c99bc.seg 02958343
+04379243/points/99720647e210078beaf288f952624966.pts 04379243/expert_verified/points_label/99720647e210078beaf288f952624966.seg 04379243
+03001627/points/b1f4b2c32f8a2fa77ee217c21e683487.pts 03001627/expert_verified/points_label/b1f4b2c32f8a2fa77ee217c21e683487.seg 03001627
+04379243/points/41cdb5b619790d5a74eb542502c2205f.pts 04379243/expert_verified/points_label/41cdb5b619790d5a74eb542502c2205f.seg 04379243
+04379243/points/a25141a07c77c25467de2aaf749e5256.pts 04379243/expert_verified/points_label/a25141a07c77c25467de2aaf749e5256.seg 04379243
+04379243/points/e9c3a3aa2278608bec15b38012222fa8.pts 04379243/expert_verified/points_label/e9c3a3aa2278608bec15b38012222fa8.seg 04379243
+03636649/points/8e025c4aa0b0201a81a172d69c52a28a.pts 03636649/expert_verified/points_label/8e025c4aa0b0201a81a172d69c52a28a.seg 03636649
+03001627/points/e175bc785390e8f6c05575120a46cd3b.pts 03001627/expert_verified/points_label/e175bc785390e8f6c05575120a46cd3b.seg 03001627
+02691156/points/ecb4ae05d7dd135a619550d2af0b6117.pts 02691156/expert_verified/points_label/ecb4ae05d7dd135a619550d2af0b6117.seg 02691156
+02691156/points/87069f21b11c180799a771d197c7b487.pts 02691156/expert_verified/points_label/87069f21b11c180799a771d197c7b487.seg 02691156
+02691156/points/ca11efc8928c10908b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/ca11efc8928c10908b96ae1a0a8b84ec.seg 02691156
+03790512/points/365c1f92a54c8cb52a45a87054fa7272.pts 03790512/expert_verified/points_label/365c1f92a54c8cb52a45a87054fa7272.seg 03790512
+03636649/points/23040992da19679aaa7cb30470f3273c.pts 03636649/expert_verified/points_label/23040992da19679aaa7cb30470f3273c.seg 03636649
+02691156/points/9441549e323552f2f001dddaf44c449b.pts 02691156/expert_verified/points_label/9441549e323552f2f001dddaf44c449b.seg 02691156
+02958343/points/17bfc66c6bc0a99d68c415156b102065.pts 02958343/expert_verified/points_label/17bfc66c6bc0a99d68c415156b102065.seg 02958343
+03001627/points/671d34c27cc0f1bf2deeb5ec76cf103b.pts 03001627/expert_verified/points_label/671d34c27cc0f1bf2deeb5ec76cf103b.seg 03001627
+03642806/points/464edfe14e9fa45c3394926146371698.pts 03642806/expert_verified/points_label/464edfe14e9fa45c3394926146371698.seg 03642806
+04379243/points/279c8601278e827dab610b0c94236463.pts 04379243/expert_verified/points_label/279c8601278e827dab610b0c94236463.seg 04379243
+04379243/points/29d9c6d84c6a126917b431cae0dd70ed.pts 04379243/expert_verified/points_label/29d9c6d84c6a126917b431cae0dd70ed.seg 04379243
+04379243/points/5d3d902051858e56ed1397afd2317e5b.pts 04379243/expert_verified/points_label/5d3d902051858e56ed1397afd2317e5b.seg 04379243
+02958343/points/aa78d4465ae18312711f9e3a79a13dcf.pts 02958343/expert_verified/points_label/aa78d4465ae18312711f9e3a79a13dcf.seg 02958343
+03001627/points/d561ff6788ab46517b016084e2ae95e.pts 03001627/expert_verified/points_label/d561ff6788ab46517b016084e2ae95e.seg 03001627
+03001627/points/b24ed89d85b74771216fff6094e6695c.pts 03001627/expert_verified/points_label/b24ed89d85b74771216fff6094e6695c.seg 03001627
+03636649/points/f6eeb5d67c32616648fda83c10428379.pts 03636649/expert_verified/points_label/f6eeb5d67c32616648fda83c10428379.seg 03636649
+03001627/points/3b3a9f4e3aa9f2f4d39a194653571dfc.pts 03001627/expert_verified/points_label/3b3a9f4e3aa9f2f4d39a194653571dfc.seg 03001627
+03001627/points/bd0b06e158bcee8ac0d89fc15154c9a2.pts 03001627/expert_verified/points_label/bd0b06e158bcee8ac0d89fc15154c9a2.seg 03001627
+04379243/points/89251f322490e7047e38640a31d0bc3.pts 04379243/expert_verified/points_label/89251f322490e7047e38640a31d0bc3.seg 04379243
+03001627/points/935f5e58e9e15231febad4f49b26ec52.pts 03001627/expert_verified/points_label/935f5e58e9e15231febad4f49b26ec52.seg 03001627
+03467517/points/8f59fee745f1e37ea5c8e9fc8b2242fd.pts 03467517/expert_verified/points_label/8f59fee745f1e37ea5c8e9fc8b2242fd.seg 03467517
+02691156/points/fddcb2b3d45ce98e641c309f1fd7e183.pts 02691156/expert_verified/points_label/fddcb2b3d45ce98e641c309f1fd7e183.seg 02691156
+03001627/points/d915d2f1664bf76e71a70be9f12ce8b0.pts 03001627/expert_verified/points_label/d915d2f1664bf76e71a70be9f12ce8b0.seg 03001627
+02958343/points/1ae9732840a315afab2c2809513f396e.pts 02958343/expert_verified/points_label/1ae9732840a315afab2c2809513f396e.seg 02958343
+04379243/points/b658e507c84d6202610c2a68437007d6.pts 04379243/expert_verified/points_label/b658e507c84d6202610c2a68437007d6.seg 04379243
+02958343/points/707d1e19b465d075adbfb30d8d1b297e.pts 02958343/expert_verified/points_label/707d1e19b465d075adbfb30d8d1b297e.seg 02958343
+04379243/points/5b74412eba257e5182b796aa5845e185.pts 04379243/expert_verified/points_label/5b74412eba257e5182b796aa5845e185.seg 04379243
+03636649/points/a801be11157a7f243d39d8012919dd25.pts 03636649/expert_verified/points_label/a801be11157a7f243d39d8012919dd25.seg 03636649
+02691156/points/26e10058cf9835aaca8607f540cc62ba.pts 02691156/expert_verified/points_label/26e10058cf9835aaca8607f540cc62ba.seg 02691156
+03636649/points/bc704db7b62582e5d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/bc704db7b62582e5d1cbf3e52b9b6237.seg 03636649
+02691156/points/d2e2e23f5be557e2d1ab3b031c100cb1.pts 02691156/expert_verified/points_label/d2e2e23f5be557e2d1ab3b031c100cb1.seg 02691156
+03001627/points/920af478601258e24762da3a3017ade.pts 03001627/expert_verified/points_label/920af478601258e24762da3a3017ade.seg 03001627
+03001627/points/3ffd794e5100258483bc207d8a5912e3.pts 03001627/expert_verified/points_label/3ffd794e5100258483bc207d8a5912e3.seg 03001627
+04379243/points/69c536d9e450cb79436e6787c76ef3f0.pts 04379243/expert_verified/points_label/69c536d9e450cb79436e6787c76ef3f0.seg 04379243
+04379243/points/6cf6a546e2ecbffe815a7efb12912.pts 04379243/expert_verified/points_label/6cf6a546e2ecbffe815a7efb12912.seg 04379243
+03001627/points/815f436a40c28da51f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/815f436a40c28da51f56aa11cd5e0c3e.seg 03001627
+03642806/points/4504a4d244d05ddbf5f79806bd65844f.pts 03642806/expert_verified/points_label/4504a4d244d05ddbf5f79806bd65844f.seg 03642806
+04379243/points/8ad9868947e7391113625562b56161f0.pts 04379243/expert_verified/points_label/8ad9868947e7391113625562b56161f0.seg 04379243
+03001627/points/6b9c3d42724275cf7a5c8cd74a7bc29a.pts 03001627/expert_verified/points_label/6b9c3d42724275cf7a5c8cd74a7bc29a.seg 03001627
+04379243/points/67e32538a35a5011a0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/67e32538a35a5011a0ab1d82ef09f78f.seg 04379243
+03624134/points/2743e37a65e198d51592d7a04a86fa53.pts 03624134/expert_verified/points_label/2743e37a65e198d51592d7a04a86fa53.seg 03624134
+04379243/points/12df5c215f4364b7fe388cf6c4c3705d.pts 04379243/expert_verified/points_label/12df5c215f4364b7fe388cf6c4c3705d.seg 04379243
+02958343/points/55e0897c0ac089a6da5cb3be8feeaadc.pts 02958343/expert_verified/points_label/55e0897c0ac089a6da5cb3be8feeaadc.seg 02958343
+02773838/points/4e4fcfffec161ecaed13f430b2941481.pts 02773838/expert_verified/points_label/4e4fcfffec161ecaed13f430b2941481.seg 02773838
+04379243/points/8ce70dead5119191cc3492a06e9bd850.pts 04379243/expert_verified/points_label/8ce70dead5119191cc3492a06e9bd850.seg 04379243
+02691156/points/e033b6ad34586a86cc1c9e8218bfe7fc.pts 02691156/expert_verified/points_label/e033b6ad34586a86cc1c9e8218bfe7fc.seg 02691156
+03636649/points/600b2f00113ad714e2367b9e27f16a71.pts 03636649/expert_verified/points_label/600b2f00113ad714e2367b9e27f16a71.seg 03636649
+04379243/points/a74cad1781afed87dcfcef693e7ec696.pts 04379243/expert_verified/points_label/a74cad1781afed87dcfcef693e7ec696.seg 04379243
+03001627/points/5402eecc67e489502fa77440dcb93214.pts 03001627/expert_verified/points_label/5402eecc67e489502fa77440dcb93214.seg 03001627
+03001627/points/d5bd6ea417eba6ce456cbf78e1e89022.pts 03001627/expert_verified/points_label/d5bd6ea417eba6ce456cbf78e1e89022.seg 03001627
+03001627/points/d4edd167061dac5f52a3901fa1436b1a.pts 03001627/expert_verified/points_label/d4edd167061dac5f52a3901fa1436b1a.seg 03001627
+03636649/points/9fc3ddc511f4ef62dced62abd38a02b0.pts 03636649/expert_verified/points_label/9fc3ddc511f4ef62dced62abd38a02b0.seg 03636649
+02691156/points/92a83ecaa10e8d3f78e919a72d9a39e7.pts 02691156/expert_verified/points_label/92a83ecaa10e8d3f78e919a72d9a39e7.seg 02691156
+03001627/points/fee36ec8c8ae503fc68456e8da5b9a30.pts 03001627/expert_verified/points_label/fee36ec8c8ae503fc68456e8da5b9a30.seg 03001627
+04379243/points/1df409cfefbb51658b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/1df409cfefbb51658b9b51ae4415d5aa.seg 04379243
+03001627/points/76283716a2c6586e266d673a6188bf4c.pts 03001627/expert_verified/points_label/76283716a2c6586e266d673a6188bf4c.seg 03001627
+04379243/points/29b2aaca87d19a3c5759f4335ff2e408.pts 04379243/expert_verified/points_label/29b2aaca87d19a3c5759f4335ff2e408.seg 04379243
+04379243/points/21ca4d36a0f6fa69b937d98d58545fa.pts 04379243/expert_verified/points_label/21ca4d36a0f6fa69b937d98d58545fa.seg 04379243
+02691156/points/da1acb401541235be4d2773f0358b43b.pts 02691156/expert_verified/points_label/da1acb401541235be4d2773f0358b43b.seg 02691156
+04379243/points/553c416f33c5e5e18b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/553c416f33c5e5e18b9b51ae4415d5aa.seg 04379243
+04379243/points/174832b73cd6d91c9856fa70a578baeb.pts 04379243/expert_verified/points_label/174832b73cd6d91c9856fa70a578baeb.seg 04379243
+02691156/points/1c2e9dedbcf511e616a077c4c0fc1181.pts 02691156/expert_verified/points_label/1c2e9dedbcf511e616a077c4c0fc1181.seg 02691156
+03001627/points/893c689b192bbe33ebadcdfba7971b71.pts 03001627/expert_verified/points_label/893c689b192bbe33ebadcdfba7971b71.seg 03001627
+04379243/points/52037005fbff92d08fa35606145b47dc.pts 04379243/expert_verified/points_label/52037005fbff92d08fa35606145b47dc.seg 04379243
+04225987/points/e38a4e6fb32b51a1bebb1fbb949ea955.pts 04225987/expert_verified/points_label/e38a4e6fb32b51a1bebb1fbb949ea955.seg 04225987
+03636649/points/42bc0dce81734d892610e2a20d7c4b61.pts 03636649/expert_verified/points_label/42bc0dce81734d892610e2a20d7c4b61.seg 03636649
+04379243/points/cb7ebc943b1b424988386fe1512ed26f.pts 04379243/expert_verified/points_label/cb7ebc943b1b424988386fe1512ed26f.seg 04379243
+03624134/points/2d6e9b23e171760c3e332fb3cb6ebe50.pts 03624134/expert_verified/points_label/2d6e9b23e171760c3e332fb3cb6ebe50.seg 03624134
+04379243/points/d05ff7b47febe58a656db3f863b4b796.pts 04379243/expert_verified/points_label/d05ff7b47febe58a656db3f863b4b796.seg 04379243
+03636649/points/e178ab3b967c7fddc901d9dddb735c9f.pts 03636649/expert_verified/points_label/e178ab3b967c7fddc901d9dddb735c9f.seg 03636649
+04379243/points/527b2d1e964f056383be1aa5a5ab0c80.pts 04379243/expert_verified/points_label/527b2d1e964f056383be1aa5a5ab0c80.seg 04379243
+03001627/points/f1a1bb6ad29d703078d928ba1c4a6f75.pts 03001627/expert_verified/points_label/f1a1bb6ad29d703078d928ba1c4a6f75.seg 03001627
+04379243/points/ed9dc0937009dc031311158f08f2982a.pts 04379243/expert_verified/points_label/ed9dc0937009dc031311158f08f2982a.seg 04379243
+02691156/points/e41c5719ad09055f1b880c747ee1f83.pts 02691156/expert_verified/points_label/e41c5719ad09055f1b880c747ee1f83.seg 02691156
+04379243/points/34bbe284f7499df071a782a4379556c7.pts 04379243/expert_verified/points_label/34bbe284f7499df071a782a4379556c7.seg 04379243
+02691156/points/973df01cea43c7f690b1d6deb98feec6.pts 02691156/expert_verified/points_label/973df01cea43c7f690b1d6deb98feec6.seg 02691156
+03001627/points/ed97d1c954fca49851ceffe90913a32.pts 03001627/expert_verified/points_label/ed97d1c954fca49851ceffe90913a32.seg 03001627
+03001627/points/3a74e3d5172ee94fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/3a74e3d5172ee94fdef1c01cbd4ae0c.seg 03001627
+04379243/points/194b279c7e892a2d15fa8082e5524f79.pts 04379243/expert_verified/points_label/194b279c7e892a2d15fa8082e5524f79.seg 04379243
+04379243/points/23ece3bf871619366ff454af1e8947f3.pts 04379243/expert_verified/points_label/23ece3bf871619366ff454af1e8947f3.seg 04379243
+02691156/points/7de379891610f5feaf7dd1bfd65143a9.pts 02691156/expert_verified/points_label/7de379891610f5feaf7dd1bfd65143a9.seg 02691156
+04379243/points/54ba7e77a2bf5fe3158b7df020486ff2.pts 04379243/expert_verified/points_label/54ba7e77a2bf5fe3158b7df020486ff2.seg 04379243
+03001627/points/39825fb4341ebd1ccb002c1e2b5fc68b.pts 03001627/expert_verified/points_label/39825fb4341ebd1ccb002c1e2b5fc68b.seg 03001627
+03001627/points/a32febea4a0ac30171a782a4379556c7.pts 03001627/expert_verified/points_label/a32febea4a0ac30171a782a4379556c7.seg 03001627
+02691156/points/b9ba988dd9a6cf426e8b6dd39a855b69.pts 02691156/expert_verified/points_label/b9ba988dd9a6cf426e8b6dd39a855b69.seg 02691156
+02691156/points/37b1f7f02c4b87dbca8607f540cc62ba.pts 02691156/expert_verified/points_label/37b1f7f02c4b87dbca8607f540cc62ba.seg 02691156
+04379243/points/8ce538a671c6e684d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/8ce538a671c6e684d93768e7b9b1eabf.seg 04379243
+04225987/points/48bf45bffab55d7cf14c37b285d25cdf.pts 04225987/expert_verified/points_label/48bf45bffab55d7cf14c37b285d25cdf.seg 04225987
+02691156/points/820ba20e5da8325f19ba010ddb4974fe.pts 02691156/expert_verified/points_label/820ba20e5da8325f19ba010ddb4974fe.seg 02691156
+02691156/points/ff52c059efaca3c1ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ff52c059efaca3c1ca8607f540cc62ba.seg 02691156
+04379243/points/99737ff619cae25d6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/99737ff619cae25d6effbd64ad6b71b8.seg 04379243
+04379243/points/e3b7fbed310c2c397c8d78b9aede742.pts 04379243/expert_verified/points_label/e3b7fbed310c2c397c8d78b9aede742.seg 04379243
+03001627/points/e8eedd37cb054e37b59d74a7c956bd18.pts 03001627/expert_verified/points_label/e8eedd37cb054e37b59d74a7c956bd18.seg 03001627
+03790512/points/8134a965cc0b134bb37378f3c85478b4.pts 03790512/expert_verified/points_label/8134a965cc0b134bb37378f3c85478b4.seg 03790512
+03636649/points/da5f13f4048dbd72fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/da5f13f4048dbd72fcb8d8c6d4df8143.seg 03636649
+03001627/points/f5d8dd0309401ebac47a35332c17cce2.pts 03001627/expert_verified/points_label/f5d8dd0309401ebac47a35332c17cce2.seg 03001627
+02691156/points/521eab9363fdc2a07209009cfb89d4bd.pts 02691156/expert_verified/points_label/521eab9363fdc2a07209009cfb89d4bd.seg 02691156
+03636649/points/b1e552b454366a9d7787152e5befb05b.pts 03636649/expert_verified/points_label/b1e552b454366a9d7787152e5befb05b.seg 03636649
+02958343/points/8590a6c8270375e34b5a812ecf553410.pts 02958343/expert_verified/points_label/8590a6c8270375e34b5a812ecf553410.seg 02958343
+04379243/points/d46537f513283d6cdcfcef693e7ec696.pts 04379243/expert_verified/points_label/d46537f513283d6cdcfcef693e7ec696.seg 04379243
+03001627/points/60a5795c905f3bb157f5033576317e1.pts 03001627/expert_verified/points_label/60a5795c905f3bb157f5033576317e1.seg 03001627
+02691156/points/8996445c6d2407c0fb5c1b0f759e2bc1.pts 02691156/expert_verified/points_label/8996445c6d2407c0fb5c1b0f759e2bc1.seg 02691156
+03624134/points/5e15d63317014f30ceea8802f71596b5.pts 03624134/expert_verified/points_label/5e15d63317014f30ceea8802f71596b5.seg 03624134
+03642806/points/9d48ab8c41174e60888cad7f6c0e6001.pts 03642806/expert_verified/points_label/9d48ab8c41174e60888cad7f6c0e6001.seg 03642806
+04379243/points/4cd35d6ec155d39633207e4c3ac155a4.pts 04379243/expert_verified/points_label/4cd35d6ec155d39633207e4c3ac155a4.seg 04379243
+04379243/points/884d2cc0d3aa8a72640e544a5d67c33a.pts 04379243/expert_verified/points_label/884d2cc0d3aa8a72640e544a5d67c33a.seg 04379243
+03001627/points/8191bad981637a71b356ab8b24c147.pts 03001627/expert_verified/points_label/8191bad981637a71b356ab8b24c147.seg 03001627
+03261776/points/de3b9b253e8f1aaf8b15c58b209760b5.pts 03261776/expert_verified/points_label/de3b9b253e8f1aaf8b15c58b209760b5.seg 03261776
+03636649/points/5b744ac897fe8bc557f40ff86fe708ff.pts 03636649/expert_verified/points_label/5b744ac897fe8bc557f40ff86fe708ff.seg 03636649
+04379243/points/6cd84ff61583805c85e2af9bf984f0b5.pts 04379243/expert_verified/points_label/6cd84ff61583805c85e2af9bf984f0b5.seg 04379243
+04379243/points/e65066d6b0b83719c3bd24f986301745.pts 04379243/expert_verified/points_label/e65066d6b0b83719c3bd24f986301745.seg 04379243
+04379243/points/f3efcbd9745da90619fb4103277a6b93.pts 04379243/expert_verified/points_label/f3efcbd9745da90619fb4103277a6b93.seg 04379243
+04379243/points/8ac4d93e65b9d58d9b937d98d58545fa.pts 04379243/expert_verified/points_label/8ac4d93e65b9d58d9b937d98d58545fa.seg 04379243
+03636649/points/b69c3a0a46b932e3d3c1fbbc2200e255.pts 03636649/expert_verified/points_label/b69c3a0a46b932e3d3c1fbbc2200e255.seg 03636649
+03636649/points/5c7965b0835a1a241de9bf5a9c22fde.pts 03636649/expert_verified/points_label/5c7965b0835a1a241de9bf5a9c22fde.seg 03636649
+03001627/points/27ea798c55699b6d2c528d33bca1ac2.pts 03001627/expert_verified/points_label/27ea798c55699b6d2c528d33bca1ac2.seg 03001627
+03467517/points/dc623742d6d1518e19959b248340fafd.pts 03467517/expert_verified/points_label/dc623742d6d1518e19959b248340fafd.seg 03467517
+03001627/points/c6cb59e7645dd14d661ff085a0f14b7.pts 03001627/expert_verified/points_label/c6cb59e7645dd14d661ff085a0f14b7.seg 03001627
+03948459/points/a3679104af613021912d826efe946a9f.pts 03948459/expert_verified/points_label/a3679104af613021912d826efe946a9f.seg 03948459
+03467517/points/b6d2d35747549a5b93f0194265a9746c.pts 03467517/expert_verified/points_label/b6d2d35747549a5b93f0194265a9746c.seg 03467517
+02691156/points/2c1fff0653854166e7a636089598229.pts 02691156/expert_verified/points_label/2c1fff0653854166e7a636089598229.seg 02691156
+04379243/points/1040cd764facf6981190e285a2cbc9c.pts 04379243/expert_verified/points_label/1040cd764facf6981190e285a2cbc9c.seg 04379243
+03001627/points/485831d92925bf03f3d7c13662c10792.pts 03001627/expert_verified/points_label/485831d92925bf03f3d7c13662c10792.seg 03001627
+03636649/points/284986b4c72d624abd73284bc3c3cbac.pts 03636649/expert_verified/points_label/284986b4c72d624abd73284bc3c3cbac.seg 03636649
+02691156/points/4c008f39378be18bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/4c008f39378be18bc0909d98a1ff2b4.seg 02691156
+04379243/points/9611888ee0db1ecaf7d4d3ced798ad90.pts 04379243/expert_verified/points_label/9611888ee0db1ecaf7d4d3ced798ad90.seg 04379243
+03467517/points/12e30808350dd945f4b498e11fb60a4b.pts 03467517/expert_verified/points_label/12e30808350dd945f4b498e11fb60a4b.seg 03467517
+03467517/points/3243edb05f5e8803ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3243edb05f5e8803ac61a2f8346a8f.seg 03467517
+04379243/points/ec4675f62f6946118cbb8bac2032149c.pts 04379243/expert_verified/points_label/ec4675f62f6946118cbb8bac2032149c.seg 04379243
+04379243/points/eb00a4e8b33d257cad16260d4d73b56.pts 04379243/expert_verified/points_label/eb00a4e8b33d257cad16260d4d73b56.seg 04379243
+03001627/points/5607b02869c1f8a019fb4103277a6b93.pts 03001627/expert_verified/points_label/5607b02869c1f8a019fb4103277a6b93.seg 03001627
+03636649/points/d456beea1501f278f70220cd6be776f7.pts 03636649/expert_verified/points_label/d456beea1501f278f70220cd6be776f7.seg 03636649
+02691156/points/3feeb5f8ecbfcb4ba8f0518e94fcfb22.pts 02691156/expert_verified/points_label/3feeb5f8ecbfcb4ba8f0518e94fcfb22.seg 02691156
+04379243/points/fe130356df1977499c2a886f3b75f1ff.pts 04379243/expert_verified/points_label/fe130356df1977499c2a886f3b75f1ff.seg 04379243
+02958343/points/aa7f127bb8cd9db73755eb267a6f3b6b.pts 02958343/expert_verified/points_label/aa7f127bb8cd9db73755eb267a6f3b6b.seg 02958343
+04379243/points/84a3c87bba5a472af51f77a6d7299806.pts 04379243/expert_verified/points_label/84a3c87bba5a472af51f77a6d7299806.seg 04379243
+04099429/points/2de8ee55ff69502863098049d14fe32f.pts 04099429/expert_verified/points_label/2de8ee55ff69502863098049d14fe32f.seg 04099429
+03624134/points/539ff9b2a7a0329e759e4c424bcdaafe.pts 03624134/expert_verified/points_label/539ff9b2a7a0329e759e4c424bcdaafe.seg 03624134
+03948459/points/f3f6678898938575575e33965575974.pts 03948459/expert_verified/points_label/f3f6678898938575575e33965575974.seg 03948459
+04379243/points/c26dfd3453d81bf7788eb1f5e7ba6e7b.pts 04379243/expert_verified/points_label/c26dfd3453d81bf7788eb1f5e7ba6e7b.seg 04379243
+03001627/points/8117c55b8bbdbbc54c5c5c89015f1980.pts 03001627/expert_verified/points_label/8117c55b8bbdbbc54c5c5c89015f1980.seg 03001627
+03624134/points/40ccb8ac250e0ea5880595487ba7a30b.pts 03624134/expert_verified/points_label/40ccb8ac250e0ea5880595487ba7a30b.seg 03624134
+04379243/points/a0d2754011acdcc9d8a0e410093d6619.pts 04379243/expert_verified/points_label/a0d2754011acdcc9d8a0e410093d6619.seg 04379243
+03790512/points/5bd41c7d3e158ac93ff4d2f5a7608a24.pts 03790512/expert_verified/points_label/5bd41c7d3e158ac93ff4d2f5a7608a24.seg 03790512
+04379243/points/8f440a7c0e2af79f3ed0ffd59feeec00.pts 04379243/expert_verified/points_label/8f440a7c0e2af79f3ed0ffd59feeec00.seg 04379243
+03001627/points/734ac9809aada180d18df440db206fb1.pts 03001627/expert_verified/points_label/734ac9809aada180d18df440db206fb1.seg 03001627
+03001627/points/54f33a7cb3621d5ced98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/54f33a7cb3621d5ced98cca8f0ccd5f7.seg 03001627
+03001627/points/d274fc14092387c1e17e1cb731e2fa4f.pts 03001627/expert_verified/points_label/d274fc14092387c1e17e1cb731e2fa4f.seg 03001627
+03636649/points/6ccb43088eda061dbfc838749f053cf9.pts 03636649/expert_verified/points_label/6ccb43088eda061dbfc838749f053cf9.seg 03636649
+02773838/points/1b9ef45fefefa35ed13f430b2941481.pts 02773838/expert_verified/points_label/1b9ef45fefefa35ed13f430b2941481.seg 02773838
+03001627/points/35053caa62eea36c116cc4e115d5fd2.pts 03001627/expert_verified/points_label/35053caa62eea36c116cc4e115d5fd2.seg 03001627
+04379243/points/b893c20bfb5d718371a782a4379556c7.pts 04379243/expert_verified/points_label/b893c20bfb5d718371a782a4379556c7.seg 04379243
+04379243/points/1a5062241d7903076f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/1a5062241d7903076f88aa1b7f7cc6c6.seg 04379243
+02958343/points/add26d8f4f91ba04c84b95bddf75b22d.pts 02958343/expert_verified/points_label/add26d8f4f91ba04c84b95bddf75b22d.seg 02958343
+03636649/points/f85f26c5a807b22312bea13341a54c3f.pts 03636649/expert_verified/points_label/f85f26c5a807b22312bea13341a54c3f.seg 03636649
+03001627/points/8a232028c2b2cfad43649af30eba8304.pts 03001627/expert_verified/points_label/8a232028c2b2cfad43649af30eba8304.seg 03001627
+03636649/points/3a5a0f4c78e17b284f0c4075db76b7c.pts 03636649/expert_verified/points_label/3a5a0f4c78e17b284f0c4075db76b7c.seg 03636649
+04379243/points/df811f7a858750875634c21965ee6bab.pts 04379243/expert_verified/points_label/df811f7a858750875634c21965ee6bab.seg 04379243
+02691156/points/48706d323b9041d5438a95791ca4064d.pts 02691156/expert_verified/points_label/48706d323b9041d5438a95791ca4064d.seg 02691156
+03790512/points/170cfc531a4fd09fe6905ba5363784c3.pts 03790512/expert_verified/points_label/170cfc531a4fd09fe6905ba5363784c3.seg 03790512
+03467517/points/d4b2ddb52e8dcd3593f0194265a9746c.pts 03467517/expert_verified/points_label/d4b2ddb52e8dcd3593f0194265a9746c.seg 03467517
+03636649/points/2af78c0b040634e5881cd5e2fd8f0f3b.pts 03636649/expert_verified/points_label/2af78c0b040634e5881cd5e2fd8f0f3b.seg 03636649
+04379243/points/90cd6a48cf2789a9b430d97a45d5824.pts 04379243/expert_verified/points_label/90cd6a48cf2789a9b430d97a45d5824.seg 04379243
+03001627/points/43290694390ad1adfc735c9ceab0161a.pts 03001627/expert_verified/points_label/43290694390ad1adfc735c9ceab0161a.seg 03001627
+03636649/points/ed57181b9e7644a3f51f77a6d7299806.pts 03636649/expert_verified/points_label/ed57181b9e7644a3f51f77a6d7299806.seg 03636649
+03261776/points/a9661a8bb610d902957b6a4f3924d982.pts 03261776/expert_verified/points_label/a9661a8bb610d902957b6a4f3924d982.seg 03261776
+02691156/points/b31bbc50a0d3a4366cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/b31bbc50a0d3a4366cf1b4a8fc3914e.seg 02691156
+03001627/points/cd5ad4afabaed0d3e762624dc3c8fa2a.pts 03001627/expert_verified/points_label/cd5ad4afabaed0d3e762624dc3c8fa2a.seg 03001627
+02958343/points/d2e1dc21db9b45df6436916a86a90ed7.pts 02958343/expert_verified/points_label/d2e1dc21db9b45df6436916a86a90ed7.seg 02958343
+02691156/points/de9e093bb17848c3b2bd4a92202f8700.pts 02691156/expert_verified/points_label/de9e093bb17848c3b2bd4a92202f8700.seg 02691156
+03467517/points/40cd2cafde62ff7ca24eeca91f583600.pts 03467517/expert_verified/points_label/40cd2cafde62ff7ca24eeca91f583600.seg 03467517
+02958343/points/56e0fef0632aed0f1d27be7764701cfe.pts 02958343/expert_verified/points_label/56e0fef0632aed0f1d27be7764701cfe.seg 02958343
+04379243/points/a4d149a48607de3d92f4c88fd91c6b1b.pts 04379243/expert_verified/points_label/a4d149a48607de3d92f4c88fd91c6b1b.seg 04379243
+03636649/points/45f11cb4099c9c87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/45f11cb4099c9c87bbc7a6acbd8f058b.seg 03636649
+04379243/points/3558aeeb9698722acf19858fd1963d10.pts 04379243/expert_verified/points_label/3558aeeb9698722acf19858fd1963d10.seg 04379243
+03636649/points/2a52bd01472ec7e1589ec67c01f5c1a7.pts 03636649/expert_verified/points_label/2a52bd01472ec7e1589ec67c01f5c1a7.seg 03636649
+03467517/points/58bb21c325f021088f01c8e793a6e062.pts 03467517/expert_verified/points_label/58bb21c325f021088f01c8e793a6e062.seg 03467517
+04379243/points/3997cdee934a9b238eb3bc6c6d15f9bf.pts 04379243/expert_verified/points_label/3997cdee934a9b238eb3bc6c6d15f9bf.seg 04379243
+03001627/points/c4cab2a416a4537e2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/c4cab2a416a4537e2871cc0b3cc1a485.seg 03001627
+04379243/points/6aaa78b81528f4846674ff79eed6185a.pts 04379243/expert_verified/points_label/6aaa78b81528f4846674ff79eed6185a.seg 04379243
+03636649/points/fd5f6ab819910a66dc7f95a5a82e36f7.pts 03636649/expert_verified/points_label/fd5f6ab819910a66dc7f95a5a82e36f7.seg 03636649
+04379243/points/8e3303cae6cc104bad4f8ccb153c24e.pts 04379243/expert_verified/points_label/8e3303cae6cc104bad4f8ccb153c24e.seg 04379243
+03001627/points/2f0318b23d899a84493f17f4fe9b9eb2.pts 03001627/expert_verified/points_label/2f0318b23d899a84493f17f4fe9b9eb2.seg 03001627
+04379243/points/2406cdcd4c60c84132884c4c87a2e061.pts 04379243/expert_verified/points_label/2406cdcd4c60c84132884c4c87a2e061.seg 04379243
+03790512/points/55caf44a43f2c04d468bac13e007a6e9.pts 03790512/expert_verified/points_label/55caf44a43f2c04d468bac13e007a6e9.seg 03790512
+03001627/points/ee665ce6679ac8cfb502ac2eb9128f9a.pts 03001627/expert_verified/points_label/ee665ce6679ac8cfb502ac2eb9128f9a.seg 03001627
+02691156/points/32edb6ba5788dc12d8ff6111270336a9.pts 02691156/expert_verified/points_label/32edb6ba5788dc12d8ff6111270336a9.seg 02691156
+03636649/points/d0fde1daedab10365240248232b90795.pts 03636649/expert_verified/points_label/d0fde1daedab10365240248232b90795.seg 03636649
+04379243/points/61b88b501933ebae8f7068c66465c4d6.pts 04379243/expert_verified/points_label/61b88b501933ebae8f7068c66465c4d6.seg 04379243
+03001627/points/93556cf01e19f638bf80985a99195eb8.pts 03001627/expert_verified/points_label/93556cf01e19f638bf80985a99195eb8.seg 03001627
+04379243/points/f3b8c91c5dd1cb6b8722573b29f0d6d8.pts 04379243/expert_verified/points_label/f3b8c91c5dd1cb6b8722573b29f0d6d8.seg 04379243
+04379243/points/eae36b396f6b5f97664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/eae36b396f6b5f97664b3b9b23ddfcbc.seg 04379243
+03624134/points/8bd5c4f395695ebdf40d02cc9d84a93a.pts 03624134/expert_verified/points_label/8bd5c4f395695ebdf40d02cc9d84a93a.seg 03624134
+03001627/points/8c81ff18e04584547f409062bafc8e2.pts 03001627/expert_verified/points_label/8c81ff18e04584547f409062bafc8e2.seg 03001627
+03001627/points/77e7660d71c6f3befebad4f49b26ec52.pts 03001627/expert_verified/points_label/77e7660d71c6f3befebad4f49b26ec52.seg 03001627
+03261776/points/bc404e52bfcd2038538cf6df9faa9b65.pts 03261776/expert_verified/points_label/bc404e52bfcd2038538cf6df9faa9b65.seg 03261776
+03001627/points/f09af71bebd4bea8a2651abaf391628e.pts 03001627/expert_verified/points_label/f09af71bebd4bea8a2651abaf391628e.seg 03001627
+03001627/points/8c8efbe62a1547942b90a0fb76278f6f.pts 03001627/expert_verified/points_label/8c8efbe62a1547942b90a0fb76278f6f.seg 03001627
+04379243/points/aed5697ff59e3d3035478a6869a3602d.pts 04379243/expert_verified/points_label/aed5697ff59e3d3035478a6869a3602d.seg 04379243
+02691156/points/5ac00867c7d78b1690b1d6deb98feec6.pts 02691156/expert_verified/points_label/5ac00867c7d78b1690b1d6deb98feec6.seg 02691156
+03001627/points/c709aa613431c0538a653a9f65a410f6.pts 03001627/expert_verified/points_label/c709aa613431c0538a653a9f65a410f6.seg 03001627
+03624134/points/8facbe9d9f4da233d15a5887ec2183c9.pts 03624134/expert_verified/points_label/8facbe9d9f4da233d15a5887ec2183c9.seg 03624134
+03642806/points/dbcd5a88a9d4f1d7579cfe4420588034.pts 03642806/expert_verified/points_label/dbcd5a88a9d4f1d7579cfe4420588034.seg 03642806
+03636649/points/f29a94f969dd55ffc35131da26f8061a.pts 03636649/expert_verified/points_label/f29a94f969dd55ffc35131da26f8061a.seg 03636649
+02958343/points/5e014eb2bd03daab9fbe97de4a41d527.pts 02958343/expert_verified/points_label/5e014eb2bd03daab9fbe97de4a41d527.seg 02958343
+04379243/points/7105bd044f464358beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/7105bd044f464358beedb4c8fd29e2d1.seg 04379243
+04379243/points/c827c0d4ef212f2b30cb1fe6fdc7d605.pts 04379243/expert_verified/points_label/c827c0d4ef212f2b30cb1fe6fdc7d605.seg 04379243
+04379243/points/19bc9c781df1da46824080f516909671.pts 04379243/expert_verified/points_label/19bc9c781df1da46824080f516909671.seg 04379243
+03001627/points/71b53a5f441d45b742b7e4c0136bdb7e.pts 03001627/expert_verified/points_label/71b53a5f441d45b742b7e4c0136bdb7e.seg 03001627
+02958343/points/e7e94f8dbbe8c1e9784da3853aae78cd.pts 02958343/expert_verified/points_label/e7e94f8dbbe8c1e9784da3853aae78cd.seg 02958343
+03790512/points/832c4a316c419228b37378f3c85478b4.pts 03790512/expert_verified/points_label/832c4a316c419228b37378f3c85478b4.seg 03790512
+02954340/points/c7122c44495a5ac6aceb0fa31f18f016.pts 02954340/expert_verified/points_label/c7122c44495a5ac6aceb0fa31f18f016.seg 02954340
+03001627/points/6b32d3a9198f8b03d1dcc55e36186e4e.pts 03001627/expert_verified/points_label/6b32d3a9198f8b03d1dcc55e36186e4e.seg 03001627
+03636649/points/7893d0b50a7b6a768ec45924afa4ac91.pts 03636649/expert_verified/points_label/7893d0b50a7b6a768ec45924afa4ac91.seg 03636649
+02691156/points/befcb95d80e0e49119ba010ddb4974fe.pts 02691156/expert_verified/points_label/befcb95d80e0e49119ba010ddb4974fe.seg 02691156
+03001627/points/b70600293bab55c0593ebeeedbff73b.pts 03001627/expert_verified/points_label/b70600293bab55c0593ebeeedbff73b.seg 03001627
+02691156/points/7fedb48b457ee9f31629b98cc1b1b992.pts 02691156/expert_verified/points_label/7fedb48b457ee9f31629b98cc1b1b992.seg 02691156
+04099429/points/e04bda8655d9e606ebcdf982796b4fa.pts 04099429/expert_verified/points_label/e04bda8655d9e606ebcdf982796b4fa.seg 04099429
+04379243/points/25bcea593e4314c3436e6787c76ef3f0.pts 04379243/expert_verified/points_label/25bcea593e4314c3436e6787c76ef3f0.seg 04379243
+03636649/points/f3a9cc3060fd6b0e6e4f8fc909e0d34e.pts 03636649/expert_verified/points_label/f3a9cc3060fd6b0e6e4f8fc909e0d34e.seg 03636649
+04379243/points/516928532093f765bababe11fcea8796.pts 04379243/expert_verified/points_label/516928532093f765bababe11fcea8796.seg 04379243
+03001627/points/31569815c88e79de4458bae25a4e518a.pts 03001627/expert_verified/points_label/31569815c88e79de4458bae25a4e518a.seg 03001627
+03001627/points/a08ad49c281128ea53615647c93fc704.pts 03001627/expert_verified/points_label/a08ad49c281128ea53615647c93fc704.seg 03001627
+03642806/points/f5fc954736b06be15fd06491ae919ea3.pts 03642806/expert_verified/points_label/f5fc954736b06be15fd06491ae919ea3.seg 03642806
+04379243/points/15b495c101881d96e2367b9e27f16a71.pts 04379243/expert_verified/points_label/15b495c101881d96e2367b9e27f16a71.seg 04379243
+02691156/points/ebd991666f177f8f575bf8a4b14be4f4.pts 02691156/expert_verified/points_label/ebd991666f177f8f575bf8a4b14be4f4.seg 02691156
+02691156/points/f7739764eb1c78a053f370d353cea84.pts 02691156/expert_verified/points_label/f7739764eb1c78a053f370d353cea84.seg 02691156
+03636649/points/8a6d770e6b4942c5ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/8a6d770e6b4942c5ef3a2c64cef919d0.seg 03636649
+04379243/points/2fcc875b28c5557dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2fcc875b28c5557dcfcef693e7ec696.seg 04379243
+03636649/points/896abd405c79547086485c798787f66b.pts 03636649/expert_verified/points_label/896abd405c79547086485c798787f66b.seg 03636649
+02691156/points/356a633ea047c549ca8607f540cc62ba.pts 02691156/expert_verified/points_label/356a633ea047c549ca8607f540cc62ba.seg 02691156
+03001627/points/c983108db7fcfa3619fb4103277a6b93.pts 03001627/expert_verified/points_label/c983108db7fcfa3619fb4103277a6b93.seg 03001627
+04225987/points/97f85bc59f09a9f455c660e6cd8e92b.pts 04225987/expert_verified/points_label/97f85bc59f09a9f455c660e6cd8e92b.seg 04225987
+03636649/points/69a708be7245f4c9786e8e92cc08146.pts 03636649/expert_verified/points_label/69a708be7245f4c9786e8e92cc08146.seg 03636649
+04379243/points/f71296c0a7e93ec282db9fca4b68095.pts 04379243/expert_verified/points_label/f71296c0a7e93ec282db9fca4b68095.seg 04379243
+02691156/points/33faf711ed54a4d3db22b838c125a50b.pts 02691156/expert_verified/points_label/33faf711ed54a4d3db22b838c125a50b.seg 02691156
+03642806/points/5d544ee4b094c6606436916a86a90ed7.pts 03642806/expert_verified/points_label/5d544ee4b094c6606436916a86a90ed7.seg 03642806
+02691156/points/a0d63ee7fd87f93619ba010ddb4974fe.pts 02691156/expert_verified/points_label/a0d63ee7fd87f93619ba010ddb4974fe.seg 02691156
+03001627/points/e30b412be565a1026efe57da6d3d385e.pts 03001627/expert_verified/points_label/e30b412be565a1026efe57da6d3d385e.seg 03001627
+04379243/points/fe5e1df0653804d6ce4670b160b81e9.pts 04379243/expert_verified/points_label/fe5e1df0653804d6ce4670b160b81e9.seg 04379243
+02691156/points/fd41d04f1aabbaea3fddedb0bf24c68a.pts 02691156/expert_verified/points_label/fd41d04f1aabbaea3fddedb0bf24c68a.seg 02691156
+03624134/points/e79481b2fde3a3ab340fbf70397ab69a.pts 03624134/expert_verified/points_label/e79481b2fde3a3ab340fbf70397ab69a.seg 03624134
+04379243/points/d06d27bc9ad1faabd7bf6fb68df7f786.pts 04379243/expert_verified/points_label/d06d27bc9ad1faabd7bf6fb68df7f786.seg 04379243
+03001627/points/e4931ffa06d7b05cb04cb542e2c50eb4.pts 03001627/expert_verified/points_label/e4931ffa06d7b05cb04cb542e2c50eb4.seg 03001627
+03001627/points/d4b5f8edc72b4676f4175ee3a177350a.pts 03001627/expert_verified/points_label/d4b5f8edc72b4676f4175ee3a177350a.seg 03001627
+03636649/points/4f16fffbe480b835276206fae5d3c473.pts 03636649/expert_verified/points_label/4f16fffbe480b835276206fae5d3c473.seg 03636649
+03001627/points/8ade914cd21b6e49656f29b05c68d39f.pts 03001627/expert_verified/points_label/8ade914cd21b6e49656f29b05c68d39f.seg 03001627
+03001627/points/1e304b967d5253d5dd079f8cece51712.pts 03001627/expert_verified/points_label/1e304b967d5253d5dd079f8cece51712.seg 03001627
+04379243/points/6d0ef6312f8af87a53e946fb2184f0c4.pts 04379243/expert_verified/points_label/6d0ef6312f8af87a53e946fb2184f0c4.seg 04379243
+03948459/points/79c0cac016998c7cf7ba4a82f8032357.pts 03948459/expert_verified/points_label/79c0cac016998c7cf7ba4a82f8032357.seg 03948459
+03642806/points/b51683c6285fa0f69067ac5c9d4ee692.pts 03642806/expert_verified/points_label/b51683c6285fa0f69067ac5c9d4ee692.seg 03642806
+04379243/points/93cdfd14889492dd91a4fd87fee47737.pts 04379243/expert_verified/points_label/93cdfd14889492dd91a4fd87fee47737.seg 04379243
+03636649/points/da8141b45da808199a06a7de97b096dc.pts 03636649/expert_verified/points_label/da8141b45da808199a06a7de97b096dc.seg 03636649
+04379243/points/7d22cd72bf2762b19a4b266ed4d507c9.pts 04379243/expert_verified/points_label/7d22cd72bf2762b19a4b266ed4d507c9.seg 04379243
+04225987/points/aa886bed91a13113d5498a74ca9ca78b.pts 04225987/expert_verified/points_label/aa886bed91a13113d5498a74ca9ca78b.seg 04225987
+04379243/points/55547d2fae0e3dc21705bfd3afcd10e.pts 04379243/expert_verified/points_label/55547d2fae0e3dc21705bfd3afcd10e.seg 04379243
+04379243/points/222c56ff9cddbaf4139eb23f7c8036f.pts 04379243/expert_verified/points_label/222c56ff9cddbaf4139eb23f7c8036f.seg 04379243
+03636649/points/292f1f97a543d735dedf3c967c85981a.pts 03636649/expert_verified/points_label/292f1f97a543d735dedf3c967c85981a.seg 03636649
+04379243/points/9e2318099f77d3df3527ecfeb345775f.pts 04379243/expert_verified/points_label/9e2318099f77d3df3527ecfeb345775f.seg 04379243
+04379243/points/6ace903899706a5819fb4103277a6b93.pts 04379243/expert_verified/points_label/6ace903899706a5819fb4103277a6b93.seg 04379243
+03636649/points/c080aefc6cbff8c81185ac82ed4da80d.pts 03636649/expert_verified/points_label/c080aefc6cbff8c81185ac82ed4da80d.seg 03636649
+03790512/points/9dd4ae1c34af4766b4f2746c8140d6d6.pts 03790512/expert_verified/points_label/9dd4ae1c34af4766b4f2746c8140d6d6.seg 03790512
+03001627/points/e199b1f6a70c9f56df44d20a516c07b3.pts 03001627/expert_verified/points_label/e199b1f6a70c9f56df44d20a516c07b3.seg 03001627
+04379243/points/8129d4c51abc3356bababe11fcea8796.pts 04379243/expert_verified/points_label/8129d4c51abc3356bababe11fcea8796.seg 04379243
+03001627/points/c9d8573a048c0e959c0ca344f487323e.pts 03001627/expert_verified/points_label/c9d8573a048c0e959c0ca344f487323e.seg 03001627
+04379243/points/25eefc5a3c7b30e1f103d473de33521a.pts 04379243/expert_verified/points_label/25eefc5a3c7b30e1f103d473de33521a.seg 04379243
+03624134/points/c20cca071ea58e3ef2c542131520d62e.pts 03624134/expert_verified/points_label/c20cca071ea58e3ef2c542131520d62e.seg 03624134
+03001627/points/c86cfe147872280463626070a93463cf.pts 03001627/expert_verified/points_label/c86cfe147872280463626070a93463cf.seg 03001627
+03001627/points/3853339519aca1bdfcd4910413c446d9.pts 03001627/expert_verified/points_label/3853339519aca1bdfcd4910413c446d9.seg 03001627
+03001627/points/8cb44a50906b827615e7ec87bf4cc5ab.pts 03001627/expert_verified/points_label/8cb44a50906b827615e7ec87bf4cc5ab.seg 03001627
+02691156/points/fd9f1cdaa381599bca8607f540cc62ba.pts 02691156/expert_verified/points_label/fd9f1cdaa381599bca8607f540cc62ba.seg 02691156
+03001627/points/80dabf9ddbdc92f681806e3880250dff.pts 03001627/expert_verified/points_label/80dabf9ddbdc92f681806e3880250dff.seg 03001627
+04379243/points/5919dea71f3bcb071d54ab02e78bef2.pts 04379243/expert_verified/points_label/5919dea71f3bcb071d54ab02e78bef2.seg 04379243
+03636649/points/292ba732e002629e68c2f5eb1dd4dfaa.pts 03636649/expert_verified/points_label/292ba732e002629e68c2f5eb1dd4dfaa.seg 03636649
+04379243/points/5d77e8f6ad3741a0c30ab36bf7b0552.pts 04379243/expert_verified/points_label/5d77e8f6ad3741a0c30ab36bf7b0552.seg 04379243
+03467517/points/21a517abc4729e6e352e5d4d2615db5b.pts 03467517/expert_verified/points_label/21a517abc4729e6e352e5d4d2615db5b.seg 03467517
+03467517/points/6554f6429eb7b67585e3c97721f726e4.pts 03467517/expert_verified/points_label/6554f6429eb7b67585e3c97721f726e4.seg 03467517
+02958343/points/f84ba2039d0a4ec5afe717997470b28d.pts 02958343/expert_verified/points_label/f84ba2039d0a4ec5afe717997470b28d.seg 02958343
+02691156/points/29fd29045703ff18b4a8b7176ed97248.pts 02691156/expert_verified/points_label/29fd29045703ff18b4a8b7176ed97248.seg 02691156
+03467517/points/a7f449a1f2cd1f1693f0194265a9746c.pts 03467517/expert_verified/points_label/a7f449a1f2cd1f1693f0194265a9746c.seg 03467517
+03790512/points/7fcee59a33976221a88e8cb97b773125.pts 03790512/expert_verified/points_label/7fcee59a33976221a88e8cb97b773125.seg 03790512
+04099429/points/2407c2684ee757e89c4176ab56cb612.pts 04099429/expert_verified/points_label/2407c2684ee757e89c4176ab56cb612.seg 04099429
+04379243/points/f621e2ad900ad48535836c728d324152.pts 04379243/expert_verified/points_label/f621e2ad900ad48535836c728d324152.seg 04379243
+03001627/points/9a54daea9071a536bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a54daea9071a536bf80985a99195eb8.seg 03001627
+03001627/points/fd9e909b082d8175d319c38340319ae4.pts 03001627/expert_verified/points_label/fd9e909b082d8175d319c38340319ae4.seg 03001627
+03001627/points/a8dd9990ecd74c45435897641a7ee684.pts 03001627/expert_verified/points_label/a8dd9990ecd74c45435897641a7ee684.seg 03001627
+03636649/points/c6424950ca9447627d8864caa856253b.pts 03636649/expert_verified/points_label/c6424950ca9447627d8864caa856253b.seg 03636649
+03948459/points/7f3ec97cfaea31137504cc74f24f0eee.pts 03948459/expert_verified/points_label/7f3ec97cfaea31137504cc74f24f0eee.seg 03948459
+02691156/points/43abe330362164e99be82ec29531a70f.pts 02691156/expert_verified/points_label/43abe330362164e99be82ec29531a70f.seg 02691156
+03001627/points/499c4b519c708ae84cd08aa7c510fb85.pts 03001627/expert_verified/points_label/499c4b519c708ae84cd08aa7c510fb85.seg 03001627
+04379243/points/4c7931492b41f960d50eef20e0914a48.pts 04379243/expert_verified/points_label/4c7931492b41f960d50eef20e0914a48.seg 04379243
+03001627/points/3f36e261cc87648ac3bd24f986301745.pts 03001627/expert_verified/points_label/3f36e261cc87648ac3bd24f986301745.seg 03001627
+03001627/points/a09a88c11d0b27368821ad3452f1c8c9.pts 03001627/expert_verified/points_label/a09a88c11d0b27368821ad3452f1c8c9.seg 03001627
+04379243/points/89cc879f005dcf50f1f50f6a678fb494.pts 04379243/expert_verified/points_label/89cc879f005dcf50f1f50f6a678fb494.seg 04379243
+02958343/points/d34b0494fc4d756ab927782fc69a1fbb.pts 02958343/expert_verified/points_label/d34b0494fc4d756ab927782fc69a1fbb.seg 02958343
+02958343/points/705840df46a582e2ac826a3c82da491.pts 02958343/expert_verified/points_label/705840df46a582e2ac826a3c82da491.seg 02958343
+02691156/points/74a5f937c22aa08a3e70653c1b3170b5.pts 02691156/expert_verified/points_label/74a5f937c22aa08a3e70653c1b3170b5.seg 02691156
+03948459/points/a0a1633186261a031274aa253a241db2.pts 03948459/expert_verified/points_label/a0a1633186261a031274aa253a241db2.seg 03948459
+03001627/points/2de04227fae28e70b6eb6f056d511fe1.pts 03001627/expert_verified/points_label/2de04227fae28e70b6eb6f056d511fe1.seg 03001627
+02691156/points/1e9ef313876bfba7d02c6d35cc802839.pts 02691156/expert_verified/points_label/1e9ef313876bfba7d02c6d35cc802839.seg 02691156
+03636649/points/e99793b871d27333d42b9650f19dd425.pts 03636649/expert_verified/points_label/e99793b871d27333d42b9650f19dd425.seg 03636649
+03001627/points/7228d43e00af4c1e2746490e2236e9a8.pts 03001627/expert_verified/points_label/7228d43e00af4c1e2746490e2236e9a8.seg 03001627
+03636649/points/66111d2c7a23b0feb404555b84577afb.pts 03636649/expert_verified/points_label/66111d2c7a23b0feb404555b84577afb.seg 03636649
+03001627/points/2499541ace317cbb8cb5d9909aeb1309.pts 03001627/expert_verified/points_label/2499541ace317cbb8cb5d9909aeb1309.seg 03001627
+04379243/points/d151d9f45d8b14536cd661fb5fd95741.pts 04379243/expert_verified/points_label/d151d9f45d8b14536cd661fb5fd95741.seg 04379243
+03001627/points/ea7be2b97e78d5b35a4480134e0cdd21.pts 03001627/expert_verified/points_label/ea7be2b97e78d5b35a4480134e0cdd21.seg 03001627
+02958343/points/9c35f00f81110738783854950b26f0d3.pts 02958343/expert_verified/points_label/9c35f00f81110738783854950b26f0d3.seg 02958343
+03001627/points/e30bd575bbd6c68c9710e093c764abec.pts 03001627/expert_verified/points_label/e30bd575bbd6c68c9710e093c764abec.seg 03001627
+03790512/points/61b17f12bec91d057395d58407f193ba.pts 03790512/expert_verified/points_label/61b17f12bec91d057395d58407f193ba.seg 03790512
+04379243/points/cd895c35fff495cdd0b93fa304cfa755.pts 04379243/expert_verified/points_label/cd895c35fff495cdd0b93fa304cfa755.seg 04379243
+02958343/points/b70d970f8020c25dd141480e2c154d3.pts 02958343/expert_verified/points_label/b70d970f8020c25dd141480e2c154d3.seg 02958343
+04379243/points/2642d805c53e243d629f73b53bd7a234.pts 04379243/expert_verified/points_label/2642d805c53e243d629f73b53bd7a234.seg 04379243
+04379243/points/1bce2f4937d36446a32c566d71fa585c.pts 04379243/expert_verified/points_label/1bce2f4937d36446a32c566d71fa585c.seg 04379243
+04379243/points/7c1bcea89b0037a2d67bd369ec608dad.pts 04379243/expert_verified/points_label/7c1bcea89b0037a2d67bd369ec608dad.seg 04379243
+04379243/points/3154c61c595bd600e56ddd87eb888f65.pts 04379243/expert_verified/points_label/3154c61c595bd600e56ddd87eb888f65.seg 04379243
+03001627/points/7a1de77ca204eaf28a514cac7cb18507.pts 03001627/expert_verified/points_label/7a1de77ca204eaf28a514cac7cb18507.seg 03001627
+04379243/points/77ecc55547840f06d42b9650f19dd425.pts 04379243/expert_verified/points_label/77ecc55547840f06d42b9650f19dd425.seg 04379243
+02691156/points/9a8aecab136ce50db7ef47444625afb2.pts 02691156/expert_verified/points_label/9a8aecab136ce50db7ef47444625afb2.seg 02691156
+02958343/points/24866846d728484e1d1a964dea8a7aab.pts 02958343/expert_verified/points_label/24866846d728484e1d1a964dea8a7aab.seg 02958343
+04099429/points/9b75297c580ff937b61ce5beb9f92726.pts 04099429/expert_verified/points_label/9b75297c580ff937b61ce5beb9f92726.seg 04099429
+04225987/points/90dbe261a4d56dcf1082f2ea630bf69e.pts 04225987/expert_verified/points_label/90dbe261a4d56dcf1082f2ea630bf69e.seg 04225987
+03001627/points/81b27636162e148bb3fb065fa3089331.pts 03001627/expert_verified/points_label/81b27636162e148bb3fb065fa3089331.seg 03001627
+03642806/points/66d47a84a3d522dc9311bf79d4774e73.pts 03642806/expert_verified/points_label/66d47a84a3d522dc9311bf79d4774e73.seg 03642806
+03001627/points/2a05ae00b701fda36567137a59cb1a56.pts 03001627/expert_verified/points_label/2a05ae00b701fda36567137a59cb1a56.seg 03001627
+04379243/points/79df23303a3192c1cdf1dfd78f33901b.pts 04379243/expert_verified/points_label/79df23303a3192c1cdf1dfd78f33901b.seg 04379243
+04379243/points/bf17779bec6abccf161bc5243aab8ea4.pts 04379243/expert_verified/points_label/bf17779bec6abccf161bc5243aab8ea4.seg 04379243
+03001627/points/ece1a921c1bfd44947f5e245ee376525.pts 03001627/expert_verified/points_label/ece1a921c1bfd44947f5e245ee376525.seg 03001627
+03636649/points/15c51ecb58bf304fef3a2c64cef919d0.pts 03636649/expert_verified/points_label/15c51ecb58bf304fef3a2c64cef919d0.seg 03636649
+04379243/points/5d93e285b2006520ab610b0c94236463.pts 04379243/expert_verified/points_label/5d93e285b2006520ab610b0c94236463.seg 04379243
+03636649/points/b2d5929e66044aeac7db9c21ccfbc4a1.pts 03636649/expert_verified/points_label/b2d5929e66044aeac7db9c21ccfbc4a1.seg 03636649
+04379243/points/f3164e1781a296597f6f00dc967c386.pts 04379243/expert_verified/points_label/f3164e1781a296597f6f00dc967c386.seg 04379243
+04379243/points/798a07e42d76013582695d8aaeacccc5.pts 04379243/expert_verified/points_label/798a07e42d76013582695d8aaeacccc5.seg 04379243
+03948459/points/cc014e78b5cd9e7ed957eaf7f4edb205.pts 03948459/expert_verified/points_label/cc014e78b5cd9e7ed957eaf7f4edb205.seg 03948459
+03636649/points/b3a98808fb1ccd892a5041fadf25a502.pts 03636649/expert_verified/points_label/b3a98808fb1ccd892a5041fadf25a502.seg 03636649
+04379243/points/9472c006a5d35b9ab606ece4189242ff.pts 04379243/expert_verified/points_label/9472c006a5d35b9ab606ece4189242ff.seg 04379243
+03001627/points/3f04adffb69b5ebee95cd0dc8c2f0e83.pts 03001627/expert_verified/points_label/3f04adffb69b5ebee95cd0dc8c2f0e83.seg 03001627
+03001627/points/26aa22bd1da8b8c5b1a5c6ecbc81953c.pts 03001627/expert_verified/points_label/26aa22bd1da8b8c5b1a5c6ecbc81953c.seg 03001627
+03001627/points/f68ecc9ec512915f36d8dd30a594b2af.pts 03001627/expert_verified/points_label/f68ecc9ec512915f36d8dd30a594b2af.seg 03001627
+03642806/points/6489453e322cdb53f9f3c6290096f50f.pts 03642806/expert_verified/points_label/6489453e322cdb53f9f3c6290096f50f.seg 03642806
+03001627/points/c53fa6829ec9a947d13b7d13ee32497.pts 03001627/expert_verified/points_label/c53fa6829ec9a947d13b7d13ee32497.seg 03001627
+04379243/points/7f1bd688960e2c1b97f2016c3d6097c9.pts 04379243/expert_verified/points_label/7f1bd688960e2c1b97f2016c3d6097c9.seg 04379243
+02958343/points/edb2ab8a1d7e20f36436916a86a90ed7.pts 02958343/expert_verified/points_label/edb2ab8a1d7e20f36436916a86a90ed7.seg 02958343
+04379243/points/159a2a760327ca5bababe11fcea8796.pts 04379243/expert_verified/points_label/159a2a760327ca5bababe11fcea8796.seg 04379243
+02958343/points/988108a7536d686824065b218dc1b5b9.pts 02958343/expert_verified/points_label/988108a7536d686824065b218dc1b5b9.seg 02958343
+03636649/points/c695408a86062c4d242ea50288b3f64.pts 03636649/expert_verified/points_label/c695408a86062c4d242ea50288b3f64.seg 03636649
+04379243/points/2e7cb2cbfbbb4d002ee19ebe356c2dcb.pts 04379243/expert_verified/points_label/2e7cb2cbfbbb4d002ee19ebe356c2dcb.seg 04379243
+02691156/points/3d23703a618ce7df1e569ed4e4cfe84.pts 02691156/expert_verified/points_label/3d23703a618ce7df1e569ed4e4cfe84.seg 02691156
+03636649/points/97b7d9aabe38f91df11c97be803c47d.pts 03636649/expert_verified/points_label/97b7d9aabe38f91df11c97be803c47d.seg 03636649
+04379243/points/5be1589df948b227c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/5be1589df948b227c955e5ed03ef3a2f.seg 04379243
+04379243/points/8ea7ca2c8b48eb68ab610b0c94236463.pts 04379243/expert_verified/points_label/8ea7ca2c8b48eb68ab610b0c94236463.seg 04379243
+02958343/points/eb56379e243b0e2090da6b3e2ed8b49d.pts 02958343/expert_verified/points_label/eb56379e243b0e2090da6b3e2ed8b49d.seg 02958343
+03001627/points/cc30a723aeba69a139e0f39f5249b0ba.pts 03001627/expert_verified/points_label/cc30a723aeba69a139e0f39f5249b0ba.seg 03001627
+03001627/points/ff8efd10f5e6c5c7c6c0380e62f2644.pts 03001627/expert_verified/points_label/ff8efd10f5e6c5c7c6c0380e62f2644.seg 03001627
+02691156/points/d0ee4253d406b3f05e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/d0ee4253d406b3f05e9e2656aff7dd5b.seg 02691156
+02691156/points/9afe827a622d8ca28699933784576e73.pts 02691156/expert_verified/points_label/9afe827a622d8ca28699933784576e73.seg 02691156
+03467517/points/d82fc6db200cdf6ea24eeca91f583600.pts 03467517/expert_verified/points_label/d82fc6db200cdf6ea24eeca91f583600.seg 03467517
+03642806/points/6123321e3af0b6328204b359ccd3949e.pts 03642806/expert_verified/points_label/6123321e3af0b6328204b359ccd3949e.seg 03642806
+03636649/points/e15defcb3dd448094fffb007974c9976.pts 03636649/expert_verified/points_label/e15defcb3dd448094fffb007974c9976.seg 03636649
+03001627/points/c7fe45610d10cb108ad3a7d07aac2767.pts 03001627/expert_verified/points_label/c7fe45610d10cb108ad3a7d07aac2767.seg 03001627
+04379243/points/bfaa1c23d2622422ad16260d4d73b56.pts 04379243/expert_verified/points_label/bfaa1c23d2622422ad16260d4d73b56.seg 04379243
+04379243/points/8e3fc5f1f8e9658ce8b2b8dc0c816caf.pts 04379243/expert_verified/points_label/8e3fc5f1f8e9658ce8b2b8dc0c816caf.seg 04379243
+03467517/points/1a96f73d0929bd4793f0194265a9746c.pts 03467517/expert_verified/points_label/1a96f73d0929bd4793f0194265a9746c.seg 03467517
+02691156/points/86b11ae736659136ca8607f540cc62ba.pts 02691156/expert_verified/points_label/86b11ae736659136ca8607f540cc62ba.seg 02691156
+04379243/points/4c4c719ac4b61d8f812c9aaa38f9a422.pts 04379243/expert_verified/points_label/4c4c719ac4b61d8f812c9aaa38f9a422.seg 04379243
+04379243/points/443eca86041e57ab1e99b149cff6a230.pts 04379243/expert_verified/points_label/443eca86041e57ab1e99b149cff6a230.seg 04379243
+03948459/points/6b2d89a7f2b173f0d9deb3f829cc2475.pts 03948459/expert_verified/points_label/6b2d89a7f2b173f0d9deb3f829cc2475.seg 03948459
+04379243/points/8d84471c4af977d917271868b642acd3.pts 04379243/expert_verified/points_label/8d84471c4af977d917271868b642acd3.seg 04379243
+03636649/points/b78bef16d4f44844931e98da3a93e73e.pts 03636649/expert_verified/points_label/b78bef16d4f44844931e98da3a93e73e.seg 03636649
+03636649/points/29985e44b73051d923500a5b036df62e.pts 03636649/expert_verified/points_label/29985e44b73051d923500a5b036df62e.seg 03636649
+03642806/points/4f3575df3821e08c466909b3e9553909.pts 03642806/expert_verified/points_label/4f3575df3821e08c466909b3e9553909.seg 03642806
+03001627/points/3774a2b8c71e70b9f18a36d57b7cced0.pts 03001627/expert_verified/points_label/3774a2b8c71e70b9f18a36d57b7cced0.seg 03001627
+03001627/points/3ea40a75f22515557dcf230d8b7d162e.pts 03001627/expert_verified/points_label/3ea40a75f22515557dcf230d8b7d162e.seg 03001627
+03001627/points/33c4f94e97c3fefd19fb4103277a6b93.pts 03001627/expert_verified/points_label/33c4f94e97c3fefd19fb4103277a6b93.seg 03001627
+03636649/points/d7760d5f9e1e6a622cd2160e449d45ae.pts 03636649/expert_verified/points_label/d7760d5f9e1e6a622cd2160e449d45ae.seg 03636649
+02954340/points/7f9ddfff396634f17790cd6f6e8952aa.pts 02954340/expert_verified/points_label/7f9ddfff396634f17790cd6f6e8952aa.seg 02954340
+03001627/points/5e706e87ca60bd19ecb01bc908e8cea6.pts 03001627/expert_verified/points_label/5e706e87ca60bd19ecb01bc908e8cea6.seg 03001627
+04379243/points/90c19c729cabdb864b8710a3469971b1.pts 04379243/expert_verified/points_label/90c19c729cabdb864b8710a3469971b1.seg 04379243
+02691156/points/d08471df3e76602427743256ca3834f.pts 02691156/expert_verified/points_label/d08471df3e76602427743256ca3834f.seg 02691156
+02958343/points/67c229c70e64a25e69c2e0a91b39f742.pts 02958343/expert_verified/points_label/67c229c70e64a25e69c2e0a91b39f742.seg 02958343
+04379243/points/1011e1c9812b84d2a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/1011e1c9812b84d2a9ed7bb5b55809f8.seg 04379243
+03636649/points/3e2d51c40b37c9c086052e834fbd2c4a.pts 03636649/expert_verified/points_label/3e2d51c40b37c9c086052e834fbd2c4a.seg 03636649
+03001627/points/6b385a32489bab4abbc7a6acbd8f058b.pts 03001627/expert_verified/points_label/6b385a32489bab4abbc7a6acbd8f058b.seg 03001627
+03001627/points/61d29e8133da0b58d1fd43e2bf80195.pts 03001627/expert_verified/points_label/61d29e8133da0b58d1fd43e2bf80195.seg 03001627
+04379243/points/d5f2968e4b7254ccf4104961857ca9c.pts 04379243/expert_verified/points_label/d5f2968e4b7254ccf4104961857ca9c.seg 04379243
+04379243/points/30c9865cfc4294a7ad16260d4d73b56.pts 04379243/expert_verified/points_label/30c9865cfc4294a7ad16260d4d73b56.seg 04379243
+03001627/points/76919a456a23b9779368d1198f406e7.pts 03001627/expert_verified/points_label/76919a456a23b9779368d1198f406e7.seg 03001627
+03001627/points/c12da8acb2c7973597e755dddca14449.pts 03001627/expert_verified/points_label/c12da8acb2c7973597e755dddca14449.seg 03001627
+02958343/points/a5dcd1196a1ffa9739f20966eb25504f.pts 02958343/expert_verified/points_label/a5dcd1196a1ffa9739f20966eb25504f.seg 02958343
+02691156/points/1deb997079e0b3cd6c1cd53dbc9f7b8e.pts 02691156/expert_verified/points_label/1deb997079e0b3cd6c1cd53dbc9f7b8e.seg 02691156
+03636649/points/afb7cc3bbc3595a4e9b3dff83c7ff715.pts 03636649/expert_verified/points_label/afb7cc3bbc3595a4e9b3dff83c7ff715.seg 03636649
+03636649/points/b4aee889d5e2a826f6747912091f1965.pts 03636649/expert_verified/points_label/b4aee889d5e2a826f6747912091f1965.seg 03636649
+03636649/points/ea71ba1d8d8c8e5888a1de3dc61bfeef.pts 03636649/expert_verified/points_label/ea71ba1d8d8c8e5888a1de3dc61bfeef.seg 03636649
+02958343/points/b0c2225ab347e28f1a48cf85d161a723.pts 02958343/expert_verified/points_label/b0c2225ab347e28f1a48cf85d161a723.seg 02958343
+03001627/points/1ab8a3b55c14a7b27eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/1ab8a3b55c14a7b27eaeab1f0c9120b7.seg 03001627
+03261776/points/c6d19db35f69bae7b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/c6d19db35f69bae7b6d9c2cee7f2f72b.seg 03261776
+03001627/points/6d6e634ff34bd350c511e6b9b3b344f3.pts 03001627/expert_verified/points_label/6d6e634ff34bd350c511e6b9b3b344f3.seg 03001627
+02691156/points/ce682d7a2bbf77b6fc4b92d3d335214a.pts 02691156/expert_verified/points_label/ce682d7a2bbf77b6fc4b92d3d335214a.seg 02691156
+03261776/points/943048e64cc2bc980a070963925e308.pts 03261776/expert_verified/points_label/943048e64cc2bc980a070963925e308.seg 03261776
+03642806/points/5a63c5f29f0bc0eb12d8efb2f101da03.pts 03642806/expert_verified/points_label/5a63c5f29f0bc0eb12d8efb2f101da03.seg 03642806
+04379243/points/19678fdb9bc926505e4b35ff1ea95f37.pts 04379243/expert_verified/points_label/19678fdb9bc926505e4b35ff1ea95f37.seg 04379243
+02958343/points/52f2a2472411fe2e6b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/52f2a2472411fe2e6b418c7d9fedcaa9.seg 02958343
+03001627/points/1ee92a9d78cccbda98d2e7dbe701ca48.pts 03001627/expert_verified/points_label/1ee92a9d78cccbda98d2e7dbe701ca48.seg 03001627
+03001627/points/795f38ce5d8519938077cafed2bb8242.pts 03001627/expert_verified/points_label/795f38ce5d8519938077cafed2bb8242.seg 03001627
+03001627/points/5e5121cc58c4fea78ce66f12ba927a2b.pts 03001627/expert_verified/points_label/5e5121cc58c4fea78ce66f12ba927a2b.seg 03001627
+03001627/points/b998016472e9dd7a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/b998016472e9dd7a9b9f2eb77f5e247e.seg 03001627
+04379243/points/30b506e5e1fc282afdfcfddf24fb29ec.pts 04379243/expert_verified/points_label/30b506e5e1fc282afdfcfddf24fb29ec.seg 04379243
+03624134/points/bcd7ed830358dbd6d58ea69ee1ced10e.pts 03624134/expert_verified/points_label/bcd7ed830358dbd6d58ea69ee1ced10e.seg 03624134
+03001627/points/40d202afdcc49c6d35836c728d324152.pts 03001627/expert_verified/points_label/40d202afdcc49c6d35836c728d324152.seg 03001627
+03467517/points/fdb74c27462dfd837c481698bd5233b4.pts 03467517/expert_verified/points_label/fdb74c27462dfd837c481698bd5233b4.seg 03467517
+02691156/points/dc7c5d12854b9467b96212c8f6cd06e.pts 02691156/expert_verified/points_label/dc7c5d12854b9467b96212c8f6cd06e.seg 02691156
+02691156/points/48e9c61de4db838d84b83051fa0ae5d2.pts 02691156/expert_verified/points_label/48e9c61de4db838d84b83051fa0ae5d2.seg 02691156
+04379243/points/d187561a6b0cbd0acaed5ce7390f30b7.pts 04379243/expert_verified/points_label/d187561a6b0cbd0acaed5ce7390f30b7.seg 04379243
+04379243/points/ae9e04d050f5cba1492d9da2668ec34c.pts 04379243/expert_verified/points_label/ae9e04d050f5cba1492d9da2668ec34c.seg 04379243
+04379243/points/72c884f3b9b9119966f379f51753f72b.pts 04379243/expert_verified/points_label/72c884f3b9b9119966f379f51753f72b.seg 04379243
+02691156/points/917694a71164f2148e8405d6c51a908.pts 02691156/expert_verified/points_label/917694a71164f2148e8405d6c51a908.seg 02691156
+03001627/points/a2441f03fed7c13def31f91fe6afc8fa.pts 03001627/expert_verified/points_label/a2441f03fed7c13def31f91fe6afc8fa.seg 03001627
+03001627/points/49c955a80749d2e1a5ffdf44ff86b795.pts 03001627/expert_verified/points_label/49c955a80749d2e1a5ffdf44ff86b795.seg 03001627
+03636649/points/c43c89d862e10552b24ecc319936dfe2.pts 03636649/expert_verified/points_label/c43c89d862e10552b24ecc319936dfe2.seg 03636649
+03636649/points/e5ff9311bee487f5ca4aaad7dc0e3a16.pts 03636649/expert_verified/points_label/e5ff9311bee487f5ca4aaad7dc0e3a16.seg 03636649
+02958343/points/ba0ac1d1e25d3fad63f2c3a55558a78f.pts 02958343/expert_verified/points_label/ba0ac1d1e25d3fad63f2c3a55558a78f.seg 02958343
+04379243/points/2f58b1ca8634a6b48b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/2f58b1ca8634a6b48b9b51ae4415d5aa.seg 04379243
+03001627/points/c585ee093bfd52af6512b7b24f3d84.pts 03001627/expert_verified/points_label/c585ee093bfd52af6512b7b24f3d84.seg 03001627
+03001627/points/46f6a6e0f239282fc8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/46f6a6e0f239282fc8687ff9b0b4e4ac.seg 03001627
+03642806/points/f72dc1ffeae0168aadcfd37206a0d18b.pts 03642806/expert_verified/points_label/f72dc1ffeae0168aadcfd37206a0d18b.seg 03642806
+03948459/points/1e83ef6ed5d0b78b7efb854782e23566.pts 03948459/expert_verified/points_label/1e83ef6ed5d0b78b7efb854782e23566.seg 03948459
+03001627/points/95e5f6e550761aefe65b629e4a22f51e.pts 03001627/expert_verified/points_label/95e5f6e550761aefe65b629e4a22f51e.seg 03001627
+03001627/points/b38d05caee69c7ac8fc6229eb64e56a.pts 03001627/expert_verified/points_label/b38d05caee69c7ac8fc6229eb64e56a.seg 03001627
+02691156/points/4ff50b9f815c58acca8607f540cc62ba.pts 02691156/expert_verified/points_label/4ff50b9f815c58acca8607f540cc62ba.seg 02691156
+03636649/points/78a11c0b8e964c9b41657e31b569b105.pts 03636649/expert_verified/points_label/78a11c0b8e964c9b41657e31b569b105.seg 03636649
+02958343/points/b1f75a8e8b9e921a8a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/b1f75a8e8b9e921a8a6cf8c6b92417f2.seg 02958343
+02958343/points/a836fc66c01eccca58c27e607f6e2d4c.pts 02958343/expert_verified/points_label/a836fc66c01eccca58c27e607f6e2d4c.seg 02958343
+02691156/points/fac4af109beb0108b4f192eea1889928.pts 02691156/expert_verified/points_label/fac4af109beb0108b4f192eea1889928.seg 02691156
+03467517/points/b9c10bf6fc2095f93f0194265a9746c.pts 03467517/expert_verified/points_label/b9c10bf6fc2095f93f0194265a9746c.seg 03467517
+02691156/points/b976a48c015d6ced5e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/b976a48c015d6ced5e9e2656aff7dd5b.seg 02691156
+04379243/points/889f48aa85accd2ee73947fdf756a329.pts 04379243/expert_verified/points_label/889f48aa85accd2ee73947fdf756a329.seg 04379243
+02691156/points/b6d61068ef2bf2d46059aeb39e538eb2.pts 02691156/expert_verified/points_label/b6d61068ef2bf2d46059aeb39e538eb2.seg 02691156
+04379243/points/d94de64641651a2079b3e1be3524f72f.pts 04379243/expert_verified/points_label/d94de64641651a2079b3e1be3524f72f.seg 04379243
+03001627/points/117bd6da01905949a81116f5456ee312.pts 03001627/expert_verified/points_label/117bd6da01905949a81116f5456ee312.seg 03001627
+03636649/points/845542d0f578a9db1ec48bc3c478566d.pts 03636649/expert_verified/points_label/845542d0f578a9db1ec48bc3c478566d.seg 03636649
+04379243/points/9391dcc782fa7f6bfdad344760a9dafd.pts 04379243/expert_verified/points_label/9391dcc782fa7f6bfdad344760a9dafd.seg 04379243
+04379243/points/fe99a1127734f7852b70eac6546e93fd.pts 04379243/expert_verified/points_label/fe99a1127734f7852b70eac6546e93fd.seg 04379243
+03001627/points/4e358c2dc0513971f98c0761af40e04.pts 03001627/expert_verified/points_label/4e358c2dc0513971f98c0761af40e04.seg 03001627
+03636649/points/53afad2e573b26b141657e31b569b105.pts 03636649/expert_verified/points_label/53afad2e573b26b141657e31b569b105.seg 03636649
+04379243/points/3e51742cb382aa1f79b3e1be3524f72f.pts 04379243/expert_verified/points_label/3e51742cb382aa1f79b3e1be3524f72f.seg 04379243
+02958343/points/4f17af1ca7ae689d409b2c4484d833cc.pts 02958343/expert_verified/points_label/4f17af1ca7ae689d409b2c4484d833cc.seg 02958343
+03467517/points/c739664436ac5237aa0c867d5b070a5d.pts 03467517/expert_verified/points_label/c739664436ac5237aa0c867d5b070a5d.seg 03467517
+03797390/points/61c10dccfa8e508e2d66cbf6a91063.pts 03797390/expert_verified/points_label/61c10dccfa8e508e2d66cbf6a91063.seg 03797390
+03467517/points/aa86d20d03b2303593f0194265a9746c.pts 03467517/expert_verified/points_label/aa86d20d03b2303593f0194265a9746c.seg 03467517
+04379243/points/2f98d5e721e84debaa8081a7009091db.pts 04379243/expert_verified/points_label/2f98d5e721e84debaa8081a7009091db.seg 04379243
+04379243/points/2a0f853dadd841f96f1e07a56c129dfc.pts 04379243/expert_verified/points_label/2a0f853dadd841f96f1e07a56c129dfc.seg 04379243
+03001627/points/8031478c3fe31ddcc337647acafe65f0.pts 03001627/expert_verified/points_label/8031478c3fe31ddcc337647acafe65f0.seg 03001627
+03636649/points/a53112591be182b9d93768e7b9b1eabf.pts 03636649/expert_verified/points_label/a53112591be182b9d93768e7b9b1eabf.seg 03636649
+03001627/points/5bc916f8b9d0a7c6b40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/5bc916f8b9d0a7c6b40f0ac0fb9a650d.seg 03001627
+02691156/points/f2d4b8440d4bde5330afbcb38d77d0c3.pts 02691156/expert_verified/points_label/f2d4b8440d4bde5330afbcb38d77d0c3.seg 02691156
+03001627/points/e4274fc2b9e4a5511882515d09f3979e.pts 03001627/expert_verified/points_label/e4274fc2b9e4a5511882515d09f3979e.seg 03001627
+03001627/points/9ab18a33335373b2659dda512294c744.pts 03001627/expert_verified/points_label/9ab18a33335373b2659dda512294c744.seg 03001627
+04379243/points/32ea6609eb659a2cec3367bccf60e518.pts 04379243/expert_verified/points_label/32ea6609eb659a2cec3367bccf60e518.seg 04379243
+04379243/points/759cb93134fd5efde76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/759cb93134fd5efde76bc197b3a3ffc0.seg 04379243
+03001627/points/b8b5e172ee58899df2d9e72ba502035.pts 03001627/expert_verified/points_label/b8b5e172ee58899df2d9e72ba502035.seg 03001627
+03001627/points/1886b3e3f3d4af3ace522e6dda26fb51.pts 03001627/expert_verified/points_label/1886b3e3f3d4af3ace522e6dda26fb51.seg 03001627
+03948459/points/3f5f657bec9a21814ce6ac98dc4781fe.pts 03948459/expert_verified/points_label/3f5f657bec9a21814ce6ac98dc4781fe.seg 03948459
+04379243/points/5adf5a7173e588ad76e9713f57a5fcb6.pts 04379243/expert_verified/points_label/5adf5a7173e588ad76e9713f57a5fcb6.seg 04379243
+03001627/points/f33b6f791e9d64387d01b77e04a0bc7b.pts 03001627/expert_verified/points_label/f33b6f791e9d64387d01b77e04a0bc7b.seg 03001627
+04379243/points/4e928377ae98ed8d99e8bf807e902261.pts 04379243/expert_verified/points_label/4e928377ae98ed8d99e8bf807e902261.seg 04379243
+03001627/points/d7867d215f52107ba5e8cf3aa1686d66.pts 03001627/expert_verified/points_label/d7867d215f52107ba5e8cf3aa1686d66.seg 03001627
+02691156/points/bddc2c1a4fae008947a1dbf5fd48a4dd.pts 02691156/expert_verified/points_label/bddc2c1a4fae008947a1dbf5fd48a4dd.seg 02691156
+02958343/points/bafacc7f28509d4157abc6fa0d632bc7.pts 02958343/expert_verified/points_label/bafacc7f28509d4157abc6fa0d632bc7.seg 02958343
+02691156/points/a14b262838529c2c81e1d9f6b27f1a92.pts 02691156/expert_verified/points_label/a14b262838529c2c81e1d9f6b27f1a92.seg 02691156
+03001627/points/38afa26a419ea3abed040525648fc6d7.pts 03001627/expert_verified/points_label/38afa26a419ea3abed040525648fc6d7.seg 03001627
+04379243/points/79f63a1564928af071a782a4379556c7.pts 04379243/expert_verified/points_label/79f63a1564928af071a782a4379556c7.seg 04379243
+04379243/points/cbd1cd9b5423f890beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/cbd1cd9b5423f890beedb4c8fd29e2d1.seg 04379243
+02691156/points/d74767519393a937f73e5bc170b7e2be.pts 02691156/expert_verified/points_label/d74767519393a937f73e5bc170b7e2be.seg 02691156
+03001627/points/9a82269e56737217e16571f1d370cad9.pts 03001627/expert_verified/points_label/9a82269e56737217e16571f1d370cad9.seg 03001627
+03001627/points/6e1e73e14637a28da1c367d7a459a9b7.pts 03001627/expert_verified/points_label/6e1e73e14637a28da1c367d7a459a9b7.seg 03001627
+03797390/points/eecb13f61a93b4048f58d8b19de93f99.pts 03797390/expert_verified/points_label/eecb13f61a93b4048f58d8b19de93f99.seg 03797390
+03001627/points/4f7523a3d276bfae4b3c42e318f3affc.pts 03001627/expert_verified/points_label/4f7523a3d276bfae4b3c42e318f3affc.seg 03001627
+03624134/points/f19fe19693937db1cb03b57fca000b1f.pts 03624134/expert_verified/points_label/f19fe19693937db1cb03b57fca000b1f.seg 03624134
+02958343/points/c3858a8b73dcb137e3bdba9430565083.pts 02958343/expert_verified/points_label/c3858a8b73dcb137e3bdba9430565083.seg 02958343
+04379243/points/3ce930bb150aef8a69fb38085fbc320c.pts 04379243/expert_verified/points_label/3ce930bb150aef8a69fb38085fbc320c.seg 04379243
+04379243/points/75e3cbf4b1ef0df971a782a4379556c7.pts 04379243/expert_verified/points_label/75e3cbf4b1ef0df971a782a4379556c7.seg 04379243
+04379243/points/5040f8f3e2293db448e116352760c52d.pts 04379243/expert_verified/points_label/5040f8f3e2293db448e116352760c52d.seg 04379243
+04379243/points/edaf24be15738ea2c5d1c45cadcaa3eb.pts 04379243/expert_verified/points_label/edaf24be15738ea2c5d1c45cadcaa3eb.seg 04379243
+04379243/points/6fb52c296531dc17beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/6fb52c296531dc17beedb4c8fd29e2d1.seg 04379243
+04379243/points/e777df6ffb40e3a1853d412328e7e7a6.pts 04379243/expert_verified/points_label/e777df6ffb40e3a1853d412328e7e7a6.seg 04379243
+03001627/points/9c103621101bcf9919fb4103277a6b93.pts 03001627/expert_verified/points_label/9c103621101bcf9919fb4103277a6b93.seg 03001627
+03001627/points/5d20adaf6d8f89fa2f1c10544d7d6f.pts 03001627/expert_verified/points_label/5d20adaf6d8f89fa2f1c10544d7d6f.seg 03001627
+02691156/points/b80bd34ab330babbc8727b27ee96a4b7.pts 02691156/expert_verified/points_label/b80bd34ab330babbc8727b27ee96a4b7.seg 02691156
+04379243/points/50d898f6d1c05cee2d99129afd32edf4.pts 04379243/expert_verified/points_label/50d898f6d1c05cee2d99129afd32edf4.seg 04379243
+04379243/points/c0c836c630cdb4bb664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/c0c836c630cdb4bb664b3b9b23ddfcbc.seg 04379243
+03790512/points/a1553e0bb7897a7ace0bf41e5f45753d.pts 03790512/expert_verified/points_label/a1553e0bb7897a7ace0bf41e5f45753d.seg 03790512
+03467517/points/7701180906a0aa156a7ae841f1f88f87.pts 03467517/expert_verified/points_label/7701180906a0aa156a7ae841f1f88f87.seg 03467517
+03467517/points/3ef569c13f4ab5f83ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3ef569c13f4ab5f83ac61a2f8346a8f.seg 03467517
+03636649/points/3834d7f376879c03eca29403b7226aa1.pts 03636649/expert_verified/points_label/3834d7f376879c03eca29403b7226aa1.seg 03636649
+02958343/points/34ab29cea66952f16f48edd113a40fce.pts 02958343/expert_verified/points_label/34ab29cea66952f16f48edd113a40fce.seg 02958343
+02958343/points/e24f388736f4e6fd2cdd250493632937.pts 02958343/expert_verified/points_label/e24f388736f4e6fd2cdd250493632937.seg 02958343
+03001627/points/3ae022522800685c610195e4fb10d1de.pts 03001627/expert_verified/points_label/3ae022522800685c610195e4fb10d1de.seg 03001627
+02691156/points/49660fd24e5c2fbab87697d3904b168b.pts 02691156/expert_verified/points_label/49660fd24e5c2fbab87697d3904b168b.seg 02691156
+03642806/points/2d5d4d79cd464298566636e42679cc7f.pts 03642806/expert_verified/points_label/2d5d4d79cd464298566636e42679cc7f.seg 03642806
+04379243/points/7988dedacce42552ab610b0c94236463.pts 04379243/expert_verified/points_label/7988dedacce42552ab610b0c94236463.seg 04379243
+04379243/points/91ed62f2b3fd5919f12d7184a2ad3430.pts 04379243/expert_verified/points_label/91ed62f2b3fd5919f12d7184a2ad3430.seg 04379243
+03001627/points/a5898fefb1733333a82b0d8d157287f5.pts 03001627/expert_verified/points_label/a5898fefb1733333a82b0d8d157287f5.seg 03001627
+04379243/points/b4ef1de99422b08768661782af60b711.pts 04379243/expert_verified/points_label/b4ef1de99422b08768661782af60b711.seg 04379243
+03001627/points/df2b7e697ab6ca0f155d75bbf62b80.pts 03001627/expert_verified/points_label/df2b7e697ab6ca0f155d75bbf62b80.seg 03001627
+03467517/points/408a8e1b51266b9ccc34b900bb2492e.pts 03467517/expert_verified/points_label/408a8e1b51266b9ccc34b900bb2492e.seg 03467517
+03001627/points/597f2b2153af0c544aabcf2a7cb640f9.pts 03001627/expert_verified/points_label/597f2b2153af0c544aabcf2a7cb640f9.seg 03001627
+03001627/points/6870fbd4a7b733b0674f1c30a8cad95a.pts 03001627/expert_verified/points_label/6870fbd4a7b733b0674f1c30a8cad95a.seg 03001627
+03001627/points/e35d7d19dcdc9e5c30e06a011e63236a.pts 03001627/expert_verified/points_label/e35d7d19dcdc9e5c30e06a011e63236a.seg 03001627
+04225987/points/58ade10f7f87edc6e860048d7ced02e3.pts 04225987/expert_verified/points_label/58ade10f7f87edc6e860048d7ced02e3.seg 04225987
+04379243/points/39cf5ae2b497715a84253b2030fab070.pts 04379243/expert_verified/points_label/39cf5ae2b497715a84253b2030fab070.seg 04379243
+04379243/points/ab7b0db92f96381f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/ab7b0db92f96381f8cbb8bac2032149c.seg 04379243
+03001627/points/b117b01ab380362db8134b0fbf68257d.pts 03001627/expert_verified/points_label/b117b01ab380362db8134b0fbf68257d.seg 03001627
+03467517/points/913f3c90f5b78256e98e318d424a4bb9.pts 03467517/expert_verified/points_label/913f3c90f5b78256e98e318d424a4bb9.seg 03467517
+04379243/points/831985fb385a5b2a9ae2d75b4fc35b7.pts 04379243/expert_verified/points_label/831985fb385a5b2a9ae2d75b4fc35b7.seg 04379243
+03467517/points/482b8b9a225b6ca1d57700c05b1862d8.pts 03467517/expert_verified/points_label/482b8b9a225b6ca1d57700c05b1862d8.seg 03467517
+03001627/points/93a6876247c7a015d84b8ba651dfb8ac.pts 03001627/expert_verified/points_label/93a6876247c7a015d84b8ba651dfb8ac.seg 03001627
+04379243/points/a78273aa10b2dfb0bc8d334f99e7f52.pts 04379243/expert_verified/points_label/a78273aa10b2dfb0bc8d334f99e7f52.seg 04379243
+04379243/points/3c686ac317c496f9a71c812e027f94d9.pts 04379243/expert_verified/points_label/3c686ac317c496f9a71c812e027f94d9.seg 04379243
+02691156/points/50755e616df58fe566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/50755e616df58fe566cf1b4a8fc3914e.seg 02691156
+03001627/points/8cedc8e684d60ff42a06d8c81262ef96.pts 03001627/expert_verified/points_label/8cedc8e684d60ff42a06d8c81262ef96.seg 03001627
+04379243/points/f74c321042dbc8e684d78f017ff73fd6.pts 04379243/expert_verified/points_label/f74c321042dbc8e684d78f017ff73fd6.seg 04379243
+02958343/points/5130947e5f18e73a8321b7d65a99d2a.pts 02958343/expert_verified/points_label/5130947e5f18e73a8321b7d65a99d2a.seg 02958343
+03261776/points/f5d210ff14ca9d29b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/f5d210ff14ca9d29b6d9c2cee7f2f72b.seg 03261776
+03001627/points/d36de0f850783d8fd6b3090036b71698.pts 03001627/expert_verified/points_label/d36de0f850783d8fd6b3090036b71698.seg 03001627
+03001627/points/6897c2665267cca39eea64ae4d2b4158.pts 03001627/expert_verified/points_label/6897c2665267cca39eea64ae4d2b4158.seg 03001627
+03001627/points/6e98c5d61e008b4c2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/6e98c5d61e008b4c2871cc0b3cc1a485.seg 03001627
+02958343/points/92f697d036addb55ed576c2966428f.pts 02958343/expert_verified/points_label/92f697d036addb55ed576c2966428f.seg 02958343
+04379243/points/f3fd419f725aa894ba5342d638d0c267.pts 04379243/expert_verified/points_label/f3fd419f725aa894ba5342d638d0c267.seg 04379243
+04379243/points/62eff79cf2e75bc2765ee729adbdf968.pts 04379243/expert_verified/points_label/62eff79cf2e75bc2765ee729adbdf968.seg 04379243
+03001627/points/98a1f8651c962402492d9da2668ec34c.pts 03001627/expert_verified/points_label/98a1f8651c962402492d9da2668ec34c.seg 03001627
+03636649/points/d90639e69c82f864eb2d9895648d1206.pts 03636649/expert_verified/points_label/d90639e69c82f864eb2d9895648d1206.seg 03636649
+02954340/points/a1494210f6774b87b3e0e60b857dde8f.pts 02954340/expert_verified/points_label/a1494210f6774b87b3e0e60b857dde8f.seg 02954340
+03467517/points/d528407fe43b5df193f0194265a9746c.pts 03467517/expert_verified/points_label/d528407fe43b5df193f0194265a9746c.seg 03467517
+03636649/points/776e4b38023091002cd2160e449d45ae.pts 03636649/expert_verified/points_label/776e4b38023091002cd2160e449d45ae.seg 03636649
+04379243/points/91df49ec00f2c5ce73f1ca2ca101a20d.pts 04379243/expert_verified/points_label/91df49ec00f2c5ce73f1ca2ca101a20d.seg 04379243
+04379243/points/47f25d5b367326ceaaf15b62af6b513f.pts 04379243/expert_verified/points_label/47f25d5b367326ceaaf15b62af6b513f.seg 04379243
+04379243/points/f5d6579b3a1f5a879d2be74cfb51ade1.pts 04379243/expert_verified/points_label/f5d6579b3a1f5a879d2be74cfb51ade1.seg 04379243
+02691156/points/f6ea6663b48bf78261f1ef59130c405d.pts 02691156/expert_verified/points_label/f6ea6663b48bf78261f1ef59130c405d.seg 02691156
+03001627/points/63da17eda9d415b5319c5e90e9cc9126.pts 03001627/expert_verified/points_label/63da17eda9d415b5319c5e90e9cc9126.seg 03001627
+02691156/points/9fb60716f0f5a2b84408eb298433d643.pts 02691156/expert_verified/points_label/9fb60716f0f5a2b84408eb298433d643.seg 02691156
+02773838/points/5161d9adede671d6edc32c5c9ec9f827.pts 02773838/expert_verified/points_label/5161d9adede671d6edc32c5c9ec9f827.seg 02773838
+04379243/points/696beb1883be838cc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/696beb1883be838cc955e5ed03ef3a2f.seg 04379243
+03001627/points/bc184c3cbe3349b19fb4103277a6b93.pts 03001627/expert_verified/points_label/bc184c3cbe3349b19fb4103277a6b93.seg 03001627
+03642806/points/28fbfd8b8c9c6f16e1e44e2fc05361d9.pts 03642806/expert_verified/points_label/28fbfd8b8c9c6f16e1e44e2fc05361d9.seg 03642806
+04379243/points/506e4e67efe1794c1dacbc3d67b5a11a.pts 04379243/expert_verified/points_label/506e4e67efe1794c1dacbc3d67b5a11a.seg 04379243
+02691156/points/a48676cfe44fd9bee40acb87a6be88b3.pts 02691156/expert_verified/points_label/a48676cfe44fd9bee40acb87a6be88b3.seg 02691156
+04379243/points/9e5926bfdc7f01749e65a3d2929a9516.pts 04379243/expert_verified/points_label/9e5926bfdc7f01749e65a3d2929a9516.seg 04379243
+04379243/points/dc47d49db6ac670635d498476a30ff0e.pts 04379243/expert_verified/points_label/dc47d49db6ac670635d498476a30ff0e.seg 04379243
+04379243/points/33c6e3b21a67b750e78d7b497732dce1.pts 04379243/expert_verified/points_label/33c6e3b21a67b750e78d7b497732dce1.seg 04379243
+04379243/points/27295a6f585b7817febad4f49b26ec52.pts 04379243/expert_verified/points_label/27295a6f585b7817febad4f49b26ec52.seg 04379243
+03624134/points/6f8b660661269406504c6b6d62466c67.pts 03624134/expert_verified/points_label/6f8b660661269406504c6b6d62466c67.seg 03624134
+03642806/points/dbc61cbed5f7f2b33c1abb78f1519c49.pts 03642806/expert_verified/points_label/dbc61cbed5f7f2b33c1abb78f1519c49.seg 03642806
+03001627/points/374bec02e71fe06528b4c5ec471dc963.pts 03001627/expert_verified/points_label/374bec02e71fe06528b4c5ec471dc963.seg 03001627
+03001627/points/b41aaea5754adae0444b41d6d7f557fa.pts 03001627/expert_verified/points_label/b41aaea5754adae0444b41d6d7f557fa.seg 03001627
+03001627/points/7f4f73ad1b3f882ba14472becb07b261.pts 03001627/expert_verified/points_label/7f4f73ad1b3f882ba14472becb07b261.seg 03001627
+03001627/points/b80122c3a0543a7b7eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/b80122c3a0543a7b7eaeab1f0c9120b7.seg 03001627
+04379243/points/2e4fbab46e264616d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/2e4fbab46e264616d93768e7b9b1eabf.seg 04379243
+03001627/points/4a12589099b05c51e13b3410f3683610.pts 03001627/expert_verified/points_label/4a12589099b05c51e13b3410f3683610.seg 03001627
+03001627/points/bc523df998d94c7223ac0bd64c9cb255.pts 03001627/expert_verified/points_label/bc523df998d94c7223ac0bd64c9cb255.seg 03001627
+02691156/points/218caa58819e10d1fe40308d822f996c.pts 02691156/expert_verified/points_label/218caa58819e10d1fe40308d822f996c.seg 02691156
+04379243/points/a5e951c9d7a9a93f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/a5e951c9d7a9a93f8cbb8bac2032149c.seg 04379243
+03636649/points/f228f6cd86162beb659dda512294c744.pts 03636649/expert_verified/points_label/f228f6cd86162beb659dda512294c744.seg 03636649
+03467517/points/648a820e550bdfd093f0194265a9746c.pts 03467517/expert_verified/points_label/648a820e550bdfd093f0194265a9746c.seg 03467517
+03624134/points/8f61777bf6b57fedc13545c5b1a2e607.pts 03624134/expert_verified/points_label/8f61777bf6b57fedc13545c5b1a2e607.seg 03624134
+03001627/points/bb9efb4912a018b3c329e2758ab09ecb.pts 03001627/expert_verified/points_label/bb9efb4912a018b3c329e2758ab09ecb.seg 03001627
+03001627/points/fdac1f9c0b030841c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/fdac1f9c0b030841c8687ff9b0b4e4ac.seg 03001627
+02691156/points/8ac8c21b63ff535fca8607f540cc62ba.pts 02691156/expert_verified/points_label/8ac8c21b63ff535fca8607f540cc62ba.seg 02691156
+03467517/points/4e4d180e78d8b52a93f0194265a9746c.pts 03467517/expert_verified/points_label/4e4d180e78d8b52a93f0194265a9746c.seg 03467517
+03636649/points/7bc1b202ebf000625949e084b65603cf.pts 03636649/expert_verified/points_label/7bc1b202ebf000625949e084b65603cf.seg 03636649
+03001627/points/3c8362c1e57c30d7e6c5cd45aa112726.pts 03001627/expert_verified/points_label/3c8362c1e57c30d7e6c5cd45aa112726.seg 03001627
+03001627/points/5510d5af1ab5714b3c42e318f3affc.pts 03001627/expert_verified/points_label/5510d5af1ab5714b3c42e318f3affc.seg 03001627
+04379243/points/4d393b562df7cfad9a16b095d67f7209.pts 04379243/expert_verified/points_label/4d393b562df7cfad9a16b095d67f7209.seg 04379243
+03797390/points/e984fd7e97c2be347eaeab1f0c9120b7.pts 03797390/expert_verified/points_label/e984fd7e97c2be347eaeab1f0c9120b7.seg 03797390
+03001627/points/483d22dbbee32ee54e5c7d89bdfc49a3.pts 03001627/expert_verified/points_label/483d22dbbee32ee54e5c7d89bdfc49a3.seg 03001627
+02691156/points/a5cd14be786fc8175e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/a5cd14be786fc8175e9e2656aff7dd5b.seg 02691156
+03636649/points/d4bbd93c0d85e77d7934a0d24a61231.pts 03636649/expert_verified/points_label/d4bbd93c0d85e77d7934a0d24a61231.seg 03636649
+03467517/points/7027bc171baae1d663e148e250c0340d.pts 03467517/expert_verified/points_label/7027bc171baae1d663e148e250c0340d.seg 03467517
+03636649/points/1a44dd6ee873d443da13974b3533fb59.pts 03636649/expert_verified/points_label/1a44dd6ee873d443da13974b3533fb59.seg 03636649
+04379243/points/2e3037a285fd8b5c1be2a853ec4f9e8.pts 04379243/expert_verified/points_label/2e3037a285fd8b5c1be2a853ec4f9e8.seg 04379243
+04379243/points/e3b585b15506fa7113f96345312df593.pts 04379243/expert_verified/points_label/e3b585b15506fa7113f96345312df593.seg 04379243
+02958343/points/ee1d28a50a2b71e129348d14ca881f7d.pts 02958343/expert_verified/points_label/ee1d28a50a2b71e129348d14ca881f7d.seg 02958343
+03001627/points/22af872ac796ed26ff8d7c1096fae070.pts 03001627/expert_verified/points_label/22af872ac796ed26ff8d7c1096fae070.seg 03001627
+03642806/points/9b4ab67eb448c49c11ced4a54f2e6229.pts 03642806/expert_verified/points_label/9b4ab67eb448c49c11ced4a54f2e6229.seg 03642806
+03624134/points/1640911b9dc0ef0da95c6095f89cd899.pts 03624134/expert_verified/points_label/1640911b9dc0ef0da95c6095f89cd899.seg 03624134
+03001627/points/f6810de4042cc5ce57bd4bc6eae9b341.pts 03001627/expert_verified/points_label/f6810de4042cc5ce57bd4bc6eae9b341.seg 03001627
+03001627/points/c46eb7460be602b6bf80985a99195eb8.pts 03001627/expert_verified/points_label/c46eb7460be602b6bf80985a99195eb8.seg 03001627
+03624134/points/debbbf239d59d8724662dc124dd336ed.pts 03624134/expert_verified/points_label/debbbf239d59d8724662dc124dd336ed.seg 03624134
+04379243/points/5b51e63726f21bb6a75d03186a0409e2.pts 04379243/expert_verified/points_label/5b51e63726f21bb6a75d03186a0409e2.seg 04379243
+02691156/points/b59a7cab8e95f6eaf3a7414a84b5637.pts 02691156/expert_verified/points_label/b59a7cab8e95f6eaf3a7414a84b5637.seg 02691156
+03001627/points/52c32b187590e8f3bba5aaac798c64af.pts 03001627/expert_verified/points_label/52c32b187590e8f3bba5aaac798c64af.seg 03001627
+03001627/points/1c173d970e21e9a8be95ff480950e9ef.pts 03001627/expert_verified/points_label/1c173d970e21e9a8be95ff480950e9ef.seg 03001627
+03624134/points/7238d0009faeacb5fd770de1635caa0.pts 03624134/expert_verified/points_label/7238d0009faeacb5fd770de1635caa0.seg 03624134
+04379243/points/cc554812025dc498e7ed5b5b11f935c9.pts 04379243/expert_verified/points_label/cc554812025dc498e7ed5b5b11f935c9.seg 04379243
+04379243/points/fff492e352c8cb336240c88cd4684446.pts 04379243/expert_verified/points_label/fff492e352c8cb336240c88cd4684446.seg 04379243
+03636649/points/e0a2948797cc33b2e19a0cc107ada7cd.pts 03636649/expert_verified/points_label/e0a2948797cc33b2e19a0cc107ada7cd.seg 03636649
+03636649/points/fe02f6594ed8b96ae85a3dc26b76b2ae.pts 03636649/expert_verified/points_label/fe02f6594ed8b96ae85a3dc26b76b2ae.seg 03636649
+04379243/points/d4a7a1dc0f1a51986f15d61c214769af.pts 04379243/expert_verified/points_label/d4a7a1dc0f1a51986f15d61c214769af.seg 04379243
+03624134/points/3dbda789bc59a5f99246ea0301684d80.pts 03624134/expert_verified/points_label/3dbda789bc59a5f99246ea0301684d80.seg 03624134
+04379243/points/b82e068c2c18cd67b09f0ca9c143fdfd.pts 04379243/expert_verified/points_label/b82e068c2c18cd67b09f0ca9c143fdfd.seg 04379243
+03001627/points/b360f2264526521f1dee989d1177ef4e.pts 03001627/expert_verified/points_label/b360f2264526521f1dee989d1177ef4e.seg 03001627
+02691156/points/8ff8f3c845e7ae8443afdb9c81ff2967.pts 02691156/expert_verified/points_label/8ff8f3c845e7ae8443afdb9c81ff2967.seg 02691156
+03001627/points/ea87765cf9dbe2fe55f46d55537192b6.pts 03001627/expert_verified/points_label/ea87765cf9dbe2fe55f46d55537192b6.seg 03001627
+03001627/points/df23ca11080bb439676c272956dad3c2.pts 03001627/expert_verified/points_label/df23ca11080bb439676c272956dad3c2.seg 03001627
+03790512/points/a3dfeae5bced3533b37378f3c85478b4.pts 03790512/expert_verified/points_label/a3dfeae5bced3533b37378f3c85478b4.seg 03790512
+04379243/points/9af7a071bbd432baa5526f91aecc0c37.pts 04379243/expert_verified/points_label/9af7a071bbd432baa5526f91aecc0c37.seg 04379243
+03001627/points/a8b5f5b6bf0cb2d6876b399a99a15c0f.pts 03001627/expert_verified/points_label/a8b5f5b6bf0cb2d6876b399a99a15c0f.seg 03001627
+03001627/points/c7e590c0390e8d5debe67d9b32c3ddf8.pts 03001627/expert_verified/points_label/c7e590c0390e8d5debe67d9b32c3ddf8.seg 03001627
+03790512/points/4f30742005b7c20e883158c0007ed9ba.pts 03790512/expert_verified/points_label/4f30742005b7c20e883158c0007ed9ba.seg 03790512
+04379243/points/40b632472f8e69a7664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/40b632472f8e69a7664b3b9b23ddfcbc.seg 04379243
+03467517/points/d71c17b4d1ffa131f10a27cbb87f3a5.pts 03467517/expert_verified/points_label/d71c17b4d1ffa131f10a27cbb87f3a5.seg 03467517
+04379243/points/f563e9cd92a0dbe5a07b1c1d0ca9cf45.pts 04379243/expert_verified/points_label/f563e9cd92a0dbe5a07b1c1d0ca9cf45.seg 04379243
+03797390/points/1a97f3c83016abca21d0de04f408950f.pts 03797390/expert_verified/points_label/1a97f3c83016abca21d0de04f408950f.seg 03797390
+04379243/points/c3135e3b21b42e132449009b96f8a6ed.pts 04379243/expert_verified/points_label/c3135e3b21b42e132449009b96f8a6ed.seg 04379243
+03636649/points/89b168160388c29da996f5a90dae9cac.pts 03636649/expert_verified/points_label/89b168160388c29da996f5a90dae9cac.seg 03636649
+02958343/points/8bbbfdbec9251733ace5721ccacba16.pts 02958343/expert_verified/points_label/8bbbfdbec9251733ace5721ccacba16.seg 02958343
+04379243/points/db5a895ae7358c97b66213207f46bee7.pts 04379243/expert_verified/points_label/db5a895ae7358c97b66213207f46bee7.seg 04379243
+03001627/points/6a28919186eb55ecf69d0cf4fdc89b12.pts 03001627/expert_verified/points_label/6a28919186eb55ecf69d0cf4fdc89b12.seg 03001627
+04379243/points/e7169243daef074dc82dc2efb3363de1.pts 04379243/expert_verified/points_label/e7169243daef074dc82dc2efb3363de1.seg 04379243
+03467517/points/4ae5a491c3ffb473462c6cdd250c26bb.pts 03467517/expert_verified/points_label/4ae5a491c3ffb473462c6cdd250c26bb.seg 03467517
+04379243/points/e1a8e9e2059f4792fbb8cbddab1c2002.pts 04379243/expert_verified/points_label/e1a8e9e2059f4792fbb8cbddab1c2002.seg 04379243
+03467517/points/364f85832427992343820c03f9f59458.pts 03467517/expert_verified/points_label/364f85832427992343820c03f9f59458.seg 03467517
+02958343/points/4822076e48b366371f0d59cde6139796.pts 02958343/expert_verified/points_label/4822076e48b366371f0d59cde6139796.seg 02958343
+03636649/points/d34a10201a5448a253cf897b7fc1d12.pts 03636649/expert_verified/points_label/d34a10201a5448a253cf897b7fc1d12.seg 03636649
+03467517/points/77095861248c816693f0194265a9746c.pts 03467517/expert_verified/points_label/77095861248c816693f0194265a9746c.seg 03467517
+04379243/points/dacde6546ca2e07f66dc6ea1ac82d91f.pts 04379243/expert_verified/points_label/dacde6546ca2e07f66dc6ea1ac82d91f.seg 04379243
+03636649/points/670ad2964ad5a98c9f1a71e46bbde97c.pts 03636649/expert_verified/points_label/670ad2964ad5a98c9f1a71e46bbde97c.seg 03636649
+02691156/points/77c9fd0f0c6b0e9fca8607f540cc62ba.pts 02691156/expert_verified/points_label/77c9fd0f0c6b0e9fca8607f540cc62ba.seg 02691156
+03001627/points/5fc6b04623ae6a9963ed57e35c972b4b.pts 03001627/expert_verified/points_label/5fc6b04623ae6a9963ed57e35c972b4b.seg 03001627
+02958343/points/f18093ac0242d439f500cc506a763c18.pts 02958343/expert_verified/points_label/f18093ac0242d439f500cc506a763c18.seg 02958343
+03001627/points/2fed64c67552aa689c1db271ad9472a7.pts 03001627/expert_verified/points_label/2fed64c67552aa689c1db271ad9472a7.seg 03001627
+03001627/points/bf7e8e0dc4f4038cc2567be77cb7ab45.pts 03001627/expert_verified/points_label/bf7e8e0dc4f4038cc2567be77cb7ab45.seg 03001627
+04379243/points/690e073a4000c7ae540e292bd26f307a.pts 04379243/expert_verified/points_label/690e073a4000c7ae540e292bd26f307a.seg 04379243
+03467517/points/5fc56e6d220d775e381b7fbf79296afb.pts 03467517/expert_verified/points_label/5fc56e6d220d775e381b7fbf79296afb.seg 03467517
+04379243/points/8af3fd230ea7ac6518101790733ed6b2.pts 04379243/expert_verified/points_label/8af3fd230ea7ac6518101790733ed6b2.seg 04379243
+03636649/points/80436dff2a30721849655ac7c771b113.pts 03636649/expert_verified/points_label/80436dff2a30721849655ac7c771b113.seg 03636649
+03790512/points/b767982d38b5171e429f1c522640e6f0.pts 03790512/expert_verified/points_label/b767982d38b5171e429f1c522640e6f0.seg 03790512
+03001627/points/40e6fb27aeb9c9ab44f999802029a79a.pts 03001627/expert_verified/points_label/40e6fb27aeb9c9ab44f999802029a79a.seg 03001627
+04379243/points/59e1afdec89de9442b70eac6546e93fd.pts 04379243/expert_verified/points_label/59e1afdec89de9442b70eac6546e93fd.seg 04379243
+02691156/points/43d8125d940bb2ae850f318836ee7512.pts 02691156/expert_verified/points_label/43d8125d940bb2ae850f318836ee7512.seg 02691156
+02691156/points/cbc9d6ae9d22fcc57f3efc94c2d31dc5.pts 02691156/expert_verified/points_label/cbc9d6ae9d22fcc57f3efc94c2d31dc5.seg 02691156
+04379243/points/f585560965413925d706ecb3379aa341.pts 04379243/expert_verified/points_label/f585560965413925d706ecb3379aa341.seg 04379243
+04379243/points/adee49b8f5251efeaade78cbbf8fad3b.pts 04379243/expert_verified/points_label/adee49b8f5251efeaade78cbbf8fad3b.seg 04379243
+03261776/points/ccf84f2cbd3ebeb247ba1bc05b9a0f37.pts 03261776/expert_verified/points_label/ccf84f2cbd3ebeb247ba1bc05b9a0f37.seg 03261776
+03001627/points/2343e2c4fa69f33a2ff834514c92e8fd.pts 03001627/expert_verified/points_label/2343e2c4fa69f33a2ff834514c92e8fd.seg 03001627
+03636649/points/1d89da4ac1538ada9c949ae6274aa016.pts 03636649/expert_verified/points_label/1d89da4ac1538ada9c949ae6274aa016.seg 03636649
+03001627/points/51e14c516e45ec3b18ed59365c9648a7.pts 03001627/expert_verified/points_label/51e14c516e45ec3b18ed59365c9648a7.seg 03001627
+03001627/points/1e276a016b664e424d678187b8261d95.pts 03001627/expert_verified/points_label/1e276a016b664e424d678187b8261d95.seg 03001627
+03636649/points/4deef34d95367b58c0d95250e682f6ee.pts 03636649/expert_verified/points_label/4deef34d95367b58c0d95250e682f6ee.seg 03636649
+03001627/points/5d3eff6a1b9a119da011ccf7cbabf68e.pts 03001627/expert_verified/points_label/5d3eff6a1b9a119da011ccf7cbabf68e.seg 03001627
+04379243/points/9afaf5ab87a889f67acae9ce58893de5.pts 04379243/expert_verified/points_label/9afaf5ab87a889f67acae9ce58893de5.seg 04379243
+04379243/points/5431993203dfcf797ec12e029bc725db.pts 04379243/expert_verified/points_label/5431993203dfcf797ec12e029bc725db.seg 04379243
+03001627/points/6a01eed3a575987211e48e4bcdc4a2a3.pts 03001627/expert_verified/points_label/6a01eed3a575987211e48e4bcdc4a2a3.seg 03001627
+02958343/points/a8f2c3adc0671c15c64e95fc6a597455.pts 02958343/expert_verified/points_label/a8f2c3adc0671c15c64e95fc6a597455.seg 02958343
+04379243/points/f60960ae4dc8e293c8ce22a41ea48e48.pts 04379243/expert_verified/points_label/f60960ae4dc8e293c8ce22a41ea48e48.seg 04379243
+03624134/points/3a4f0118a57093cbf7c4ed45ce654123.pts 03624134/expert_verified/points_label/3a4f0118a57093cbf7c4ed45ce654123.seg 03624134
+03636649/points/52783aa89adf06f3250c527721570ba0.pts 03636649/expert_verified/points_label/52783aa89adf06f3250c527721570ba0.seg 03636649
+03001627/points/b13a4df698183bf9afb6676a5cd782b6.pts 03001627/expert_verified/points_label/b13a4df698183bf9afb6676a5cd782b6.seg 03001627
+03636649/points/26f725bb6578936cd247b9308cd5c441.pts 03636649/expert_verified/points_label/26f725bb6578936cd247b9308cd5c441.seg 03636649
+03001627/points/6df1ecffaa0abdbf327289c00b6dc9ca.pts 03001627/expert_verified/points_label/6df1ecffaa0abdbf327289c00b6dc9ca.seg 03001627
+04379243/points/3c475d9f0433a7eaad2650d014e970a5.pts 04379243/expert_verified/points_label/3c475d9f0433a7eaad2650d014e970a5.seg 04379243
+02958343/points/fee1c13922c07e8711b978ff9450f61b.pts 02958343/expert_verified/points_label/fee1c13922c07e8711b978ff9450f61b.seg 02958343
+04379243/points/6bc941dbd290c7f21acdac000802e11c.pts 04379243/expert_verified/points_label/6bc941dbd290c7f21acdac000802e11c.seg 04379243
+02958343/points/6333b9c777384ad14362be10a3fc8255.pts 02958343/expert_verified/points_label/6333b9c777384ad14362be10a3fc8255.seg 02958343
+03001627/points/9a35f15e924e19db637adadafee6f182.pts 03001627/expert_verified/points_label/9a35f15e924e19db637adadafee6f182.seg 03001627
+03001627/points/b0531a0d44fc22144224ee0743294f79.pts 03001627/expert_verified/points_label/b0531a0d44fc22144224ee0743294f79.seg 03001627
+03636649/points/913ff6452d0ea43c9d62807daf4a2134.pts 03636649/expert_verified/points_label/913ff6452d0ea43c9d62807daf4a2134.seg 03636649
+03467517/points/e45f323ce7ecab8393f0194265a9746c.pts 03467517/expert_verified/points_label/e45f323ce7ecab8393f0194265a9746c.seg 03467517
+02691156/points/aa2af754642256c08699933784576e73.pts 02691156/expert_verified/points_label/aa2af754642256c08699933784576e73.seg 02691156
+04379243/points/75b308ba45762ad499e8bf807e902261.pts 04379243/expert_verified/points_label/75b308ba45762ad499e8bf807e902261.seg 04379243
+03001627/points/3622d983fd6d7b98e3a73d090627e9ba.pts 03001627/expert_verified/points_label/3622d983fd6d7b98e3a73d090627e9ba.seg 03001627
+04225987/points/db4c8bf323465e4c537d393009a79347.pts 04225987/expert_verified/points_label/db4c8bf323465e4c537d393009a79347.seg 04225987
+04379243/points/132bfde1fabe9ab771a782a4379556c7.pts 04379243/expert_verified/points_label/132bfde1fabe9ab771a782a4379556c7.seg 04379243
+03001627/points/3dc8243b17bc790620768660cf080d12.pts 03001627/expert_verified/points_label/3dc8243b17bc790620768660cf080d12.seg 03001627
+04379243/points/ccb96ea5f047c97f278d386bfa54545.pts 04379243/expert_verified/points_label/ccb96ea5f047c97f278d386bfa54545.seg 04379243
+04379243/points/14ae5631e7dfa10430bbd4cddd04c77b.pts 04379243/expert_verified/points_label/14ae5631e7dfa10430bbd4cddd04c77b.seg 04379243
+04379243/points/78a81cbd2a5720d93a938fdd57fac3b4.pts 04379243/expert_verified/points_label/78a81cbd2a5720d93a938fdd57fac3b4.seg 04379243
+04379243/points/307bdd2a06137694a10ff7fd5e43a633.pts 04379243/expert_verified/points_label/307bdd2a06137694a10ff7fd5e43a633.seg 04379243
+03001627/points/f3573756e64259f2b29d280b4e59c527.pts 03001627/expert_verified/points_label/f3573756e64259f2b29d280b4e59c527.seg 03001627
+04379243/points/1815c6431b06dfb4f008d8a3590fb522.pts 04379243/expert_verified/points_label/1815c6431b06dfb4f008d8a3590fb522.seg 04379243
+04379243/points/7fda06ada2d897baadab4c26397edfab.pts 04379243/expert_verified/points_label/7fda06ada2d897baadab4c26397edfab.seg 04379243
+04379243/points/86b48365b2bd587e61830bc1b4d6c5ea.pts 04379243/expert_verified/points_label/86b48365b2bd587e61830bc1b4d6c5ea.seg 04379243
+03948459/points/6aae44dd39fb9476f059c10da31213ea.pts 03948459/expert_verified/points_label/6aae44dd39fb9476f059c10da31213ea.seg 03948459
+04379243/points/424c77a1f39ac41620dd2dd4d7d7656c.pts 04379243/expert_verified/points_label/424c77a1f39ac41620dd2dd4d7d7656c.seg 04379243
+03001627/points/8778c23fd21bdebf8a80d99ff4e76c20.pts 03001627/expert_verified/points_label/8778c23fd21bdebf8a80d99ff4e76c20.seg 03001627
+03001627/points/257deb231ce652169f2349486c570dd4.pts 03001627/expert_verified/points_label/257deb231ce652169f2349486c570dd4.seg 03001627
+03642806/points/e5559cd005d5c4942a7b0c74c5f22fc4.pts 03642806/expert_verified/points_label/e5559cd005d5c4942a7b0c74c5f22fc4.seg 03642806
+03001627/points/986e49bd8314d7424addf6a5f8726274.pts 03001627/expert_verified/points_label/986e49bd8314d7424addf6a5f8726274.seg 03001627
+04379243/points/b3fc5247186936f1dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b3fc5247186936f1dcfcef693e7ec696.seg 04379243
+02691156/points/da9d111e1175d318bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/da9d111e1175d318bbf3143b1cb6076a.seg 02691156
+04379243/points/54b26954e478b1a34ea8d5f5f27d7ce3.pts 04379243/expert_verified/points_label/54b26954e478b1a34ea8d5f5f27d7ce3.seg 04379243
+03001627/points/2d44744a7ea0bf724b3c42e318f3affc.pts 03001627/expert_verified/points_label/2d44744a7ea0bf724b3c42e318f3affc.seg 03001627
+04379243/points/9dd63148e5b0a4f79eaa55bb236fb6e1.pts 04379243/expert_verified/points_label/9dd63148e5b0a4f79eaa55bb236fb6e1.seg 04379243
+04379243/points/6ab7ebf9b94176456f1e07a56c129dfc.pts 04379243/expert_verified/points_label/6ab7ebf9b94176456f1e07a56c129dfc.seg 04379243
+03001627/points/6aaa9bd6e835eb0f9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6aaa9bd6e835eb0f9b9f2eb77f5e247e.seg 03001627
+03636649/points/34020466b4342812218c9f1216abefd.pts 03636649/expert_verified/points_label/34020466b4342812218c9f1216abefd.seg 03636649
+03001627/points/df7735e2bce09a511f98c0761af40e04.pts 03001627/expert_verified/points_label/df7735e2bce09a511f98c0761af40e04.seg 03001627
+03636649/points/1d963d5c54613202b0aa15078ea6f391.pts 03636649/expert_verified/points_label/1d963d5c54613202b0aa15078ea6f391.seg 03636649
+03636649/points/5a9e0dd068e2436bd7ebac63aa51083.pts 03636649/expert_verified/points_label/5a9e0dd068e2436bd7ebac63aa51083.seg 03636649
+03001627/points/b1f50d8d41a8c53b6197fd390b16d14d.pts 03001627/expert_verified/points_label/b1f50d8d41a8c53b6197fd390b16d14d.seg 03001627
+03001627/points/285931af369b12c2ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/285931af369b12c2ccd42a2d6eea63ed.seg 03001627
+03636649/points/69429d8ffb5009a82060e7309fc3fc6.pts 03636649/expert_verified/points_label/69429d8ffb5009a82060e7309fc3fc6.seg 03636649
+04379243/points/63b53646b3562677d395837145ded71.pts 04379243/expert_verified/points_label/63b53646b3562677d395837145ded71.seg 04379243
+03001627/points/ee5ee3f6759aabacf2f43e6f841bd32b.pts 03001627/expert_verified/points_label/ee5ee3f6759aabacf2f43e6f841bd32b.seg 03001627
+02691156/points/bdfbf1c555dacd9d325212819caa597d.pts 02691156/expert_verified/points_label/bdfbf1c555dacd9d325212819caa597d.seg 02691156
+04379243/points/9f321f05a7808719ab610b0c94236463.pts 04379243/expert_verified/points_label/9f321f05a7808719ab610b0c94236463.seg 04379243
+03624134/points/fb1f385d487d13d7aa0079d6fb0f853c.pts 03624134/expert_verified/points_label/fb1f385d487d13d7aa0079d6fb0f853c.seg 03624134
+04379243/points/109738784a0a6129a02c88fe01f2b9c1.pts 04379243/expert_verified/points_label/109738784a0a6129a02c88fe01f2b9c1.seg 04379243
+03467517/points/65e3bdc247b3ce3d4de904d1abbce016.pts 03467517/expert_verified/points_label/65e3bdc247b3ce3d4de904d1abbce016.seg 03467517
+02691156/points/94ce3a5ad2576e73a5cac89017eae8d1.pts 02691156/expert_verified/points_label/94ce3a5ad2576e73a5cac89017eae8d1.seg 02691156
+03001627/points/80fab0c55a60abb7dafb0be26f6b45d5.pts 03001627/expert_verified/points_label/80fab0c55a60abb7dafb0be26f6b45d5.seg 03001627
+04379243/points/e6ee101d3cb13bdd16a2b5862518c93.pts 04379243/expert_verified/points_label/e6ee101d3cb13bdd16a2b5862518c93.seg 04379243
+04379243/points/6f2ffe8c014a6a458af30108ea9ccb6c.pts 04379243/expert_verified/points_label/6f2ffe8c014a6a458af30108ea9ccb6c.seg 04379243
+02958343/points/504793ed2da6cf7eba3e2415e22cd45c.pts 02958343/expert_verified/points_label/504793ed2da6cf7eba3e2415e22cd45c.seg 02958343
+03467517/points/9e26dcbac33f056c343b0b12983b9982.pts 03467517/expert_verified/points_label/9e26dcbac33f056c343b0b12983b9982.seg 03467517
+03467517/points/a92cd0b5d559075daa9518d76daaca23.pts 03467517/expert_verified/points_label/a92cd0b5d559075daa9518d76daaca23.seg 03467517
+03636649/points/b6989c99bba1226539b3360f500ac52a.pts 03636649/expert_verified/points_label/b6989c99bba1226539b3360f500ac52a.seg 03636649
+03624134/points/cc38f97557029b2a2b5fd8277662be97.pts 03624134/expert_verified/points_label/cc38f97557029b2a2b5fd8277662be97.seg 03624134
+03790512/points/41cc9674e700c3fdb37378f3c85478b4.pts 03790512/expert_verified/points_label/41cc9674e700c3fdb37378f3c85478b4.seg 03790512
+03001627/points/56b171b1f1521d27291d12adef12641b.pts 03001627/expert_verified/points_label/56b171b1f1521d27291d12adef12641b.seg 03001627
+03636649/points/ddc2d39dac6e84506c5b8009db95f66f.pts 03636649/expert_verified/points_label/ddc2d39dac6e84506c5b8009db95f66f.seg 03636649
+02691156/points/edc185566c1df89c35fc197bbabcd5bd.pts 02691156/expert_verified/points_label/edc185566c1df89c35fc197bbabcd5bd.seg 02691156
+04379243/points/fb5e8a6361262c26acf7920879052e93.pts 04379243/expert_verified/points_label/fb5e8a6361262c26acf7920879052e93.seg 04379243
+04379243/points/8862cddf90fddb3119fb4103277a6b93.pts 04379243/expert_verified/points_label/8862cddf90fddb3119fb4103277a6b93.seg 04379243
+02691156/points/d5a94c9f09d238c4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/d5a94c9f09d238c4c3a35cee92bb95b.seg 02691156
+03636649/points/1682d4404196cf127588e2ca59b15f8.pts 03636649/expert_verified/points_label/1682d4404196cf127588e2ca59b15f8.seg 03636649
+04379243/points/2f33abdfe147813e44949d7685cb63ea.pts 04379243/expert_verified/points_label/2f33abdfe147813e44949d7685cb63ea.seg 04379243
+03001627/points/e158f7ba6828db5c654ea6737b0d3597.pts 03001627/expert_verified/points_label/e158f7ba6828db5c654ea6737b0d3597.seg 03001627
+04379243/points/564474f25a4400c5dc20930e6fc85682.pts 04379243/expert_verified/points_label/564474f25a4400c5dc20930e6fc85682.seg 04379243
+04379243/points/eb379b2b95e76502e258d1c3e7302e7b.pts 04379243/expert_verified/points_label/eb379b2b95e76502e258d1c3e7302e7b.seg 04379243
+03001627/points/3a1b54325b3565e72ca4b544d68c52.pts 03001627/expert_verified/points_label/3a1b54325b3565e72ca4b544d68c52.seg 03001627
+04225987/points/393ca71bd734f3071082f2ea630bf69e.pts 04225987/expert_verified/points_label/393ca71bd734f3071082f2ea630bf69e.seg 04225987
+03636649/points/bd1cbcb990375022b45fed2806c331ab.pts 03636649/expert_verified/points_label/bd1cbcb990375022b45fed2806c331ab.seg 03636649
+03001627/points/6a9dce6566cd61652b339ec555ba3bfc.pts 03001627/expert_verified/points_label/6a9dce6566cd61652b339ec555ba3bfc.seg 03001627
+02691156/points/94379090010cd6bb874c9ce092a813ef.pts 02691156/expert_verified/points_label/94379090010cd6bb874c9ce092a813ef.seg 02691156
+02773838/points/d3bd250ca3cb8e29976855a35549333.pts 02773838/expert_verified/points_label/d3bd250ca3cb8e29976855a35549333.seg 02773838
+03001627/points/36cb782fbc164ac312591a3ac05fadf1.pts 03001627/expert_verified/points_label/36cb782fbc164ac312591a3ac05fadf1.seg 03001627
+03642806/points/2211a40cc77a085362c091e763f81d3.pts 03642806/expert_verified/points_label/2211a40cc77a085362c091e763f81d3.seg 03642806
+04379243/points/5cbd726c3ffd8fc49b458816be7a3962.pts 04379243/expert_verified/points_label/5cbd726c3ffd8fc49b458816be7a3962.seg 04379243
+02691156/points/72aee7d0e998a68aca8607f540cc62ba.pts 02691156/expert_verified/points_label/72aee7d0e998a68aca8607f540cc62ba.seg 02691156
+04379243/points/1c3310f4c05ce1f6a192483aa282f8e5.pts 04379243/expert_verified/points_label/1c3310f4c05ce1f6a192483aa282f8e5.seg 04379243
+04379243/points/4ced745f960f7439b91767277279ac70.pts 04379243/expert_verified/points_label/4ced745f960f7439b91767277279ac70.seg 04379243
+03642806/points/8d70fb6adc63e21eb7e0383b9609fa5.pts 03642806/expert_verified/points_label/8d70fb6adc63e21eb7e0383b9609fa5.seg 03642806
+03001627/points/2bd6800d64c01d677721fafb59ea099.pts 03001627/expert_verified/points_label/2bd6800d64c01d677721fafb59ea099.seg 03001627
+03467517/points/1abe78447898821e93f0194265a9746c.pts 03467517/expert_verified/points_label/1abe78447898821e93f0194265a9746c.seg 03467517
+02691156/points/9bf3c126d5918c41f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/9bf3c126d5918c41f5c7319b71bdce6e.seg 02691156
+03642806/points/1312ea502b4e9b51701c1f58e22b85e8.pts 03642806/expert_verified/points_label/1312ea502b4e9b51701c1f58e22b85e8.seg 03642806
+04379243/points/a9cc8112fb8c4ed5dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/a9cc8112fb8c4ed5dfd21203bf8b4b46.seg 04379243
+03642806/points/62b25a5e3119b8409023147b38c03c9f.pts 03642806/expert_verified/points_label/62b25a5e3119b8409023147b38c03c9f.seg 03642806
+04379243/points/a4fcd8afe8b6de585beaf00da5b709c2.pts 04379243/expert_verified/points_label/a4fcd8afe8b6de585beaf00da5b709c2.seg 04379243
+03636649/points/907fd296708ae71dd5fab5deb286066.pts 03636649/expert_verified/points_label/907fd296708ae71dd5fab5deb286066.seg 03636649
+04379243/points/c5ae96124c15c734e6c5cd45aa112726.pts 04379243/expert_verified/points_label/c5ae96124c15c734e6c5cd45aa112726.seg 04379243
+03642806/points/ef6d43add46d0cae4e07b09c086cc5c4.pts 03642806/expert_verified/points_label/ef6d43add46d0cae4e07b09c086cc5c4.seg 03642806
+04379243/points/8d07df2bf706cda58c5591114064d173.pts 04379243/expert_verified/points_label/8d07df2bf706cda58c5591114064d173.seg 04379243
+02958343/points/5316fab78a6732f0428df271ebc70bc0.pts 02958343/expert_verified/points_label/5316fab78a6732f0428df271ebc70bc0.seg 02958343
+03467517/points/7946e354e342f560c5a468097fc791e4.pts 03467517/expert_verified/points_label/7946e354e342f560c5a468097fc791e4.seg 03467517
+03467517/points/d3684d071dcb6bffd3193ed047bef161.pts 03467517/expert_verified/points_label/d3684d071dcb6bffd3193ed047bef161.seg 03467517
+04379243/points/33b081062b2195e71771ee930e861b13.pts 04379243/expert_verified/points_label/33b081062b2195e71771ee930e861b13.seg 04379243
+02958343/points/511962626501e4abf500cc506a763c18.pts 02958343/expert_verified/points_label/511962626501e4abf500cc506a763c18.seg 02958343
+03797390/points/c82b9f1b98f044fc15cf6e5ad80f2da.pts 03797390/expert_verified/points_label/c82b9f1b98f044fc15cf6e5ad80f2da.seg 03797390
+04379243/points/49f625856c796254d249abd69334079c.pts 04379243/expert_verified/points_label/49f625856c796254d249abd69334079c.seg 04379243
+03001627/points/ca4900c42b8016ef8397cd720acaa508.pts 03001627/expert_verified/points_label/ca4900c42b8016ef8397cd720acaa508.seg 03001627
+03636649/points/31a15957bd4f32f87eedf2c7d21f7cfa.pts 03636649/expert_verified/points_label/31a15957bd4f32f87eedf2c7d21f7cfa.seg 03636649
+03797390/points/928a383f79698c3fb6d9bc28c8d8a2c4.pts 03797390/expert_verified/points_label/928a383f79698c3fb6d9bc28c8d8a2c4.seg 03797390
+04379243/points/17e5a64889ca085fa5526f91aecc0c37.pts 04379243/expert_verified/points_label/17e5a64889ca085fa5526f91aecc0c37.seg 04379243
+02958343/points/cbe2dc469c47bb80425b2c354eccabaf.pts 02958343/expert_verified/points_label/cbe2dc469c47bb80425b2c354eccabaf.seg 02958343
+03001627/points/19c8189116dd7cd3e95c611687989498.pts 03001627/expert_verified/points_label/19c8189116dd7cd3e95c611687989498.seg 03001627
+03636649/points/7f518fe982aae1b5940c8a2639c8747.pts 03636649/expert_verified/points_label/7f518fe982aae1b5940c8a2639c8747.seg 03636649
+03636649/points/7b1fef0071908d4bd93768e7b9b1eabf.pts 03636649/expert_verified/points_label/7b1fef0071908d4bd93768e7b9b1eabf.seg 03636649
+03001627/points/475e2c8f7a2c1bbd9acf9a86c283d1a2.pts 03001627/expert_verified/points_label/475e2c8f7a2c1bbd9acf9a86c283d1a2.seg 03001627
+03467517/points/5c805aca7aa8bdd3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/5c805aca7aa8bdd3ac61a2f8346a8f.seg 03467517
+03790512/points/8032295bd3851d75468bac13e007a6e9.pts 03790512/expert_verified/points_label/8032295bd3851d75468bac13e007a6e9.seg 03790512
+02691156/points/3e0561d70c7fd4f51c6e4e20f2b76086.pts 02691156/expert_verified/points_label/3e0561d70c7fd4f51c6e4e20f2b76086.seg 02691156
+02691156/points/e5610bbacaf098508b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/e5610bbacaf098508b96ae1a0a8b84ec.seg 02691156
+03467517/points/97e8ee1b6df404bd57700c05b1862d8.pts 03467517/expert_verified/points_label/97e8ee1b6df404bd57700c05b1862d8.seg 03467517
+03636649/points/981b55897cee64403c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/981b55897cee64403c8d0fdfb1cc2535.seg 03636649
+04379243/points/204d9ecc196990ebe8479ad2eabcbab4.pts 04379243/expert_verified/points_label/204d9ecc196990ebe8479ad2eabcbab4.seg 04379243
+04379243/points/9d039675f4d51869f3edd695842c6d58.pts 04379243/expert_verified/points_label/9d039675f4d51869f3edd695842c6d58.seg 04379243
+03467517/points/cb5b2e3f499e4fdecc571cd3cf8f17a1.pts 03467517/expert_verified/points_label/cb5b2e3f499e4fdecc571cd3cf8f17a1.seg 03467517
+04379243/points/5243b5491a4f8a16a2b5862518c93.pts 04379243/expert_verified/points_label/5243b5491a4f8a16a2b5862518c93.seg 04379243
+04379243/points/efbf0d75648b7c7d5792b99b8245d225.pts 04379243/expert_verified/points_label/efbf0d75648b7c7d5792b99b8245d225.seg 04379243
+03001627/points/c8265e04c94bcb5a1346e336f65f96f6.pts 03001627/expert_verified/points_label/c8265e04c94bcb5a1346e336f65f96f6.seg 03001627
+02958343/points/94cfcfb74e246f938acb0ff76f4aec7d.pts 02958343/expert_verified/points_label/94cfcfb74e246f938acb0ff76f4aec7d.seg 02958343
+03467517/points/a0b6f040538d26e3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/a0b6f040538d26e3ac61a2f8346a8f.seg 03467517
+03001627/points/70f1f85d47c970bb78dd615a59de5f05.pts 03001627/expert_verified/points_label/70f1f85d47c970bb78dd615a59de5f05.seg 03001627
+04379243/points/f4976e80b8533bcf85518f8659f21d56.pts 04379243/expert_verified/points_label/f4976e80b8533bcf85518f8659f21d56.seg 04379243
+03636649/points/9fdaafde365beafc37f7ce56c66316ea.pts 03636649/expert_verified/points_label/9fdaafde365beafc37f7ce56c66316ea.seg 03636649
+03467517/points/22033c6d7e5a90f193f0194265a9746c.pts 03467517/expert_verified/points_label/22033c6d7e5a90f193f0194265a9746c.seg 03467517
+02691156/points/c1b5dc92221bcdad5fc84bf2b9ef981.pts 02691156/expert_verified/points_label/c1b5dc92221bcdad5fc84bf2b9ef981.seg 02691156
+04379243/points/79d0985603f7ff3be6c5cd45aa112726.pts 04379243/expert_verified/points_label/79d0985603f7ff3be6c5cd45aa112726.seg 04379243
+03467517/points/5d6c1516b83dec8663e148e250c0340d.pts 03467517/expert_verified/points_label/5d6c1516b83dec8663e148e250c0340d.seg 03467517
+04379243/points/79c5df613523a462d42b9650f19dd425.pts 04379243/expert_verified/points_label/79c5df613523a462d42b9650f19dd425.seg 04379243
+03001627/points/f19e8da9d8f369c531e63f1270e2b445.pts 03001627/expert_verified/points_label/f19e8da9d8f369c531e63f1270e2b445.seg 03001627
+03001627/points/9a711bb7070ae88de948e3d64826c640.pts 03001627/expert_verified/points_label/9a711bb7070ae88de948e3d64826c640.seg 03001627
+03467517/points/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.pts 03467517/expert_verified/points_label/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.seg 03467517
+03001627/points/837ba605a4ab4a4f19fb4103277a6b93.pts 03001627/expert_verified/points_label/837ba605a4ab4a4f19fb4103277a6b93.seg 03001627
+03001627/points/807f08096308af5e28c0cecb7de2397a.pts 03001627/expert_verified/points_label/807f08096308af5e28c0cecb7de2397a.seg 03001627
+03467517/points/275c4f98ef07f2b393f0194265a9746c.pts 03467517/expert_verified/points_label/275c4f98ef07f2b393f0194265a9746c.seg 03467517
+04379243/points/57afaabf994feb305512673aa47c7e3d.pts 04379243/expert_verified/points_label/57afaabf994feb305512673aa47c7e3d.seg 04379243
+03001627/points/d9156f5552178de2713decb1a0563b12.pts 03001627/expert_verified/points_label/d9156f5552178de2713decb1a0563b12.seg 03001627
+03948459/points/fe62130ce6fcd9b77754fed890b42399.pts 03948459/expert_verified/points_label/fe62130ce6fcd9b77754fed890b42399.seg 03948459
+03261776/points/1757fe64e76a9630fc176230c2f2d294.pts 03261776/expert_verified/points_label/1757fe64e76a9630fc176230c2f2d294.seg 03261776
+03790512/points/3fd1bff496b369f71765540024eb9fef.pts 03790512/expert_verified/points_label/3fd1bff496b369f71765540024eb9fef.seg 03790512
+02958343/points/a6d494af391a97686436916a86a90ed7.pts 02958343/expert_verified/points_label/a6d494af391a97686436916a86a90ed7.seg 02958343
+04099429/points/59389aac7b1ea9b09b28f5f9cf8893b5.pts 04099429/expert_verified/points_label/59389aac7b1ea9b09b28f5f9cf8893b5.seg 04099429
+04379243/points/c399ed276ed35cb9a6ce08f0d82ba063.pts 04379243/expert_verified/points_label/c399ed276ed35cb9a6ce08f0d82ba063.seg 04379243
+03624134/points/e4f610f36ba3c6f69246ea0301684d80.pts 03624134/expert_verified/points_label/e4f610f36ba3c6f69246ea0301684d80.seg 03624134
+03636649/points/90b0f9a1ac2e54ecbc7f58784fda27b5.pts 03636649/expert_verified/points_label/90b0f9a1ac2e54ecbc7f58784fda27b5.seg 03636649
+03636649/points/e5e9ff118631c2a3ee088de33038f12a.pts 03636649/expert_verified/points_label/e5e9ff118631c2a3ee088de33038f12a.seg 03636649
+04099429/points/4936716925b1cd6428eba1f0b7744e9.pts 04099429/expert_verified/points_label/4936716925b1cd6428eba1f0b7744e9.seg 04099429
+04379243/points/6e446bb5adf14b0b6121178eafd002fd.pts 04379243/expert_verified/points_label/6e446bb5adf14b0b6121178eafd002fd.seg 04379243
+03001627/points/7ea38c936513f5df3772b104757a4809.pts 03001627/expert_verified/points_label/7ea38c936513f5df3772b104757a4809.seg 03001627
+04379243/points/23d68e01b77089ae76ad4f5e7c7020eb.pts 04379243/expert_verified/points_label/23d68e01b77089ae76ad4f5e7c7020eb.seg 04379243
+03636649/points/4d6bced89943df73b4edf02c99e16daa.pts 03636649/expert_verified/points_label/4d6bced89943df73b4edf02c99e16daa.seg 03636649
+04379243/points/3459eec8eb56fa312bac236fe109e385.pts 04379243/expert_verified/points_label/3459eec8eb56fa312bac236fe109e385.seg 04379243
+03261776/points/1a5e2a7cddc8e46aa681aea7976a4565.pts 03261776/expert_verified/points_label/1a5e2a7cddc8e46aa681aea7976a4565.seg 03261776
+03001627/points/ed0d65c68a1fa5c485e2f8b1d3a373fe.pts 03001627/expert_verified/points_label/ed0d65c68a1fa5c485e2f8b1d3a373fe.seg 03001627
+03636649/points/7b005e23eae2768eb08c032bedc99529.pts 03636649/expert_verified/points_label/7b005e23eae2768eb08c032bedc99529.seg 03636649
+04379243/points/3f2e9c14ab1d26a0ebead06af665220.pts 04379243/expert_verified/points_label/3f2e9c14ab1d26a0ebead06af665220.seg 04379243
+03001627/points/383ab6330284af461fc4ae93e00c18e5.pts 03001627/expert_verified/points_label/383ab6330284af461fc4ae93e00c18e5.seg 03001627
+02691156/points/fc7387d630c84bb9c863ab010b80d9ed.pts 02691156/expert_verified/points_label/fc7387d630c84bb9c863ab010b80d9ed.seg 02691156
+04225987/points/344e9402d06bd94031145076011658c5.pts 04225987/expert_verified/points_label/344e9402d06bd94031145076011658c5.seg 04225987
+04379243/points/745a2b060d0f692bf4b6538438a0b930.pts 04379243/expert_verified/points_label/745a2b060d0f692bf4b6538438a0b930.seg 04379243
+04379243/points/928ea87878a7bbe26cf876b69450cd4e.pts 04379243/expert_verified/points_label/928ea87878a7bbe26cf876b69450cd4e.seg 04379243
+03001627/points/5fe56a4a9d5508c3b2373df00b89e5d.pts 03001627/expert_verified/points_label/5fe56a4a9d5508c3b2373df00b89e5d.seg 03001627
+02691156/points/6a75658fb8242b9c590874dcd9dc8481.pts 02691156/expert_verified/points_label/6a75658fb8242b9c590874dcd9dc8481.seg 02691156
+03948459/points/f377665c5b17d0ce61b636d79e46a7e9.pts 03948459/expert_verified/points_label/f377665c5b17d0ce61b636d79e46a7e9.seg 03948459
+03642806/points/ab21f75b97d6b1054f22ce0a3592d5.pts 03642806/expert_verified/points_label/ab21f75b97d6b1054f22ce0a3592d5.seg 03642806
+04379243/points/a2baf45f001e118e2c79f7f31759bfa7.pts 04379243/expert_verified/points_label/a2baf45f001e118e2c79f7f31759bfa7.seg 04379243
+02691156/points/19ff8fce1658f864ca8607f540cc62ba.pts 02691156/expert_verified/points_label/19ff8fce1658f864ca8607f540cc62ba.seg 02691156
+04379243/points/8bb3a7e1cb24fe6febad4f49b26ec52.pts 04379243/expert_verified/points_label/8bb3a7e1cb24fe6febad4f49b26ec52.seg 04379243
+04379243/points/dbc5a4d1dc3a6e8271a782a4379556c7.pts 04379243/expert_verified/points_label/dbc5a4d1dc3a6e8271a782a4379556c7.seg 04379243
+03001627/points/e6c11fed9469141ace8fba09dd640742.pts 03001627/expert_verified/points_label/e6c11fed9469141ace8fba09dd640742.seg 03001627
+03797390/points/f99e19b8c4a729353deb88581ea8417a.pts 03797390/expert_verified/points_label/f99e19b8c4a729353deb88581ea8417a.seg 03797390
+03001627/points/d454f99b99248bf337c99625b0c170be.pts 03001627/expert_verified/points_label/d454f99b99248bf337c99625b0c170be.seg 03001627
+03636649/points/7c23362b39f318cbb18d6f615cb18bdd.pts 03636649/expert_verified/points_label/7c23362b39f318cbb18d6f615cb18bdd.seg 03636649
+03001627/points/d8e2e2a923b372731cf97e154cc62f43.pts 03001627/expert_verified/points_label/d8e2e2a923b372731cf97e154cc62f43.seg 03001627
+03642806/points/621882a4afd2a126369873c1090720a1.pts 03642806/expert_verified/points_label/621882a4afd2a126369873c1090720a1.seg 03642806
+04379243/points/d5d1e750bb492dd5391e4d6c585a697a.pts 04379243/expert_verified/points_label/d5d1e750bb492dd5391e4d6c585a697a.seg 04379243
+03467517/points/42f3172b8770d2fd2200c35bfa7099ee.pts 03467517/expert_verified/points_label/42f3172b8770d2fd2200c35bfa7099ee.seg 03467517
+03624134/points/a2288d5f3a44233bc40c6b891c4913bd.pts 03624134/expert_verified/points_label/a2288d5f3a44233bc40c6b891c4913bd.seg 03624134
+02691156/points/90612205109d7458e84aab2e1d454e3c.pts 02691156/expert_verified/points_label/90612205109d7458e84aab2e1d454e3c.seg 02691156
+03001627/points/2c03bcb2a133ce28bb6caad47eee6580.pts 03001627/expert_verified/points_label/2c03bcb2a133ce28bb6caad47eee6580.seg 03001627
+03001627/points/f23d3a85baabd7ae32d9baba75737e72.pts 03001627/expert_verified/points_label/f23d3a85baabd7ae32d9baba75737e72.seg 03001627
+04379243/points/90be5de0faef91ef3f7e27638e63d848.pts 04379243/expert_verified/points_label/90be5de0faef91ef3f7e27638e63d848.seg 04379243
+02691156/points/d5f01e2aa54bbf28ca8607f540cc62ba.pts 02691156/expert_verified/points_label/d5f01e2aa54bbf28ca8607f540cc62ba.seg 02691156
+02691156/points/4f0bf26c62bb7c8b7e1c97634acf0214.pts 02691156/expert_verified/points_label/4f0bf26c62bb7c8b7e1c97634acf0214.seg 02691156
+03001627/points/4246c8c293c56ea34b3c42e318f3affc.pts 03001627/expert_verified/points_label/4246c8c293c56ea34b3c42e318f3affc.seg 03001627
+04379243/points/9b42cb91ccead6d42f6d10c5d1d56320.pts 04379243/expert_verified/points_label/9b42cb91ccead6d42f6d10c5d1d56320.seg 04379243
+03001627/points/c67b7b62e529295dfc30525e763ef5eb.pts 03001627/expert_verified/points_label/c67b7b62e529295dfc30525e763ef5eb.seg 03001627
+04379243/points/394c63a5658ef759b515d1675be6b5d3.pts 04379243/expert_verified/points_label/394c63a5658ef759b515d1675be6b5d3.seg 04379243
+03636649/points/13ba3fbe8fbc53f3ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/13ba3fbe8fbc53f3ef3a2c64cef919d0.seg 03636649
+04379243/points/cb860d60db8f3d18febad4f49b26ec52.pts 04379243/expert_verified/points_label/cb860d60db8f3d18febad4f49b26ec52.seg 04379243
+04379243/points/657aad273d665f5dd9823f45c4411583.pts 04379243/expert_verified/points_label/657aad273d665f5dd9823f45c4411583.seg 04379243
+03001627/points/64fcd1ba0df5d54d79b3e1be3524f72f.pts 03001627/expert_verified/points_label/64fcd1ba0df5d54d79b3e1be3524f72f.seg 03001627
+03642806/points/8489cb783d249651b674654e7bbe623d.pts 03642806/expert_verified/points_label/8489cb783d249651b674654e7bbe623d.seg 03642806
+03467517/points/3824a2336972d144a24eeca91f583600.pts 03467517/expert_verified/points_label/3824a2336972d144a24eeca91f583600.seg 03467517
+03797390/points/99eaa69cf6fe8811dec712af445786fe.pts 03797390/expert_verified/points_label/99eaa69cf6fe8811dec712af445786fe.seg 03797390
+03001627/points/e31d71ed32273fede42ac999db581f5e.pts 03001627/expert_verified/points_label/e31d71ed32273fede42ac999db581f5e.seg 03001627
+03001627/points/9a42cff883cbd358106f706dac6c58f0.pts 03001627/expert_verified/points_label/9a42cff883cbd358106f706dac6c58f0.seg 03001627
+04379243/points/b515a107aa3a3fd0e3dff0d5ebb43915.pts 04379243/expert_verified/points_label/b515a107aa3a3fd0e3dff0d5ebb43915.seg 04379243
+03001627/points/bd6a8b133fa4d269491d6cee03fef2a9.pts 03001627/expert_verified/points_label/bd6a8b133fa4d269491d6cee03fef2a9.seg 03001627
+03001627/points/51c8f249e778e84a5bae8923b29985ad.pts 03001627/expert_verified/points_label/51c8f249e778e84a5bae8923b29985ad.seg 03001627
+02691156/points/f12eefbbefabe566ca8607f540cc62ba.pts 02691156/expert_verified/points_label/f12eefbbefabe566ca8607f540cc62ba.seg 02691156
+02691156/points/ad6e93a1db3e1da5977e4bb19a62128e.pts 02691156/expert_verified/points_label/ad6e93a1db3e1da5977e4bb19a62128e.seg 02691156
+03001627/points/efa83c67ce47bfca304edcf7c4314468.pts 03001627/expert_verified/points_label/efa83c67ce47bfca304edcf7c4314468.seg 03001627
+03624134/points/d6e9e4e07bafca0fa37f3fc191551700.pts 03624134/expert_verified/points_label/d6e9e4e07bafca0fa37f3fc191551700.seg 03624134
+03642806/points/e083105e9c2a28bb0c3a03d0a1f182f.pts 03642806/expert_verified/points_label/e083105e9c2a28bb0c3a03d0a1f182f.seg 03642806
+03001627/points/d2992fd5e6715bad3bbf93f83cbaf271.pts 03001627/expert_verified/points_label/d2992fd5e6715bad3bbf93f83cbaf271.seg 03001627
+04379243/points/4a27cb9384782ce33e95c55cb020b7e6.pts 04379243/expert_verified/points_label/4a27cb9384782ce33e95c55cb020b7e6.seg 04379243
+04379243/points/cf046edeff204b81cdf7280ff8af6720.pts 04379243/expert_verified/points_label/cf046edeff204b81cdf7280ff8af6720.seg 04379243
+03001627/points/6534f04a1c349a3c8c6540fe6bc16d6f.pts 03001627/expert_verified/points_label/6534f04a1c349a3c8c6540fe6bc16d6f.seg 03001627
+03636649/points/1917888a2b6901091735ea0e092a805a.pts 03636649/expert_verified/points_label/1917888a2b6901091735ea0e092a805a.seg 03636649
+03636649/points/b37e07ac31fa4f311735ea0e092a805a.pts 03636649/expert_verified/points_label/b37e07ac31fa4f311735ea0e092a805a.seg 03636649
+03636649/points/2f6f1fe66631572c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/2f6f1fe66631572c6c5b8009db95f66f.seg 03636649
+03467517/points/feab270427cee00a24eeca91f583600.pts 03467517/expert_verified/points_label/feab270427cee00a24eeca91f583600.seg 03467517
+02691156/points/e30e25fe047ce1ea10b08ceced9a0113.pts 02691156/expert_verified/points_label/e30e25fe047ce1ea10b08ceced9a0113.seg 02691156
+03636649/points/b2347fe81bd2db6a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/b2347fe81bd2db6a4b3c42e318f3affc.seg 03636649
+03001627/points/bb7755090f984ba85dd1bba5b1310523.pts 03001627/expert_verified/points_label/bb7755090f984ba85dd1bba5b1310523.seg 03001627
+02691156/points/bc7ead8b45952ab8822054a0a020bf4a.pts 02691156/expert_verified/points_label/bc7ead8b45952ab8822054a0a020bf4a.seg 02691156
+02691156/points/5a1d4af1f417d28566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/5a1d4af1f417d28566cf1b4a8fc3914e.seg 02691156
+02691156/points/a6cbada42d1a30d0f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/a6cbada42d1a30d0f5c7319b71bdce6e.seg 02691156
+02691156/points/b785b39d10c33b5de9f07d25f575b2d4.pts 02691156/expert_verified/points_label/b785b39d10c33b5de9f07d25f575b2d4.seg 02691156
+03001627/points/2df8d2af1bc4b9972056b4bd5d870b47.pts 03001627/expert_verified/points_label/2df8d2af1bc4b9972056b4bd5d870b47.seg 03001627
+03797390/points/d46b98f63a017578ea456f4bbbc96af9.pts 03797390/expert_verified/points_label/d46b98f63a017578ea456f4bbbc96af9.seg 03797390
+04379243/points/1adf96850963550f19fb4103277a6b93.pts 04379243/expert_verified/points_label/1adf96850963550f19fb4103277a6b93.seg 04379243
+03001627/points/cb7a4324fdfa690e96dd43aa0ec847c9.pts 03001627/expert_verified/points_label/cb7a4324fdfa690e96dd43aa0ec847c9.seg 03001627
+03624134/points/c19088b4c32c0f1d22b38218e60be05.pts 03624134/expert_verified/points_label/c19088b4c32c0f1d22b38218e60be05.seg 03624134
+04379243/points/1acf7b0939f3eea2eafdf94e5032b200.pts 04379243/expert_verified/points_label/1acf7b0939f3eea2eafdf94e5032b200.seg 04379243
+03467517/points/d50d06b159363b1693f0194265a9746c.pts 03467517/expert_verified/points_label/d50d06b159363b1693f0194265a9746c.seg 03467517
+02691156/points/dacb447d7820e7f7ca8607f540cc62ba.pts 02691156/expert_verified/points_label/dacb447d7820e7f7ca8607f540cc62ba.seg 02691156
+04379243/points/c3a9dc47c5bf10aac3bd24f986301745.pts 04379243/expert_verified/points_label/c3a9dc47c5bf10aac3bd24f986301745.seg 04379243
+04379243/points/4791914b3bcaf57efebad4f49b26ec52.pts 04379243/expert_verified/points_label/4791914b3bcaf57efebad4f49b26ec52.seg 04379243
+03001627/points/bf3f14225e8f899db62f9fb4b7f0626.pts 03001627/expert_verified/points_label/bf3f14225e8f899db62f9fb4b7f0626.seg 03001627
+04379243/points/4f5c111a89b3fd27aa29e9f0529e8ef7.pts 04379243/expert_verified/points_label/4f5c111a89b3fd27aa29e9f0529e8ef7.seg 04379243
+03001627/points/6af8d7bfa508b8d23759750e8db40476.pts 03001627/expert_verified/points_label/6af8d7bfa508b8d23759750e8db40476.seg 03001627
+02691156/points/427030abcc0f11a8947bbeb9022263b8.pts 02691156/expert_verified/points_label/427030abcc0f11a8947bbeb9022263b8.seg 02691156
+03642806/points/367fbaea8743ec1cc98452c8fce6b43.pts 03642806/expert_verified/points_label/367fbaea8743ec1cc98452c8fce6b43.seg 03642806
+04379243/points/419412b927d11c7d8312881285c04cb3.pts 04379243/expert_verified/points_label/419412b927d11c7d8312881285c04cb3.seg 04379243
+03001627/points/56cc047440e7c999a23949c21eddef76.pts 03001627/expert_verified/points_label/56cc047440e7c999a23949c21eddef76.seg 03001627
+03790512/points/fdb6223c286cb653cc9e7530f9d8e186.pts 03790512/expert_verified/points_label/fdb6223c286cb653cc9e7530f9d8e186.seg 03790512
+03636649/points/6b2a590446ad5794b10e111f2d30684d.pts 03636649/expert_verified/points_label/6b2a590446ad5794b10e111f2d30684d.seg 03636649
+03001627/points/a3ce9ba74ab50352e6fe3612af521500.pts 03001627/expert_verified/points_label/a3ce9ba74ab50352e6fe3612af521500.seg 03001627
+02958343/points/9986dd19b2c459152470de2774d6099.pts 02958343/expert_verified/points_label/9986dd19b2c459152470de2774d6099.seg 02958343
+03642806/points/b806daf849a5dba289c212008d2a390e.pts 03642806/expert_verified/points_label/b806daf849a5dba289c212008d2a390e.seg 03642806
+04379243/points/2eb503dde3cc027d86c701087a194026.pts 04379243/expert_verified/points_label/2eb503dde3cc027d86c701087a194026.seg 04379243
+03001627/points/c4a4710012ee39bd19f4b416b31c46e0.pts 03001627/expert_verified/points_label/c4a4710012ee39bd19f4b416b31c46e0.seg 03001627
+02958343/points/bd8654fbca233e41ddb8f37b1865d989.pts 02958343/expert_verified/points_label/bd8654fbca233e41ddb8f37b1865d989.seg 02958343
+03001627/points/6fd485a2345c3dd69233bf560301e53.pts 03001627/expert_verified/points_label/6fd485a2345c3dd69233bf560301e53.seg 03001627
+02691156/points/aebc4c46b3cb7c3bca8607f540cc62ba.pts 02691156/expert_verified/points_label/aebc4c46b3cb7c3bca8607f540cc62ba.seg 02691156
+03001627/points/9343df9a7ed6cbba1923501fcdd899bb.pts 03001627/expert_verified/points_label/9343df9a7ed6cbba1923501fcdd899bb.seg 03001627
+04379243/points/7fadae39394c5622c3bd24f986301745.pts 04379243/expert_verified/points_label/7fadae39394c5622c3bd24f986301745.seg 04379243
+03001627/points/d619fd50c4d0fb46dea83bbf303af433.pts 03001627/expert_verified/points_label/d619fd50c4d0fb46dea83bbf303af433.seg 03001627
+04379243/points/ef02c88a34b3888a1b1a00a31bfed97b.pts 04379243/expert_verified/points_label/ef02c88a34b3888a1b1a00a31bfed97b.seg 04379243
+03467517/points/71d0016078dea05a94ca7929d4ba6d2d.pts 03467517/expert_verified/points_label/71d0016078dea05a94ca7929d4ba6d2d.seg 03467517
+03001627/points/5623d0ec9efedbc9d4da89766e80607a.pts 03001627/expert_verified/points_label/5623d0ec9efedbc9d4da89766e80607a.seg 03001627
+04379243/points/21486e6d0bd896ad5cca18918d24f6cd.pts 04379243/expert_verified/points_label/21486e6d0bd896ad5cca18918d24f6cd.seg 04379243
+03636649/points/978df83c1cee012729a60d6ab40898d.pts 03636649/expert_verified/points_label/978df83c1cee012729a60d6ab40898d.seg 03636649
+02691156/points/350d12f5290908c7f446f92b52bbd82a.pts 02691156/expert_verified/points_label/350d12f5290908c7f446f92b52bbd82a.seg 02691156
+03636649/points/86d7a728dc35d634f800b597bc1c1eb5.pts 03636649/expert_verified/points_label/86d7a728dc35d634f800b597bc1c1eb5.seg 03636649
+03001627/points/3b4292989394ba62f51f77a6d7299806.pts 03001627/expert_verified/points_label/3b4292989394ba62f51f77a6d7299806.seg 03001627
+03001627/points/f5f18fccf9e16800dbd185de408ea209.pts 03001627/expert_verified/points_label/f5f18fccf9e16800dbd185de408ea209.seg 03001627
+04379243/points/4d873bf1a658dcd523eb3ad3d378722a.pts 04379243/expert_verified/points_label/4d873bf1a658dcd523eb3ad3d378722a.seg 04379243
+03001627/points/a3e4639ff201f69b22a3043dcd383f68.pts 03001627/expert_verified/points_label/a3e4639ff201f69b22a3043dcd383f68.seg 03001627
+04379243/points/8d247c6f6aaf805a2530bfb25087f2b0.pts 04379243/expert_verified/points_label/8d247c6f6aaf805a2530bfb25087f2b0.seg 04379243
+03467517/points/511fc5ccf4f1c857a24eeca91f583600.pts 03467517/expert_verified/points_label/511fc5ccf4f1c857a24eeca91f583600.seg 03467517
+02691156/points/4635326bc4fdc3e9297cd7e2ef7dfa80.pts 02691156/expert_verified/points_label/4635326bc4fdc3e9297cd7e2ef7dfa80.seg 02691156
+03001627/points/525776b59266140381dff5c2e57ad46e.pts 03001627/expert_verified/points_label/525776b59266140381dff5c2e57ad46e.seg 03001627
+03001627/points/f1d6552ca66b2e37713decb1a0563b12.pts 03001627/expert_verified/points_label/f1d6552ca66b2e37713decb1a0563b12.seg 03001627
+04379243/points/40ff8ae39ad13d014a873bbe35452b88.pts 04379243/expert_verified/points_label/40ff8ae39ad13d014a873bbe35452b88.seg 04379243
+02691156/points/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.pts 02691156/expert_verified/points_label/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.seg 02691156
+04379243/points/63aa14915f59ed8671a782a4379556c7.pts 04379243/expert_verified/points_label/63aa14915f59ed8671a782a4379556c7.seg 04379243
+02691156/points/e16f9cc7dedcacdb9b0435532743fd43.pts 02691156/expert_verified/points_label/e16f9cc7dedcacdb9b0435532743fd43.seg 02691156
+04379243/points/c5b83c681c085f2195493ccf8f26ab2c.pts 04379243/expert_verified/points_label/c5b83c681c085f2195493ccf8f26ab2c.seg 04379243
+03001627/points/b2ba1569509cdb439451566a8c6563ed.pts 03001627/expert_verified/points_label/b2ba1569509cdb439451566a8c6563ed.seg 03001627
+02691156/points/265f5348ab2320b2148672750a1a335.pts 02691156/expert_verified/points_label/265f5348ab2320b2148672750a1a335.seg 02691156
+03001627/points/47da08d9c7cd7e104b3c42e318f3affc.pts 03001627/expert_verified/points_label/47da08d9c7cd7e104b3c42e318f3affc.seg 03001627
+03001627/points/458356b9c5a8d7bd7cc86734cb2f5062.pts 03001627/expert_verified/points_label/458356b9c5a8d7bd7cc86734cb2f5062.seg 03001627
+02691156/points/d20e3ed9b3430672bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d20e3ed9b3430672bbf3143b1cb6076a.seg 02691156
+04379243/points/c45e6ceae72c7a97be8908669c476d49.pts 04379243/expert_verified/points_label/c45e6ceae72c7a97be8908669c476d49.seg 04379243
+03001627/points/d9bbd1a1eaf6d2259d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/d9bbd1a1eaf6d2259d3ea1c6b57a0095.seg 03001627
+02958343/points/8242b114695b68286f522b2bb8ded829.pts 02958343/expert_verified/points_label/8242b114695b68286f522b2bb8ded829.seg 02958343
+03001627/points/e4b40369894a16ce6821a1e68ba5ebab.pts 03001627/expert_verified/points_label/e4b40369894a16ce6821a1e68ba5ebab.seg 03001627
+03636649/points/dfe800d8d8642e9647bc3701b998a7d5.pts 03636649/expert_verified/points_label/dfe800d8d8642e9647bc3701b998a7d5.seg 03636649
+04379243/points/bdf7606e8d493149664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/bdf7606e8d493149664b3b9b23ddfcbc.seg 04379243
+03001627/points/6015aaa9ef170d9bfdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/6015aaa9ef170d9bfdef1c01cbd4ae0c.seg 03001627
+03624134/points/df7a65224f295122ed9c5b25fef60d04.pts 03624134/expert_verified/points_label/df7a65224f295122ed9c5b25fef60d04.seg 03624134
+03467517/points/df959f68bb22e402a24eeca91f583600.pts 03467517/expert_verified/points_label/df959f68bb22e402a24eeca91f583600.seg 03467517
+04379243/points/69604fc24b7976d69ccce4c6d5bb195f.pts 04379243/expert_verified/points_label/69604fc24b7976d69ccce4c6d5bb195f.seg 04379243
+04379243/points/23aca164c7b2e2d4ad8af6714b643432.pts 04379243/expert_verified/points_label/23aca164c7b2e2d4ad8af6714b643432.seg 04379243
+03636649/points/e37796d40348fa5fd8013bb984303089.pts 03636649/expert_verified/points_label/e37796d40348fa5fd8013bb984303089.seg 03636649
+04379243/points/8cb6a2e9ba365c94593ebeeedbff73b.pts 04379243/expert_verified/points_label/8cb6a2e9ba365c94593ebeeedbff73b.seg 04379243
+03001627/points/d6f2d44c693d2e857062f2d72cde5c95.pts 03001627/expert_verified/points_label/d6f2d44c693d2e857062f2d72cde5c95.seg 03001627
+03948459/points/ed29dd43ad28f042d1987c07c912c6e1.pts 03948459/expert_verified/points_label/ed29dd43ad28f042d1987c07c912c6e1.seg 03948459
+03001627/points/ca01fd0de2534323c594a0e804f37c1a.pts 03001627/expert_verified/points_label/ca01fd0de2534323c594a0e804f37c1a.seg 03001627
+03636649/points/e7b719516449701362525a4d857f099d.pts 03636649/expert_verified/points_label/e7b719516449701362525a4d857f099d.seg 03636649
+02691156/points/bd48d0beb5d1acf1d2106c9042f1bde9.pts 02691156/expert_verified/points_label/bd48d0beb5d1acf1d2106c9042f1bde9.seg 02691156
+03636649/points/7cb828eb3b8e424b1e88064118b89a3e.pts 03636649/expert_verified/points_label/7cb828eb3b8e424b1e88064118b89a3e.seg 03636649
+03001627/points/fdd21f7f2ca9f0bcbdcbca499b446e89.pts 03001627/expert_verified/points_label/fdd21f7f2ca9f0bcbdcbca499b446e89.seg 03001627
+03636649/points/d779977c2417752b815c6de5374a8dd2.pts 03636649/expert_verified/points_label/d779977c2417752b815c6de5374a8dd2.seg 03636649
+02691156/points/f3e2df468c15795872517bb0a6b4d3ef.pts 02691156/expert_verified/points_label/f3e2df468c15795872517bb0a6b4d3ef.seg 02691156
+04379243/points/e3cc0b06be2c972cab610b0c94236463.pts 04379243/expert_verified/points_label/e3cc0b06be2c972cab610b0c94236463.seg 04379243
+03261776/points/ca1c1c9aba8f4491a656de49935d2359.pts 03261776/expert_verified/points_label/ca1c1c9aba8f4491a656de49935d2359.seg 03261776
+03001627/points/c535629f9661293dc16ef5c633c71b56.pts 03001627/expert_verified/points_label/c535629f9661293dc16ef5c633c71b56.seg 03001627
+03636649/points/699fcda4f4e9166ec5eb7aae719027b2.pts 03636649/expert_verified/points_label/699fcda4f4e9166ec5eb7aae719027b2.seg 03636649
+03001627/points/8a5d60067de905336c183a120a388982.pts 03001627/expert_verified/points_label/8a5d60067de905336c183a120a388982.seg 03001627
+02691156/points/4ad92be763c2ded8fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/4ad92be763c2ded8fca1f1143bb6bc17.seg 02691156
+04379243/points/14d6b4b09dfc54e9d679a95896f75103.pts 04379243/expert_verified/points_label/14d6b4b09dfc54e9d679a95896f75103.seg 04379243
+02691156/points/5e9129782c45b26992e39b8eae3e6b15.pts 02691156/expert_verified/points_label/5e9129782c45b26992e39b8eae3e6b15.seg 02691156
+02691156/points/2aec6e6096e640add00d52e62bf14ee9.pts 02691156/expert_verified/points_label/2aec6e6096e640add00d52e62bf14ee9.seg 02691156
+03642806/points/7b4260884a1dfd76b080af510dd640b.pts 03642806/expert_verified/points_label/7b4260884a1dfd76b080af510dd640b.seg 03642806
+03636649/points/3a0edfd418e020b97f32712aef0efc5a.pts 03636649/expert_verified/points_label/3a0edfd418e020b97f32712aef0efc5a.seg 03636649
+03467517/points/1c374a198daaddc493f0194265a9746c.pts 03467517/expert_verified/points_label/1c374a198daaddc493f0194265a9746c.seg 03467517
+04379243/points/9d90a58677e619f94b8710a3469971b1.pts 04379243/expert_verified/points_label/9d90a58677e619f94b8710a3469971b1.seg 04379243
+02691156/points/26f8a11864fd6bf7b68211fcc7956ac6.pts 02691156/expert_verified/points_label/26f8a11864fd6bf7b68211fcc7956ac6.seg 02691156
+02773838/points/f5108ede5ca11f041f6736765dee4fa9.pts 02773838/expert_verified/points_label/f5108ede5ca11f041f6736765dee4fa9.seg 02773838
+03001627/points/41ce60d5443c203eb31c248b8665b2e7.pts 03001627/expert_verified/points_label/41ce60d5443c203eb31c248b8665b2e7.seg 03001627
+03797390/points/a637500654ca8d16c97cfc3e8a6b1d16.pts 03797390/expert_verified/points_label/a637500654ca8d16c97cfc3e8a6b1d16.seg 03797390
+03001627/points/9ee4b9c97bcf4b3715dec43ae6a12831.pts 03001627/expert_verified/points_label/9ee4b9c97bcf4b3715dec43ae6a12831.seg 03001627
+03001627/points/e2dbad7996e7e13430c589758b4b5646.pts 03001627/expert_verified/points_label/e2dbad7996e7e13430c589758b4b5646.seg 03001627
+03001627/points/ec9f1fc13f2e4ae2c3bd24f986301745.pts 03001627/expert_verified/points_label/ec9f1fc13f2e4ae2c3bd24f986301745.seg 03001627
+03624134/points/172b9a77462dcdeaed90ead9558ee6cb.pts 03624134/expert_verified/points_label/172b9a77462dcdeaed90ead9558ee6cb.seg 03624134
+04379243/points/713a4be770bb19b9586b2526565371c0.pts 04379243/expert_verified/points_label/713a4be770bb19b9586b2526565371c0.seg 04379243
+04379243/points/f2e6820ca69d9b7719fb4103277a6b93.pts 04379243/expert_verified/points_label/f2e6820ca69d9b7719fb4103277a6b93.seg 04379243
+03001627/points/11a06e6f68b1d99c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/11a06e6f68b1d99c8687ff9b0b4e4ac.seg 03001627
+04379243/points/cfd7e354a5ae982aa0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/cfd7e354a5ae982aa0ab1d82ef09f78f.seg 04379243
+03797390/points/8012f52dd0a4d2f718a93a45bf780820.pts 03797390/expert_verified/points_label/8012f52dd0a4d2f718a93a45bf780820.seg 03797390
+03636649/points/57c1bc69df779d87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/57c1bc69df779d87bbc7a6acbd8f058b.seg 03636649
+03948459/points/664579680dc09267e1f2a1daf140ac9f.pts 03948459/expert_verified/points_label/664579680dc09267e1f2a1daf140ac9f.seg 03948459
+03001627/points/ca032d3b6dcbe1cea3056fa1e8da3997.pts 03001627/expert_verified/points_label/ca032d3b6dcbe1cea3056fa1e8da3997.seg 03001627
+02691156/points/4a837740b388aa45d8ff6111270336a9.pts 02691156/expert_verified/points_label/4a837740b388aa45d8ff6111270336a9.seg 02691156
+04099429/points/64803bab9799d0e698d2d2b2ae2563b0.pts 04099429/expert_verified/points_label/64803bab9799d0e698d2d2b2ae2563b0.seg 04099429
+04379243/points/c2c36909e461e10adaaaeef365d8f6e5.pts 04379243/expert_verified/points_label/c2c36909e461e10adaaaeef365d8f6e5.seg 04379243
+04379243/points/bc842e548e68a3cbb48513409ae7c51d.pts 04379243/expert_verified/points_label/bc842e548e68a3cbb48513409ae7c51d.seg 04379243
+03467517/points/4709e55a82a63f64d57700c05b1862d8.pts 03467517/expert_verified/points_label/4709e55a82a63f64d57700c05b1862d8.seg 03467517
+04379243/points/dc6f030d9ee566a5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/dc6f030d9ee566a5dcfcef693e7ec696.seg 04379243
+03001627/points/8be8093e99b94bd9cf320c31965db5a1.pts 03001627/expert_verified/points_label/8be8093e99b94bd9cf320c31965db5a1.seg 03001627
+02958343/points/a0a1b0377d72e86bab3dd76bf33b0f5e.pts 02958343/expert_verified/points_label/a0a1b0377d72e86bab3dd76bf33b0f5e.seg 02958343
+03001627/points/efc684ff4dc6ff49ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/efc684ff4dc6ff49ccd42a2d6eea63ed.seg 03001627
+03001627/points/ff2223a085d32243696b74614952b2d0.pts 03001627/expert_verified/points_label/ff2223a085d32243696b74614952b2d0.seg 03001627
+02954340/points/8b2951e32e0906bb5f6cb4951755315c.pts 02954340/expert_verified/points_label/8b2951e32e0906bb5f6cb4951755315c.seg 02954340
+04379243/points/82b69c9b72a5159ce76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/82b69c9b72a5159ce76bc197b3a3ffc0.seg 04379243
+03642806/points/5b5247b13d5b21bdad2954b86711abbd.pts 03642806/expert_verified/points_label/5b5247b13d5b21bdad2954b86711abbd.seg 03642806
+03636649/points/44e442591f82cd4cab0ac374f450cdc.pts 03636649/expert_verified/points_label/44e442591f82cd4cab0ac374f450cdc.seg 03636649
+03001627/points/2a1184b04dd8f30e3e92f39ce48d644.pts 03001627/expert_verified/points_label/2a1184b04dd8f30e3e92f39ce48d644.seg 03001627
+03636649/points/bc49fe3559e18fcb7d910d51d878f708.pts 03636649/expert_verified/points_label/bc49fe3559e18fcb7d910d51d878f708.seg 03636649
+03624134/points/c50af8af50613e822bf26da672b84220.pts 03624134/expert_verified/points_label/c50af8af50613e822bf26da672b84220.seg 03624134
+04225987/points/c0280aaad5473e8398c63cb68f11df34.pts 04225987/expert_verified/points_label/c0280aaad5473e8398c63cb68f11df34.seg 04225987
+03636649/points/5849d1a237cb493c659dda512294c744.pts 03636649/expert_verified/points_label/5849d1a237cb493c659dda512294c744.seg 03636649
+02958343/points/fcd90d547fdeb629f200a72c9245aee7.pts 02958343/expert_verified/points_label/fcd90d547fdeb629f200a72c9245aee7.seg 02958343
+03001627/points/34898c36e711fbde713decb1a0563b12.pts 03001627/expert_verified/points_label/34898c36e711fbde713decb1a0563b12.seg 03001627
+02691156/points/af696fc30a96a0c8bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/af696fc30a96a0c8bc0909d98a1ff2b4.seg 02691156
+04379243/points/f28e030e715b9d3e318462aca9e62b6b.pts 04379243/expert_verified/points_label/f28e030e715b9d3e318462aca9e62b6b.seg 04379243
+02691156/points/3c7e4628a9ea201bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/3c7e4628a9ea201bbf3143b1cb6076a.seg 02691156
+03636649/points/f092117adb1e9254d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/f092117adb1e9254d1cbf3e52b9b6237.seg 03636649
+04379243/points/7dd881a26eea656d193afeeca14e3baa.pts 04379243/expert_verified/points_label/7dd881a26eea656d193afeeca14e3baa.seg 04379243
+03001627/points/79a3115a6f96eef7c151419181ef256.pts 03001627/expert_verified/points_label/79a3115a6f96eef7c151419181ef256.seg 03001627
+04379243/points/fc51355d4d03ff4ae6c5cd45aa112726.pts 04379243/expert_verified/points_label/fc51355d4d03ff4ae6c5cd45aa112726.seg 04379243
+04379243/points/34121f5cc12135148c1cf3f7d7f0373.pts 04379243/expert_verified/points_label/34121f5cc12135148c1cf3f7d7f0373.seg 04379243
+03624134/points/d5167211e757e79f012465c621a63e3.pts 03624134/expert_verified/points_label/d5167211e757e79f012465c621a63e3.seg 03624134
+04379243/points/5b375eacdbe49cfaaa539cd22945e538.pts 04379243/expert_verified/points_label/5b375eacdbe49cfaaa539cd22945e538.seg 04379243
+02691156/points/d3d788c1fb35227619ba010ddb4974fe.pts 02691156/expert_verified/points_label/d3d788c1fb35227619ba010ddb4974fe.seg 02691156
+02691156/points/f26ea1a00455f44fb88e2a19106395c2.pts 02691156/expert_verified/points_label/f26ea1a00455f44fb88e2a19106395c2.seg 02691156
+03001627/points/798a46965d9e0edfcea003eff0268278.pts 03001627/expert_verified/points_label/798a46965d9e0edfcea003eff0268278.seg 03001627
+02691156/points/3069d990d52051eb3a34c2907e8f3f1f.pts 02691156/expert_verified/points_label/3069d990d52051eb3a34c2907e8f3f1f.seg 02691156
+02691156/points/8c42e3042a4beaa7d5c40787c7bb7824.pts 02691156/expert_verified/points_label/8c42e3042a4beaa7d5c40787c7bb7824.seg 02691156
+04379243/points/45c5ee611c73b90a509330ce00eb0b20.pts 04379243/expert_verified/points_label/45c5ee611c73b90a509330ce00eb0b20.seg 04379243
+03001627/points/22ada577361ed0374b3c42e318f3affc.pts 03001627/expert_verified/points_label/22ada577361ed0374b3c42e318f3affc.seg 03001627
+04379243/points/b6ad7be371729438dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b6ad7be371729438dcfcef693e7ec696.seg 04379243
+03636649/points/4c266f2b866c59e761fef32872c6fa53.pts 03636649/expert_verified/points_label/4c266f2b866c59e761fef32872c6fa53.seg 03636649
+04379243/points/812dd06fc99f174e9f2349486c570dd4.pts 04379243/expert_verified/points_label/812dd06fc99f174e9f2349486c570dd4.seg 04379243
+02691156/points/36a5bd4ca6a0b191532d23702363f9a5.pts 02691156/expert_verified/points_label/36a5bd4ca6a0b191532d23702363f9a5.seg 02691156
+03001627/points/be0890a6a0f3fcf841f91bc9e1dece3b.pts 03001627/expert_verified/points_label/be0890a6a0f3fcf841f91bc9e1dece3b.seg 03001627
+03642806/points/6008f256f3beafd9988abef1fd117e7.pts 03642806/expert_verified/points_label/6008f256f3beafd9988abef1fd117e7.seg 03642806
+03001627/points/490941bf4a532b62492d9da2668ec34c.pts 03001627/expert_verified/points_label/490941bf4a532b62492d9da2668ec34c.seg 03001627
+03636649/points/94940283714fdff6244ba644cf33cb2e.pts 03636649/expert_verified/points_label/94940283714fdff6244ba644cf33cb2e.seg 03636649
+03642806/points/6227e7dd1a391e8d54f22ce0a3592d5.pts 03642806/expert_verified/points_label/6227e7dd1a391e8d54f22ce0a3592d5.seg 03642806
+02691156/points/b2ceeee3c5b75962ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/b2ceeee3c5b75962ac4f72bf08dc79a6.seg 02691156
+03642806/points/55a05b33f34e7211f71cb38553f14917.pts 03642806/expert_verified/points_label/55a05b33f34e7211f71cb38553f14917.seg 03642806
+02773838/points/74c548ef3ca7b1987515e7bb7dba4019.pts 02773838/expert_verified/points_label/74c548ef3ca7b1987515e7bb7dba4019.seg 02773838
+03467517/points/defcf80fcef4b51b3f431ca2c1260d62.pts 03467517/expert_verified/points_label/defcf80fcef4b51b3f431ca2c1260d62.seg 03467517
+04379243/points/eaea1cf98b61abd043383304411cc9ec.pts 04379243/expert_verified/points_label/eaea1cf98b61abd043383304411cc9ec.seg 04379243
+03001627/points/7f6858bd9d4af9df97316612e1a4343a.pts 03001627/expert_verified/points_label/7f6858bd9d4af9df97316612e1a4343a.seg 03001627
+03001627/points/3c27660aacbcf99886327adaa986dff.pts 03001627/expert_verified/points_label/3c27660aacbcf99886327adaa986dff.seg 03001627
+04379243/points/229d510bace435811572ee5ddf1b55b.pts 04379243/expert_verified/points_label/229d510bace435811572ee5ddf1b55b.seg 04379243
+03636649/points/83c0ad378b5802b73d39d8012919dd25.pts 03636649/expert_verified/points_label/83c0ad378b5802b73d39d8012919dd25.seg 03636649
+02691156/points/f009f3112625ee00b8cf782e8c539948.pts 02691156/expert_verified/points_label/f009f3112625ee00b8cf782e8c539948.seg 02691156
+02691156/points/f13827d156628467b4cdad9a5bf52dd5.pts 02691156/expert_verified/points_label/f13827d156628467b4cdad9a5bf52dd5.seg 02691156
+03636649/points/526251a7530426a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/526251a7530426a4b3c42e318f3affc.seg 03636649
+03001627/points/a1133464132d65fcfce0ccdae30f97db.pts 03001627/expert_verified/points_label/a1133464132d65fcfce0ccdae30f97db.seg 03001627
+02691156/points/d844094b073a0452b04b2d1c5ce9783b.pts 02691156/expert_verified/points_label/d844094b073a0452b04b2d1c5ce9783b.seg 02691156
+03948459/points/2f5b4bcb8d4dd901609e2d916fa0da27.pts 03948459/expert_verified/points_label/2f5b4bcb8d4dd901609e2d916fa0da27.seg 03948459
+03636649/points/a4c06cd5032733af543df75232f6ff2b.pts 03636649/expert_verified/points_label/a4c06cd5032733af543df75232f6ff2b.seg 03636649
+03636649/points/64eaa45bd2e01db8991ff09eca5b27a8.pts 03636649/expert_verified/points_label/64eaa45bd2e01db8991ff09eca5b27a8.seg 03636649
+03636649/points/5bc478e9c4e0bb8180936c51aa7ffcf5.pts 03636649/expert_verified/points_label/5bc478e9c4e0bb8180936c51aa7ffcf5.seg 03636649
+03636649/points/b02bd8e5ef9cfe354b3c42e318f3affc.pts 03636649/expert_verified/points_label/b02bd8e5ef9cfe354b3c42e318f3affc.seg 03636649
+03636649/points/cf6c082b9534049494db33559ec0df30.pts 03636649/expert_verified/points_label/cf6c082b9534049494db33559ec0df30.seg 03636649
+04225987/points/af4343c5b78b70b11082f2ea630bf69e.pts 04225987/expert_verified/points_label/af4343c5b78b70b11082f2ea630bf69e.seg 04225987
+03467517/points/c084022f2ddbf95493f0194265a9746c.pts 03467517/expert_verified/points_label/c084022f2ddbf95493f0194265a9746c.seg 03467517
+03001627/points/550dd11407c28f9f3bd04286517a8395.pts 03001627/expert_verified/points_label/550dd11407c28f9f3bd04286517a8395.seg 03001627
+04379243/points/702cebffa33a19f019f079d1b712f46f.pts 04379243/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 04379243
+04379243/points/388d9e7b2b8a8f909492fbce0bd54e2e.pts 04379243/expert_verified/points_label/388d9e7b2b8a8f909492fbce0bd54e2e.seg 04379243
+03636649/points/7634fbdcaa6b304d62c83ac1e3a4ebaa.pts 03636649/expert_verified/points_label/7634fbdcaa6b304d62c83ac1e3a4ebaa.seg 03636649
+03636649/points/14d3d2418165ec86bba785994a529f86.pts 03636649/expert_verified/points_label/14d3d2418165ec86bba785994a529f86.seg 03636649
+04379243/points/13e19274b358ec867aa3000697a75d55.pts 04379243/expert_verified/points_label/13e19274b358ec867aa3000697a75d55.seg 04379243
+03467517/points/727fcc85add981325e683993f34d42f2.pts 03467517/expert_verified/points_label/727fcc85add981325e683993f34d42f2.seg 03467517
+02691156/points/947d6b9cd1966e2e719b5362fe06bbb.pts 02691156/expert_verified/points_label/947d6b9cd1966e2e719b5362fe06bbb.seg 02691156
+04379243/points/ee5f85db427865e63e5399147a5b4763.pts 04379243/expert_verified/points_label/ee5f85db427865e63e5399147a5b4763.seg 04379243
+02691156/points/1678946724380812de689e373096b0e3.pts 02691156/expert_verified/points_label/1678946724380812de689e373096b0e3.seg 02691156
+03001627/points/3fdef0a7606c397331ad067823a3f0ce.pts 03001627/expert_verified/points_label/3fdef0a7606c397331ad067823a3f0ce.seg 03001627
+03636649/points/1bb465b8f22315d1116f219d90a571c2.pts 03636649/expert_verified/points_label/1bb465b8f22315d1116f219d90a571c2.seg 03636649
+04379243/points/9dd5b7e6f90ee322b56d92c5d7b06038.pts 04379243/expert_verified/points_label/9dd5b7e6f90ee322b56d92c5d7b06038.seg 04379243
+03467517/points/7eee3b79e053759143891ae68a82472e.pts 03467517/expert_verified/points_label/7eee3b79e053759143891ae68a82472e.seg 03467517
+03001627/points/f4b6bf9253918b52944d8f8e13d63fde.pts 03001627/expert_verified/points_label/f4b6bf9253918b52944d8f8e13d63fde.seg 03001627
+03636649/points/92e0f64c08f0c8ac3c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/92e0f64c08f0c8ac3c8d0fdfb1cc2535.seg 03636649
+03624134/points/d63521a0dfac9c1f342494fa6f09f376.pts 03624134/expert_verified/points_label/d63521a0dfac9c1f342494fa6f09f376.seg 03624134
+04379243/points/c7ff0afab4b7885a52160ba64fb535b2.pts 04379243/expert_verified/points_label/c7ff0afab4b7885a52160ba64fb535b2.seg 04379243
+02958343/points/89765af115d9a4955591fcdffe729c55.pts 02958343/expert_verified/points_label/89765af115d9a4955591fcdffe729c55.seg 02958343
+03636649/points/70bf2aaedbf9499ec889c00efdaf9928.pts 03636649/expert_verified/points_label/70bf2aaedbf9499ec889c00efdaf9928.seg 03636649
+02958343/points/ef15b938dcfa9893c4d922e8a1141322.pts 02958343/expert_verified/points_label/ef15b938dcfa9893c4d922e8a1141322.seg 02958343
+03636649/points/4bb676c497969016de98d10ab5975b59.pts 03636649/expert_verified/points_label/4bb676c497969016de98d10ab5975b59.seg 03636649
+04379243/points/1c8121e1ad6cd6fc7a480f3f1d55ed3f.pts 04379243/expert_verified/points_label/1c8121e1ad6cd6fc7a480f3f1d55ed3f.seg 04379243
+04379243/points/83b8e64089968ae8fd3feb4581507302.pts 04379243/expert_verified/points_label/83b8e64089968ae8fd3feb4581507302.seg 04379243
+03636649/points/a4c0f3aed58f0e092fdae21c212bf119.pts 03636649/expert_verified/points_label/a4c0f3aed58f0e092fdae21c212bf119.seg 03636649
+04379243/points/e02925509615eb5a4eaf5bbf36d243d4.pts 04379243/expert_verified/points_label/e02925509615eb5a4eaf5bbf36d243d4.seg 04379243
+04379243/points/c5087fce38b009ae30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/c5087fce38b009ae30bbd4cddd04c77b.seg 04379243
+03001627/points/5107542cfbf142f36209799e55a657c.pts 03001627/expert_verified/points_label/5107542cfbf142f36209799e55a657c.seg 03001627
+04379243/points/94a62cfdb84e88ca9a3528690d225ee1.pts 04379243/expert_verified/points_label/94a62cfdb84e88ca9a3528690d225ee1.seg 04379243
+04379243/points/80ad1f839582d183fbf6f493308acc40.pts 04379243/expert_verified/points_label/80ad1f839582d183fbf6f493308acc40.seg 04379243
+03001627/points/91819d15c2c044ebd47ffa500636d198.pts 03001627/expert_verified/points_label/91819d15c2c044ebd47ffa500636d198.seg 03001627
+03636649/points/77a5a12147a6624d786810c22b062a88.pts 03636649/expert_verified/points_label/77a5a12147a6624d786810c22b062a88.seg 03636649
+03001627/points/beb4c42cfa1c3b282811d30bba54859.pts 03001627/expert_verified/points_label/beb4c42cfa1c3b282811d30bba54859.seg 03001627
+03636649/points/e529fc190753cc9df647dc544bb0ab61.pts 03636649/expert_verified/points_label/e529fc190753cc9df647dc544bb0ab61.seg 03636649
+04379243/points/680d4a8b5a30601a4b3c42e318f3affc.pts 04379243/expert_verified/points_label/680d4a8b5a30601a4b3c42e318f3affc.seg 04379243
+03001627/points/1d6f4020cab4ec1962d6a66a1a314d66.pts 03001627/expert_verified/points_label/1d6f4020cab4ec1962d6a66a1a314d66.seg 03001627
+03001627/points/5b3fd3199d1bc950c1ae25a29e9d46d3.pts 03001627/expert_verified/points_label/5b3fd3199d1bc950c1ae25a29e9d46d3.seg 03001627
+03001627/points/17e916fc863540ee3def89b32cef8e45.pts 03001627/expert_verified/points_label/17e916fc863540ee3def89b32cef8e45.seg 03001627
+04379243/points/a5d5fc6b0bb7881419fb4103277a6b93.pts 04379243/expert_verified/points_label/a5d5fc6b0bb7881419fb4103277a6b93.seg 04379243
+03001627/points/eafec1b145972dcd815b2b467e8e2eac.pts 03001627/expert_verified/points_label/eafec1b145972dcd815b2b467e8e2eac.seg 03001627
+04379243/points/1fb2be490f45ec6e19fb4103277a6b93.pts 04379243/expert_verified/points_label/1fb2be490f45ec6e19fb4103277a6b93.seg 04379243
+02691156/points/8b61ba80d9e487deca8607f540cc62ba.pts 02691156/expert_verified/points_label/8b61ba80d9e487deca8607f540cc62ba.seg 02691156
+03467517/points/2d767b3fbb8a3053b8836869016d1afd.pts 03467517/expert_verified/points_label/2d767b3fbb8a3053b8836869016d1afd.seg 03467517
+04379243/points/e0940f2229e42007d98e761e6d91dfc8.pts 04379243/expert_verified/points_label/e0940f2229e42007d98e761e6d91dfc8.seg 04379243
+03001627/points/bb90094030f369e4305a3b2fd9173d6f.pts 03001627/expert_verified/points_label/bb90094030f369e4305a3b2fd9173d6f.seg 03001627
+02958343/points/c6e3d9cf26016b5752aa494042b7c9db.pts 02958343/expert_verified/points_label/c6e3d9cf26016b5752aa494042b7c9db.seg 02958343
+03001627/points/bd0fab2e72b445bd1e722bceee6e83aa.pts 03001627/expert_verified/points_label/bd0fab2e72b445bd1e722bceee6e83aa.seg 03001627
+02691156/points/e86fd13a49f0ee0a62b600da24e0965.pts 02691156/expert_verified/points_label/e86fd13a49f0ee0a62b600da24e0965.seg 02691156
+03001627/points/eeebe3fe14ee4d3aebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/eeebe3fe14ee4d3aebefe6b1d594ad2e.seg 03001627
+04379243/points/398dbb0a34ca527871a782a4379556c7.pts 04379243/expert_verified/points_label/398dbb0a34ca527871a782a4379556c7.seg 04379243
+04379243/points/737cc2beda4a023619fb4103277a6b93.pts 04379243/expert_verified/points_label/737cc2beda4a023619fb4103277a6b93.seg 04379243
+03001627/points/3895b96949fd81c5f07fee5fc5c45ee2.pts 03001627/expert_verified/points_label/3895b96949fd81c5f07fee5fc5c45ee2.seg 03001627
+04379243/points/bba5ce8555c8fa89ba18ade30e563d37.pts 04379243/expert_verified/points_label/bba5ce8555c8fa89ba18ade30e563d37.seg 04379243
+04379243/points/cab027dd0162c5b7f1426260885dd0ef.pts 04379243/expert_verified/points_label/cab027dd0162c5b7f1426260885dd0ef.seg 04379243
+04379243/points/75f2bc98aecf198974984b9cd0997a52.pts 04379243/expert_verified/points_label/75f2bc98aecf198974984b9cd0997a52.seg 04379243
+04379243/points/8d4fe49d942ec85ff4b6538438a0b930.pts 04379243/expert_verified/points_label/8d4fe49d942ec85ff4b6538438a0b930.seg 04379243
+03001627/points/89dd53d0377c28207f7114254c4286d2.pts 03001627/expert_verified/points_label/89dd53d0377c28207f7114254c4286d2.seg 03001627
+03636649/points/a37695d83a39adb52866fbd701f50f71.pts 03636649/expert_verified/points_label/a37695d83a39adb52866fbd701f50f71.seg 03636649
+04379243/points/f99ebf0f053140525a0e5699b3040a35.pts 04379243/expert_verified/points_label/f99ebf0f053140525a0e5699b3040a35.seg 04379243
+03624134/points/bbfd2df3edce576e1e652fa812161367.pts 03624134/expert_verified/points_label/bbfd2df3edce576e1e652fa812161367.seg 03624134
+04379243/points/f0d8620b49ea76db83130614d8020b3.pts 04379243/expert_verified/points_label/f0d8620b49ea76db83130614d8020b3.seg 04379243
+04379243/points/d01a6b35a54c8f77dd986a55e273fa14.pts 04379243/expert_verified/points_label/d01a6b35a54c8f77dd986a55e273fa14.seg 04379243
+03001627/points/2f6b0ddf12d1311795bea7c29e873d16.pts 03001627/expert_verified/points_label/2f6b0ddf12d1311795bea7c29e873d16.seg 03001627
+03001627/points/5695fd37d1e673cebf964fc57f6a7d6d.pts 03001627/expert_verified/points_label/5695fd37d1e673cebf964fc57f6a7d6d.seg 03001627
+03636649/points/746b82746c6a02cca5f600ed2cf472ac.pts 03636649/expert_verified/points_label/746b82746c6a02cca5f600ed2cf472ac.seg 03636649
+03001627/points/bcc4ea0133864bfe4d4c0769270d8651.pts 03001627/expert_verified/points_label/bcc4ea0133864bfe4d4c0769270d8651.seg 03001627
+03624134/points/81ba3f06ec38eaa46016d22b1dfacd4b.pts 03624134/expert_verified/points_label/81ba3f06ec38eaa46016d22b1dfacd4b.seg 03624134
+04379243/points/2a2d6560f14a01c6afac72146bbc9d59.pts 04379243/expert_verified/points_label/2a2d6560f14a01c6afac72146bbc9d59.seg 04379243
+04379243/points/856e86709df98497dcfcef693e7ec696.pts 04379243/expert_verified/points_label/856e86709df98497dcfcef693e7ec696.seg 04379243
+03948459/points/7418810de4b13e8430b6ca3ac82edfa3.pts 03948459/expert_verified/points_label/7418810de4b13e8430b6ca3ac82edfa3.seg 03948459
+03001627/points/11e0f0dfd3d0b22130ddb6ead95f49cc.pts 03001627/expert_verified/points_label/11e0f0dfd3d0b22130ddb6ead95f49cc.seg 03001627
+04379243/points/5c6748b094725d9af008d8a3590fb522.pts 04379243/expert_verified/points_label/5c6748b094725d9af008d8a3590fb522.seg 04379243
+04379243/points/17f3a2945d6550cbf7628281ecb18112.pts 04379243/expert_verified/points_label/17f3a2945d6550cbf7628281ecb18112.seg 04379243
+04379243/points/889c9aedc4ba47592fb02b79d375eea5.pts 04379243/expert_verified/points_label/889c9aedc4ba47592fb02b79d375eea5.seg 04379243
+04379243/points/c0b74c61865b563067dc358060e3c47b.pts 04379243/expert_verified/points_label/c0b74c61865b563067dc358060e3c47b.seg 04379243
+03636649/points/783b81aa54a69a26d42b9650f19dd425.pts 03636649/expert_verified/points_label/783b81aa54a69a26d42b9650f19dd425.seg 03636649
+03467517/points/8b8b084109eef6d81082f2ea630bf69e.pts 03467517/expert_verified/points_label/8b8b084109eef6d81082f2ea630bf69e.seg 03467517
+03001627/points/8a9af7d8a83d90fcd53e36731300f5b4.pts 03001627/expert_verified/points_label/8a9af7d8a83d90fcd53e36731300f5b4.seg 03001627
+03001627/points/47aca56ff3a7b8a71a782a4379556c7.pts 03001627/expert_verified/points_label/47aca56ff3a7b8a71a782a4379556c7.seg 03001627
+03001627/points/9fae8d94a028e9ec2818b21315fe1bde.pts 03001627/expert_verified/points_label/9fae8d94a028e9ec2818b21315fe1bde.seg 03001627
+03001627/points/9a41550ba7dd31e3bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a41550ba7dd31e3bf80985a99195eb8.seg 03001627
+03001627/points/184b4797cea77beb5ca1c42bb8ac17a.pts 03001627/expert_verified/points_label/184b4797cea77beb5ca1c42bb8ac17a.seg 03001627
+04379243/points/bc1ff7fc750617d690f7bef12e52ac08.pts 04379243/expert_verified/points_label/bc1ff7fc750617d690f7bef12e52ac08.seg 04379243
+02691156/points/5fb64e3fc0abe449ca8607f540cc62ba.pts 02691156/expert_verified/points_label/5fb64e3fc0abe449ca8607f540cc62ba.seg 02691156
+03001627/points/2e0beb3b6927a2b7e45ef4135c266a12.pts 03001627/expert_verified/points_label/2e0beb3b6927a2b7e45ef4135c266a12.seg 03001627
+03467517/points/a38684b166ce2c77c155f88004a92bc8.pts 03467517/expert_verified/points_label/a38684b166ce2c77c155f88004a92bc8.seg 03467517
+02691156/points/b590adb6d3486f6e90b1d6deb98feec6.pts 02691156/expert_verified/points_label/b590adb6d3486f6e90b1d6deb98feec6.seg 02691156
+03636649/points/9d41e23f00d11d153033d35b49a20c8.pts 03636649/expert_verified/points_label/9d41e23f00d11d153033d35b49a20c8.seg 03636649
+03001627/points/f4b141ab64a6c4e771a782a4379556c7.pts 03001627/expert_verified/points_label/f4b141ab64a6c4e771a782a4379556c7.seg 03001627
+03948459/points/19e45672a3109f18be4927dbd39f74e9.pts 03948459/expert_verified/points_label/19e45672a3109f18be4927dbd39f74e9.seg 03948459
+04379243/points/58475b1b20ece0c5eeb8d422649e5f2b.pts 04379243/expert_verified/points_label/58475b1b20ece0c5eeb8d422649e5f2b.seg 04379243
+04379243/points/400393a56fc243c442c39a4fb8d01418.pts 04379243/expert_verified/points_label/400393a56fc243c442c39a4fb8d01418.seg 04379243
+03001627/points/a128eda00983dd01fb7d9615be5ab4b0.pts 03001627/expert_verified/points_label/a128eda00983dd01fb7d9615be5ab4b0.seg 03001627
+04379243/points/6af9a593129b028eb67e68783d58425a.pts 04379243/expert_verified/points_label/6af9a593129b028eb67e68783d58425a.seg 04379243
+03001627/points/40f188600cf8362b654ea6737b0d3597.pts 03001627/expert_verified/points_label/40f188600cf8362b654ea6737b0d3597.seg 03001627
+04379243/points/a4af8f822fa8d95456c08464b83f209e.pts 04379243/expert_verified/points_label/a4af8f822fa8d95456c08464b83f209e.seg 04379243
+03001627/points/d9558dccfe8e3381e45ef4135c266a12.pts 03001627/expert_verified/points_label/d9558dccfe8e3381e45ef4135c266a12.seg 03001627
+04379243/points/631028ddb76eed4dbb0085d0daabdaea.pts 04379243/expert_verified/points_label/631028ddb76eed4dbb0085d0daabdaea.seg 04379243
+03001627/points/8967e65c1541d1874aa7f42ef07f614e.pts 03001627/expert_verified/points_label/8967e65c1541d1874aa7f42ef07f614e.seg 03001627
+04379243/points/38feb6b209579f6faadbf8208284c675.pts 04379243/expert_verified/points_label/38feb6b209579f6faadbf8208284c675.seg 04379243
+03624134/points/60277f4060b8703e4e18d7136dc2dc80.pts 03624134/expert_verified/points_label/60277f4060b8703e4e18d7136dc2dc80.seg 03624134
+03467517/points/a78c3356a5dca4e7670b811945485012.pts 03467517/expert_verified/points_label/a78c3356a5dca4e7670b811945485012.seg 03467517
+03797390/points/645b0e2ef3b95979204df312eabf367f.pts 03797390/expert_verified/points_label/645b0e2ef3b95979204df312eabf367f.seg 03797390
+03467517/points/bd6057c7ac1ef31193f0194265a9746c.pts 03467517/expert_verified/points_label/bd6057c7ac1ef31193f0194265a9746c.seg 03467517
+03790512/points/bcbcfdad5e0e1d9ba88e8cb97b773125.pts 03790512/expert_verified/points_label/bcbcfdad5e0e1d9ba88e8cb97b773125.seg 03790512
+03636649/points/761fb0822bb05bc8ee0cd746086d989.pts 03636649/expert_verified/points_label/761fb0822bb05bc8ee0cd746086d989.seg 03636649
+03636649/points/be13324c84d2a9d72b151d8b52c53b90.pts 03636649/expert_verified/points_label/be13324c84d2a9d72b151d8b52c53b90.seg 03636649
+04379243/points/7b3dfbd70333485d219a1300d9489f4e.pts 04379243/expert_verified/points_label/7b3dfbd70333485d219a1300d9489f4e.seg 04379243
+04379243/points/22c5cbe6271736bffebad4f49b26ec52.pts 04379243/expert_verified/points_label/22c5cbe6271736bffebad4f49b26ec52.seg 04379243
+02958343/points/4b7b3b54dc04df53c19f1e8ed99ac2fa.pts 02958343/expert_verified/points_label/4b7b3b54dc04df53c19f1e8ed99ac2fa.seg 02958343
+03636649/points/947c6753d77d8082290e2f84c414e6be.pts 03636649/expert_verified/points_label/947c6753d77d8082290e2f84c414e6be.seg 03636649
+02958343/points/36c2770d00fdd0bdf1ee968c9039cc3.pts 02958343/expert_verified/points_label/36c2770d00fdd0bdf1ee968c9039cc3.seg 02958343
+03001627/points/4ac17ecd78880859e302b6082b0ffc09.pts 03001627/expert_verified/points_label/4ac17ecd78880859e302b6082b0ffc09.seg 03001627
+03636649/points/70b78b9439a9de7530f6e0ede20c4525.pts 03636649/expert_verified/points_label/70b78b9439a9de7530f6e0ede20c4525.seg 03636649
+04379243/points/d8be4b45afb21cf1616fb9ab42452112.pts 04379243/expert_verified/points_label/d8be4b45afb21cf1616fb9ab42452112.seg 04379243
+02691156/points/fe266c740580c102ff9ce0c50c2cd25a.pts 02691156/expert_verified/points_label/fe266c740580c102ff9ce0c50c2cd25a.seg 02691156
+02958343/points/30f4617775480bcce27281f3b76d1f5.pts 02958343/expert_verified/points_label/30f4617775480bcce27281f3b76d1f5.seg 02958343
+03467517/points/34874708b51c7ed493f0194265a9746c.pts 03467517/expert_verified/points_label/34874708b51c7ed493f0194265a9746c.seg 03467517
+04225987/points/abdc4a823b1f78c397f47f3057557cbe.pts 04225987/expert_verified/points_label/abdc4a823b1f78c397f47f3057557cbe.seg 04225987
+03948459/points/14fe99eb0c105a90fc9c56fb43681c11.pts 03948459/expert_verified/points_label/14fe99eb0c105a90fc9c56fb43681c11.seg 03948459
+04379243/points/f5aecb6607876495e03eb69820d1aaf2.pts 04379243/expert_verified/points_label/f5aecb6607876495e03eb69820d1aaf2.seg 04379243
+03001627/points/3c81fab5678a3872327289c00b6dc9ca.pts 03001627/expert_verified/points_label/3c81fab5678a3872327289c00b6dc9ca.seg 03001627
+04379243/points/fe3351c94fbab8ce3002761e7a3ba3bd.pts 04379243/expert_verified/points_label/fe3351c94fbab8ce3002761e7a3ba3bd.seg 04379243
+04379243/points/5f0c33039269b7a9f0e84b9d9ad447e2.pts 04379243/expert_verified/points_label/5f0c33039269b7a9f0e84b9d9ad447e2.seg 04379243
+03001627/points/fa7347547e290732bf65e1af50b5b7d4.pts 03001627/expert_verified/points_label/fa7347547e290732bf65e1af50b5b7d4.seg 03001627
+04379243/points/9c33336af33fd905776d8bc79b9caa2c.pts 04379243/expert_verified/points_label/9c33336af33fd905776d8bc79b9caa2c.seg 04379243
+03001627/points/1d828c69106609f8cd783766d090e665.pts 03001627/expert_verified/points_label/1d828c69106609f8cd783766d090e665.seg 03001627
+04379243/points/5fbb7a5f01f646ca5830980abc1c717a.pts 04379243/expert_verified/points_label/5fbb7a5f01f646ca5830980abc1c717a.seg 04379243
+03636649/points/777a686890d74b350359b4e03cfdfa.pts 03636649/expert_verified/points_label/777a686890d74b350359b4e03cfdfa.seg 03636649
+02773838/points/3077a9b76724b6d35de21284bb515a83.pts 02773838/expert_verified/points_label/3077a9b76724b6d35de21284bb515a83.seg 02773838
+03642806/points/b233163860361eda8cfacef5204026d6.pts 03642806/expert_verified/points_label/b233163860361eda8cfacef5204026d6.seg 03642806
+02958343/points/f10f279643fbb3276a78cd0552215cff.pts 02958343/expert_verified/points_label/f10f279643fbb3276a78cd0552215cff.seg 02958343
+02691156/points/2c64c521c114df40e51f766854841067.pts 02691156/expert_verified/points_label/2c64c521c114df40e51f766854841067.seg 02691156
+03001627/points/3b8f2b955ee9a904b3c42e318f3affc.pts 03001627/expert_verified/points_label/3b8f2b955ee9a904b3c42e318f3affc.seg 03001627
+04379243/points/2a64bd38a4e42f33dc43fde5155b3946.pts 04379243/expert_verified/points_label/2a64bd38a4e42f33dc43fde5155b3946.seg 04379243
+03001627/points/52310bca00e6a3671201d487ecde379e.pts 03001627/expert_verified/points_label/52310bca00e6a3671201d487ecde379e.seg 03001627
+03001627/points/5346017af72c1843169d299c5f567c18.pts 03001627/expert_verified/points_label/5346017af72c1843169d299c5f567c18.seg 03001627
+02954340/points/c1436c38beba0005284432ce2f42f498.pts 02954340/expert_verified/points_label/c1436c38beba0005284432ce2f42f498.seg 02954340
+03636649/points/34ce1de178694f87e76bc197b3a3ffc0.pts 03636649/expert_verified/points_label/34ce1de178694f87e76bc197b3a3ffc0.seg 03636649
+03001627/points/8e7714615a4b1e6f82390c5f604e0d9b.pts 03001627/expert_verified/points_label/8e7714615a4b1e6f82390c5f604e0d9b.seg 03001627
+03948459/points/a3e6dcfc074489fd8ec2966c0323533e.pts 03948459/expert_verified/points_label/a3e6dcfc074489fd8ec2966c0323533e.seg 03948459
+02691156/points/3ad337dcef167024fe6302fece358e4a.pts 02691156/expert_verified/points_label/3ad337dcef167024fe6302fece358e4a.seg 02691156
+04379243/points/124cc3b92266c2767156f312cf4e035e.pts 04379243/expert_verified/points_label/124cc3b92266c2767156f312cf4e035e.seg 04379243
+04379243/points/ee5f0411fcff59951105a3fc18779f13.pts 04379243/expert_verified/points_label/ee5f0411fcff59951105a3fc18779f13.seg 04379243
+04379243/points/b1117a83ebf5a4c9c337a931444a5063.pts 04379243/expert_verified/points_label/b1117a83ebf5a4c9c337a931444a5063.seg 04379243
+03001627/points/fb847cd696ec711197f2016c3d6097c9.pts 03001627/expert_verified/points_label/fb847cd696ec711197f2016c3d6097c9.seg 03001627
+02691156/points/50da48c8e7644508fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/50da48c8e7644508fca1f1143bb6bc17.seg 02691156
+02958343/points/78c0bec338fa1c01d6b98bf27ff43caf.pts 02958343/expert_verified/points_label/78c0bec338fa1c01d6b98bf27ff43caf.seg 02958343
+02691156/points/37fbd275a734ec1b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/37fbd275a734ec1b66cf1b4a8fc3914e.seg 02691156
+03636649/points/e053e531fc4341b5fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e053e531fc4341b5fcb8d8c6d4df8143.seg 03636649
+02691156/points/3db61220251b3c9de719b5362fe06bbb.pts 02691156/expert_verified/points_label/3db61220251b3c9de719b5362fe06bbb.seg 02691156
+03642806/points/a7f983f1d0642745135a402b573354e4.pts 03642806/expert_verified/points_label/a7f983f1d0642745135a402b573354e4.seg 03642806
+03001627/points/4e26eab28703c12bdd5f3f2440a93d21.pts 03001627/expert_verified/points_label/4e26eab28703c12bdd5f3f2440a93d21.seg 03001627
+04225987/points/24e46e195f4907887a70e5e6aa241c88.pts 04225987/expert_verified/points_label/24e46e195f4907887a70e5e6aa241c88.seg 04225987
+02691156/points/3ab1e94b6c3a1730c56cc5a87f567365.pts 02691156/expert_verified/points_label/3ab1e94b6c3a1730c56cc5a87f567365.seg 02691156
+03001627/points/61b984febe54b752d61420a53a0cb96d.pts 03001627/expert_verified/points_label/61b984febe54b752d61420a53a0cb96d.seg 03001627
+04379243/points/adf574f947f00bdd548b2639ebc3e759.pts 04379243/expert_verified/points_label/adf574f947f00bdd548b2639ebc3e759.seg 04379243
+03001627/points/ef76b9cbf76bad40586ef70b3cee4240.pts 03001627/expert_verified/points_label/ef76b9cbf76bad40586ef70b3cee4240.seg 03001627
+04379243/points/abef0c609ad3e9c2edea4b985280bcc1.pts 04379243/expert_verified/points_label/abef0c609ad3e9c2edea4b985280bcc1.seg 04379243
+02773838/points/1b84dededd445058e44a5473032f38f.pts 02773838/expert_verified/points_label/1b84dededd445058e44a5473032f38f.seg 02773838
+04379243/points/cd09a9641ea97d873823cce3247aa03b.pts 04379243/expert_verified/points_label/cd09a9641ea97d873823cce3247aa03b.seg 04379243
+03636649/points/6aa1ce4e245001589f1a71e46bbde97c.pts 03636649/expert_verified/points_label/6aa1ce4e245001589f1a71e46bbde97c.seg 03636649
+04379243/points/bb1aa2cdf216d348e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/bb1aa2cdf216d348e76bc197b3a3ffc0.seg 04379243
+04379243/points/da1e75a8647bfd919778416969ddad32.pts 04379243/expert_verified/points_label/da1e75a8647bfd919778416969ddad32.seg 04379243
+02958343/points/3d0308da43d52e3ef56f8ea3d9016e55.pts 02958343/expert_verified/points_label/3d0308da43d52e3ef56f8ea3d9016e55.seg 02958343
+04379243/points/1ca75076bcebfac76c3484ac7eef025f.pts 04379243/expert_verified/points_label/1ca75076bcebfac76c3484ac7eef025f.seg 04379243
+02691156/points/97ec5b82d9757b639cb1b92881e8e76.pts 02691156/expert_verified/points_label/97ec5b82d9757b639cb1b92881e8e76.seg 02691156
+02691156/points/75db11c354c6342aad01ec966c80ac91.pts 02691156/expert_verified/points_label/75db11c354c6342aad01ec966c80ac91.seg 02691156
+02691156/points/caf80ecbad22a7384e1799d9d4d697c3.pts 02691156/expert_verified/points_label/caf80ecbad22a7384e1799d9d4d697c3.seg 02691156
+03001627/points/d6e0a95f00c7af6fbae0ffb97058b7cc.pts 03001627/expert_verified/points_label/d6e0a95f00c7af6fbae0ffb97058b7cc.seg 03001627
+04379243/points/fa72e9cf7308066b1c072ac0b83fe07a.pts 04379243/expert_verified/points_label/fa72e9cf7308066b1c072ac0b83fe07a.seg 04379243
+03790512/points/455485399ab75f93429f1c522640e6f0.pts 03790512/expert_verified/points_label/455485399ab75f93429f1c522640e6f0.seg 03790512
+03642806/points/241ec8a746dd1cfc78f71a335ebabfa5.pts 03642806/expert_verified/points_label/241ec8a746dd1cfc78f71a335ebabfa5.seg 03642806
+04379243/points/c6575b4c39a341c698d5fc0473d00a1c.pts 04379243/expert_verified/points_label/c6575b4c39a341c698d5fc0473d00a1c.seg 04379243
+02958343/points/219a0021526791d18bb5c0bf5eec83fc.pts 02958343/expert_verified/points_label/219a0021526791d18bb5c0bf5eec83fc.seg 02958343
+02691156/points/49917fb82beca4beca8607f540cc62ba.pts 02691156/expert_verified/points_label/49917fb82beca4beca8607f540cc62ba.seg 02691156
+03636649/points/dac278ab197b5efefaa6996ece0d86f4.pts 03636649/expert_verified/points_label/dac278ab197b5efefaa6996ece0d86f4.seg 03636649
+03467517/points/f146c58eaa06f5e4d57700c05b1862d8.pts 03467517/expert_verified/points_label/f146c58eaa06f5e4d57700c05b1862d8.seg 03467517
+04379243/points/aaf6be1d92a8c61fdcfcef693e7ec696.pts 04379243/expert_verified/points_label/aaf6be1d92a8c61fdcfcef693e7ec696.seg 04379243
+03001627/points/46789c1fb150dfaf51f77a6d7299806.pts 03001627/expert_verified/points_label/46789c1fb150dfaf51f77a6d7299806.seg 03001627
+03790512/points/4a2f0b20ef680347395d58407f193ba.pts 03790512/expert_verified/points_label/4a2f0b20ef680347395d58407f193ba.seg 03790512
+04379243/points/28ce06aa6f25b39f2d19175e7d19b7cb.pts 04379243/expert_verified/points_label/28ce06aa6f25b39f2d19175e7d19b7cb.seg 04379243
+02958343/points/1710ff46ca275e171df27141dea8c9a.pts 02958343/expert_verified/points_label/1710ff46ca275e171df27141dea8c9a.seg 02958343
+03636649/points/b57bcdb88c669663ec2a7a1f5fe7365d.pts 03636649/expert_verified/points_label/b57bcdb88c669663ec2a7a1f5fe7365d.seg 03636649
+04379243/points/c348d279fd22730a9741b7ee128375de.pts 04379243/expert_verified/points_label/c348d279fd22730a9741b7ee128375de.seg 04379243
+03001627/points/76fe7cf10c5dbf1edcb466b6f48b5810.pts 03001627/expert_verified/points_label/76fe7cf10c5dbf1edcb466b6f48b5810.seg 03001627
+04379243/points/7727cc0cb47705632dfc2f8d5d30193c.pts 04379243/expert_verified/points_label/7727cc0cb47705632dfc2f8d5d30193c.seg 04379243
+03797390/points/586e67c53f181dc22adf8abaa25e0215.pts 03797390/expert_verified/points_label/586e67c53f181dc22adf8abaa25e0215.seg 03797390
+04379243/points/d9b418e6ec14dbf50efffb055ed6bd1.pts 04379243/expert_verified/points_label/d9b418e6ec14dbf50efffb055ed6bd1.seg 04379243
+04379243/points/f52e52094d8240b2dcfcef693e7ec696.pts 04379243/expert_verified/points_label/f52e52094d8240b2dcfcef693e7ec696.seg 04379243
+02691156/points/821309c2037b49135fab3f99161dc2c2.pts 02691156/expert_verified/points_label/821309c2037b49135fab3f99161dc2c2.seg 02691156
+02954340/points/254e230d31a62470a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/254e230d31a62470a52821bf1aa3b19a.seg 02954340
+02691156/points/e8de6c58f4a772d771d03b466c72ce41.pts 02691156/expert_verified/points_label/e8de6c58f4a772d771d03b466c72ce41.seg 02691156
+03642806/points/f1c6801e84c85a07bfb149497503af.pts 03642806/expert_verified/points_label/f1c6801e84c85a07bfb149497503af.seg 03642806
+02691156/points/a04d10b24ede5e9a3de778e85611513b.pts 02691156/expert_verified/points_label/a04d10b24ede5e9a3de778e85611513b.seg 02691156
+03467517/points/c8acdfaec5008118343b0b12983b9982.pts 03467517/expert_verified/points_label/c8acdfaec5008118343b0b12983b9982.seg 03467517
+03001627/points/9c3e53d9d1e653c0bf80985a99195eb8.pts 03001627/expert_verified/points_label/9c3e53d9d1e653c0bf80985a99195eb8.seg 03001627
+02691156/points/123bd9e948881939c38a1d3458dafa1b.pts 02691156/expert_verified/points_label/123bd9e948881939c38a1d3458dafa1b.seg 02691156
+03948459/points/abc7a1373f4b30291adcc40d88daf7c8.pts 03948459/expert_verified/points_label/abc7a1373f4b30291adcc40d88daf7c8.seg 03948459
+03636649/points/c906a9c7ae536a0c7fb7f79251dd7727.pts 03636649/expert_verified/points_label/c906a9c7ae536a0c7fb7f79251dd7727.seg 03636649
+03797390/points/e71102b6da1d63f3a363b55cbd344baa.pts 03797390/expert_verified/points_label/e71102b6da1d63f3a363b55cbd344baa.seg 03797390
+03642806/points/22389f9c3c049ce757c29983a611b1c6.pts 03642806/expert_verified/points_label/22389f9c3c049ce757c29983a611b1c6.seg 03642806
+04379243/points/5c2c29fd07c365afe5c65540d3456093.pts 04379243/expert_verified/points_label/5c2c29fd07c365afe5c65540d3456093.seg 04379243
+03001627/points/9a8dfc7a6831749f504721639e19f609.pts 03001627/expert_verified/points_label/9a8dfc7a6831749f504721639e19f609.seg 03001627
+03001627/points/d49ce87d43cf4c8f1679065e1c457f94.pts 03001627/expert_verified/points_label/d49ce87d43cf4c8f1679065e1c457f94.seg 03001627
+02691156/points/dfa36bffe436a98ee0534173b9189765.pts 02691156/expert_verified/points_label/dfa36bffe436a98ee0534173b9189765.seg 02691156
+04379243/points/987b7b49a1435a4b1b17743c18fb63dc.pts 04379243/expert_verified/points_label/987b7b49a1435a4b1b17743c18fb63dc.seg 04379243
+04379243/points/8d0d7787f4babee7e66285d36ebb986.pts 04379243/expert_verified/points_label/8d0d7787f4babee7e66285d36ebb986.seg 04379243
+04379243/points/4f06092100d0164013d2510999d0f1d2.pts 04379243/expert_verified/points_label/4f06092100d0164013d2510999d0f1d2.seg 04379243
+02958343/points/fce2b933f93d132f4f45033b2f001552.pts 02958343/expert_verified/points_label/fce2b933f93d132f4f45033b2f001552.seg 02958343
+04379243/points/3817a222e96acc4ca78510b72d2281ea.pts 04379243/expert_verified/points_label/3817a222e96acc4ca78510b72d2281ea.seg 04379243
+03001627/points/7ee09fdece7d9142afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/7ee09fdece7d9142afdb9a672b7d3b8a.seg 03001627
+04379243/points/676d05aaaeecb8a04b3c42e318f3affc.pts 04379243/expert_verified/points_label/676d05aaaeecb8a04b3c42e318f3affc.seg 04379243
+03624134/points/6813197ad5e7011fcc34b900bb2492e.pts 03624134/expert_verified/points_label/6813197ad5e7011fcc34b900bb2492e.seg 03624134
+04379243/points/ea367e390741fc38dcfcef693e7ec696.pts 04379243/expert_verified/points_label/ea367e390741fc38dcfcef693e7ec696.seg 04379243
+04379243/points/2e5ac0552fa296c43bbab77a66bc3671.pts 04379243/expert_verified/points_label/2e5ac0552fa296c43bbab77a66bc3671.seg 04379243
+03467517/points/32a337387527f39193f0194265a9746c.pts 03467517/expert_verified/points_label/32a337387527f39193f0194265a9746c.seg 03467517
+03001627/points/97cd4ed02e022ce7174150bd56e389a8.pts 03001627/expert_verified/points_label/97cd4ed02e022ce7174150bd56e389a8.seg 03001627
+04379243/points/88e06a85e2a0f99fa7e7cb173e141227.pts 04379243/expert_verified/points_label/88e06a85e2a0f99fa7e7cb173e141227.seg 04379243
+04379243/points/c5a02d586ea431a1e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/c5a02d586ea431a1e76bc197b3a3ffc0.seg 04379243
+03001627/points/bcdcb4928e07e4174a623eb2e3317415.pts 03001627/expert_verified/points_label/bcdcb4928e07e4174a623eb2e3317415.seg 03001627
+02691156/points/934dd5529c22cd05bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/934dd5529c22cd05bc0909d98a1ff2b4.seg 02691156
+03001627/points/e696f4c7cd88b8b52ff834514c92e8fd.pts 03001627/expert_verified/points_label/e696f4c7cd88b8b52ff834514c92e8fd.seg 03001627
+02691156/points/93ba822e84586999e3375a6b96a1d765.pts 02691156/expert_verified/points_label/93ba822e84586999e3375a6b96a1d765.seg 02691156
+02958343/points/3ac664a7486a0bdff200a72c9245aee7.pts 02958343/expert_verified/points_label/3ac664a7486a0bdff200a72c9245aee7.seg 02958343
+02691156/points/545cadae487b55bbc46ba5100bcdc520.pts 02691156/expert_verified/points_label/545cadae487b55bbc46ba5100bcdc520.seg 02691156
+03001627/points/c47f71319ead4eb8a4fb72f4f3b0e317.pts 03001627/expert_verified/points_label/c47f71319ead4eb8a4fb72f4f3b0e317.seg 03001627
+04379243/points/39bb09201e0cd201c17e7f250c5222bd.pts 04379243/expert_verified/points_label/39bb09201e0cd201c17e7f250c5222bd.seg 04379243
+04379243/points/13782b95eeefcedacf004563556ddb36.pts 04379243/expert_verified/points_label/13782b95eeefcedacf004563556ddb36.seg 04379243
+03001627/points/3cc90d903e0ec7aa61e11d707ecb7fa0.pts 03001627/expert_verified/points_label/3cc90d903e0ec7aa61e11d707ecb7fa0.seg 03001627
+04379243/points/4079aaabaa6451a2765ca89770f206ec.pts 04379243/expert_verified/points_label/4079aaabaa6451a2765ca89770f206ec.seg 04379243
+04379243/points/4bbf789edb243cafc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/4bbf789edb243cafc955e5ed03ef3a2f.seg 04379243
+02773838/points/6187bd900c3bc002ed13f430b2941481.pts 02773838/expert_verified/points_label/6187bd900c3bc002ed13f430b2941481.seg 02773838
+04379243/points/6dc6bb97c387b2f3af4e8812cf1b9e1.pts 04379243/expert_verified/points_label/6dc6bb97c387b2f3af4e8812cf1b9e1.seg 04379243
+03467517/points/9c260623916034b6f7d037d5768b173f.pts 03467517/expert_verified/points_label/9c260623916034b6f7d037d5768b173f.seg 03467517
+02691156/points/8d5c3d38de9c3685f2e77d54f4da142.pts 02691156/expert_verified/points_label/8d5c3d38de9c3685f2e77d54f4da142.seg 02691156
+04379243/points/6152e14b042aa17546f41dc2aaef556b.pts 04379243/expert_verified/points_label/6152e14b042aa17546f41dc2aaef556b.seg 04379243
+03467517/points/68a8bf89972cd337a77e8142614cdaae.pts 03467517/expert_verified/points_label/68a8bf89972cd337a77e8142614cdaae.seg 03467517
+02691156/points/3d5354863690ac7eca27bba175814d1.pts 02691156/expert_verified/points_label/3d5354863690ac7eca27bba175814d1.seg 02691156
+04379243/points/3411daa955306811d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/3411daa955306811d93768e7b9b1eabf.seg 04379243
+04379243/points/8594658920d6ea7b23656ce81843.pts 04379243/expert_verified/points_label/8594658920d6ea7b23656ce81843.seg 04379243
+02691156/points/a074750e28ed3818203936772104a82d.pts 02691156/expert_verified/points_label/a074750e28ed3818203936772104a82d.seg 02691156
+04379243/points/fcd4d0e1777f4841dcfcef693e7ec696.pts 04379243/expert_verified/points_label/fcd4d0e1777f4841dcfcef693e7ec696.seg 04379243
+03948459/points/708e38e7b733fd22bfae4699de9cb91a.pts 03948459/expert_verified/points_label/708e38e7b733fd22bfae4699de9cb91a.seg 03948459
+04379243/points/3c4e1361b066ea3b8ca998f0f87d0c84.pts 04379243/expert_verified/points_label/3c4e1361b066ea3b8ca998f0f87d0c84.seg 04379243
+03624134/points/38798b7013607bbf1e0b76f10c6e38af.pts 03624134/expert_verified/points_label/38798b7013607bbf1e0b76f10c6e38af.seg 03624134
+02691156/points/2176fa9f69e5e1dcca8607f540cc62ba.pts 02691156/expert_verified/points_label/2176fa9f69e5e1dcca8607f540cc62ba.seg 02691156
+03467517/points/8dd7df733a5ba17acae98171fea031ef.pts 03467517/expert_verified/points_label/8dd7df733a5ba17acae98171fea031ef.seg 03467517
+03001627/points/d3f31fd0fc99f45e8b3f6b4a44a70e52.pts 03001627/expert_verified/points_label/d3f31fd0fc99f45e8b3f6b4a44a70e52.seg 03001627
+02691156/points/118e8142a8cb1fe19a4a28ef635593ce.pts 02691156/expert_verified/points_label/118e8142a8cb1fe19a4a28ef635593ce.seg 02691156
+03624134/points/de62211649b4cced49384f9741ad64d8.pts 03624134/expert_verified/points_label/de62211649b4cced49384f9741ad64d8.seg 03624134
+03642806/points/7a4342f61ed7b153341aafe10fd0cbd4.pts 03642806/expert_verified/points_label/7a4342f61ed7b153341aafe10fd0cbd4.seg 03642806
+03001627/points/ba56f02dee485974c242632b2a8c3129.pts 03001627/expert_verified/points_label/ba56f02dee485974c242632b2a8c3129.seg 03001627
+04379243/points/97b7baeb8a172de42f56f09e5bc67bee.pts 04379243/expert_verified/points_label/97b7baeb8a172de42f56f09e5bc67bee.seg 04379243
+04379243/points/7b2af227264af938d42b9650f19dd425.pts 04379243/expert_verified/points_label/7b2af227264af938d42b9650f19dd425.seg 04379243
+04379243/points/e25fdb977fb867fdc3bd24f986301745.pts 04379243/expert_verified/points_label/e25fdb977fb867fdc3bd24f986301745.seg 04379243
+03467517/points/33da9c54f43be3e17693a84bff425e3.pts 03467517/expert_verified/points_label/33da9c54f43be3e17693a84bff425e3.seg 03467517
+02691156/points/e1e5cfcabcbe26a03087f84b199fd297.pts 02691156/expert_verified/points_label/e1e5cfcabcbe26a03087f84b199fd297.seg 02691156
+03636649/points/ba05811f301cdd791735ea0e092a805a.pts 03636649/expert_verified/points_label/ba05811f301cdd791735ea0e092a805a.seg 03636649
+03001627/points/6678f63c9b584a549d9e5580ae9f8738.pts 03001627/expert_verified/points_label/6678f63c9b584a549d9e5580ae9f8738.seg 03001627
+04379243/points/b6b8ede77085c0a95bea7c29e873d16.pts 04379243/expert_verified/points_label/b6b8ede77085c0a95bea7c29e873d16.seg 04379243
+02691156/points/d81042a53dd1cc5bd90bfc986bc4c94d.pts 02691156/expert_verified/points_label/d81042a53dd1cc5bd90bfc986bc4c94d.seg 02691156
+03001627/points/37b432326fecc8a1327289c00b6dc9ca.pts 03001627/expert_verified/points_label/37b432326fecc8a1327289c00b6dc9ca.seg 03001627
+03636649/points/c898f9b1dddbb8801735ea0e092a805a.pts 03636649/expert_verified/points_label/c898f9b1dddbb8801735ea0e092a805a.seg 03636649
+03001627/points/5d02aed0e9c93e829b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/5d02aed0e9c93e829b9f2eb77f5e247e.seg 03001627
+03001627/points/9a864d5de972a8c7cb686b8b855fed61.pts 03001627/expert_verified/points_label/9a864d5de972a8c7cb686b8b855fed61.seg 03001627
+04379243/points/b14a14cc2f3c38c9e3def9c422df2282.pts 04379243/expert_verified/points_label/b14a14cc2f3c38c9e3def9c422df2282.seg 04379243
+04379243/points/f2893a87ec37f8b3781cb4570305e329.pts 04379243/expert_verified/points_label/f2893a87ec37f8b3781cb4570305e329.seg 04379243
+02691156/points/3fa511e1882e41eeca8607f540cc62ba.pts 02691156/expert_verified/points_label/3fa511e1882e41eeca8607f540cc62ba.seg 02691156
+02691156/points/444d67950ff9a4cc1139bebb00fe5be8.pts 02691156/expert_verified/points_label/444d67950ff9a4cc1139bebb00fe5be8.seg 02691156
+03001627/points/3d3b7f63f5525b1ae37f5a622d383617.pts 03001627/expert_verified/points_label/3d3b7f63f5525b1ae37f5a622d383617.seg 03001627
+03001627/points/30beaf15d2d2beb1febad4f49b26ec52.pts 03001627/expert_verified/points_label/30beaf15d2d2beb1febad4f49b26ec52.seg 03001627
+04379243/points/59f04ddbd896f4f5430644dfe647c381.pts 04379243/expert_verified/points_label/59f04ddbd896f4f5430644dfe647c381.seg 04379243
+04379243/points/eb9b9b8d186a974a7afee304cce81d6f.pts 04379243/expert_verified/points_label/eb9b9b8d186a974a7afee304cce81d6f.seg 04379243
+03790512/points/7c4fc3a05d5fc8b1d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/7c4fc3a05d5fc8b1d0f568c31c1cd62a.seg 03790512
+04379243/points/68142013a4f5e7c2febad4f49b26ec52.pts 04379243/expert_verified/points_label/68142013a4f5e7c2febad4f49b26ec52.seg 04379243
+02958343/points/8053e014516531ddc3f500d7b182f6.pts 02958343/expert_verified/points_label/8053e014516531ddc3f500d7b182f6.seg 02958343
+02958343/points/1a3782ae4bd711b66b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/1a3782ae4bd711b66b418c7d9fedcaa9.seg 02958343
+04379243/points/cc58de930acd321fac242c3aebc81b2f.pts 04379243/expert_verified/points_label/cc58de930acd321fac242c3aebc81b2f.seg 04379243
+02691156/points/d4dac019726e980e203936772104a82d.pts 02691156/expert_verified/points_label/d4dac019726e980e203936772104a82d.seg 02691156
+02954340/points/6e983d20e0bf80296829cd4082fbdbdf.pts 02954340/expert_verified/points_label/6e983d20e0bf80296829cd4082fbdbdf.seg 02954340
+03636649/points/fad026744a6abb1937cf479d4bb58d.pts 03636649/expert_verified/points_label/fad026744a6abb1937cf479d4bb58d.seg 03636649
+02958343/points/4d2d4e26349be1f3be2cbcda9b6dc9b2.pts 02958343/expert_verified/points_label/4d2d4e26349be1f3be2cbcda9b6dc9b2.seg 02958343
+03636649/points/280fa01686e780ba3501c961e91ff6d7.pts 03636649/expert_verified/points_label/280fa01686e780ba3501c961e91ff6d7.seg 03636649
+04379243/points/f02907c5c42e1e766f1e07a56c129dfc.pts 04379243/expert_verified/points_label/f02907c5c42e1e766f1e07a56c129dfc.seg 04379243
+04379243/points/5f100571ffd90f8252b4875f731f71cd.pts 04379243/expert_verified/points_label/5f100571ffd90f8252b4875f731f71cd.seg 04379243
+04379243/points/f718cb5d6202341dc183308b9aafe2ca.pts 04379243/expert_verified/points_label/f718cb5d6202341dc183308b9aafe2ca.seg 04379243
+03642806/points/b436271050d647052f8d6d501b18a4b5.pts 03642806/expert_verified/points_label/b436271050d647052f8d6d501b18a4b5.seg 03642806
+03001627/points/6dddf2b95ca09bf5febad4f49b26ec52.pts 03001627/expert_verified/points_label/6dddf2b95ca09bf5febad4f49b26ec52.seg 03001627
+02691156/points/b812c2df636aa0218b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/b812c2df636aa0218b96ae1a0a8b84ec.seg 02691156
+02958343/points/89edb3d434f4c983afe1d4530f4c6e24.pts 02958343/expert_verified/points_label/89edb3d434f4c983afe1d4530f4c6e24.seg 02958343
+02958343/points/80ac9cc0d4c9dde3b7a7bc444c2d756b.pts 02958343/expert_verified/points_label/80ac9cc0d4c9dde3b7a7bc444c2d756b.seg 02958343
+04379243/points/b62d45745434ac46c4cfe384be4426c3.pts 04379243/expert_verified/points_label/b62d45745434ac46c4cfe384be4426c3.seg 04379243
+04379243/points/9c4afb731e910d3723500a5b036df62e.pts 04379243/expert_verified/points_label/9c4afb731e910d3723500a5b036df62e.seg 04379243
+04379243/points/43fcddd5232a6021a56e8b79ca4e2911.pts 04379243/expert_verified/points_label/43fcddd5232a6021a56e8b79ca4e2911.seg 04379243
+04379243/points/6724ae69c0bde4c09b7dad6c9c46bcf1.pts 04379243/expert_verified/points_label/6724ae69c0bde4c09b7dad6c9c46bcf1.seg 04379243
+03001627/points/323fc7b1d2b44cb7ff2b8acf844d34d2.pts 03001627/expert_verified/points_label/323fc7b1d2b44cb7ff2b8acf844d34d2.seg 03001627
+03001627/points/434cee44934612a81f98c0761af40e04.pts 03001627/expert_verified/points_label/434cee44934612a81f98c0761af40e04.seg 03001627
+03636649/points/31dee666120727b0be78c8b300d2a963.pts 03636649/expert_verified/points_label/31dee666120727b0be78c8b300d2a963.seg 03636649
+02958343/points/48f5446e6ac9c1b51f1446551412bde4.pts 02958343/expert_verified/points_label/48f5446e6ac9c1b51f1446551412bde4.seg 02958343
+04379243/points/aa3eb180a4f6d8d42de421c2ab5cfb52.pts 04379243/expert_verified/points_label/aa3eb180a4f6d8d42de421c2ab5cfb52.seg 04379243
+04379243/points/14e5e4db3246dacff12d7184a2ad3430.pts 04379243/expert_verified/points_label/14e5e4db3246dacff12d7184a2ad3430.seg 04379243
+03001627/points/96c0ecd1ef80e818c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/96c0ecd1ef80e818c8687ff9b0b4e4ac.seg 03001627
+04225987/points/d4c042d11f29dffa1082f2ea630bf69e.pts 04225987/expert_verified/points_label/d4c042d11f29dffa1082f2ea630bf69e.seg 04225987
+03642806/points/7ebff305b2e93504239603972bcd2e7b.pts 03642806/expert_verified/points_label/7ebff305b2e93504239603972bcd2e7b.seg 03642806
+03467517/points/369fc7f8d880e1b793f0194265a9746c.pts 03467517/expert_verified/points_label/369fc7f8d880e1b793f0194265a9746c.seg 03467517
+04379243/points/25f69a74efbff4d071a782a4379556c7.pts 04379243/expert_verified/points_label/25f69a74efbff4d071a782a4379556c7.seg 04379243
+04379243/points/7cd4844def36a9f5bc7589eefbdbc3c5.pts 04379243/expert_verified/points_label/7cd4844def36a9f5bc7589eefbdbc3c5.seg 04379243
+03467517/points/5852a24dde24a8ef93f0194265a9746c.pts 03467517/expert_verified/points_label/5852a24dde24a8ef93f0194265a9746c.seg 03467517
+03001627/points/df8440d8678f3a91c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/df8440d8678f3a91c8687ff9b0b4e4ac.seg 03001627
+04379243/points/49bf25ff4401946524c10ba1eb690638.pts 04379243/expert_verified/points_label/49bf25ff4401946524c10ba1eb690638.seg 04379243
+03001627/points/7eedcb6d76b8c23a9cdb421f6af95e5f.pts 03001627/expert_verified/points_label/7eedcb6d76b8c23a9cdb421f6af95e5f.seg 03001627
+03797390/points/ff1a44e1c1785d618bca309f2c51966a.pts 03797390/expert_verified/points_label/ff1a44e1c1785d618bca309f2c51966a.seg 03797390
+02958343/points/85f3dc3318f5200c8672c9b355cd2075.pts 02958343/expert_verified/points_label/85f3dc3318f5200c8672c9b355cd2075.seg 02958343
+02691156/points/c9be9f07f5ae7c375d7629390efe0a2.pts 02691156/expert_verified/points_label/c9be9f07f5ae7c375d7629390efe0a2.seg 02691156
+02691156/points/14cd2f1de7f68bf3ab550998f901c8e1.pts 02691156/expert_verified/points_label/14cd2f1de7f68bf3ab550998f901c8e1.seg 02691156
+02958343/points/81fad64b8fd8f010b17445a1c29f6d34.pts 02958343/expert_verified/points_label/81fad64b8fd8f010b17445a1c29f6d34.seg 02958343
+02958343/points/fe2ce22107693354f1cc1cb691702a23.pts 02958343/expert_verified/points_label/fe2ce22107693354f1cc1cb691702a23.seg 02958343
+02691156/points/74cbf170c5f2fb587d9c9c8a8ba32919.pts 02691156/expert_verified/points_label/74cbf170c5f2fb587d9c9c8a8ba32919.seg 02691156
+02691156/points/67dbb0de722cf5cd7a734abc5ba1db0f.pts 02691156/expert_verified/points_label/67dbb0de722cf5cd7a734abc5ba1db0f.seg 02691156
+04379243/points/fa345f8f107d93b9ba70f71694a4b74c.pts 04379243/expert_verified/points_label/fa345f8f107d93b9ba70f71694a4b74c.seg 04379243
+04379243/points/a45a7ba9a2842a55634c21965ee6bab.pts 04379243/expert_verified/points_label/a45a7ba9a2842a55634c21965ee6bab.seg 04379243
+04379243/points/8d7ac6078989980fad16260d4d73b56.pts 04379243/expert_verified/points_label/8d7ac6078989980fad16260d4d73b56.seg 04379243
+03001627/points/e803b31e2185d0405784b22e1081a3e1.pts 03001627/expert_verified/points_label/e803b31e2185d0405784b22e1081a3e1.seg 03001627
+04379243/points/aaf3aeda0f848344b87028a4b477349f.pts 04379243/expert_verified/points_label/aaf3aeda0f848344b87028a4b477349f.seg 04379243
+03636649/points/e94aab17400945413225afab722d9fd2.pts 03636649/expert_verified/points_label/e94aab17400945413225afab722d9fd2.seg 03636649
+03001627/points/d2c465e85d2e8f1fcea003eff0268278.pts 03001627/expert_verified/points_label/d2c465e85d2e8f1fcea003eff0268278.seg 03001627
+03001627/points/88376e3d3a23d263de29d28278a34a18.pts 03001627/expert_verified/points_label/88376e3d3a23d263de29d28278a34a18.seg 03001627
+04379243/points/4775e71d37374444febad4f49b26ec52.pts 04379243/expert_verified/points_label/4775e71d37374444febad4f49b26ec52.seg 04379243
+03636649/points/f12822778713f5e35b36bbc16e99b441.pts 03636649/expert_verified/points_label/f12822778713f5e35b36bbc16e99b441.seg 03636649
+03636649/points/963e6743370d5c5c9b5d51fa8cce1753.pts 03636649/expert_verified/points_label/963e6743370d5c5c9b5d51fa8cce1753.seg 03636649
+04379243/points/13c51c08c3695a09eda47978b73f5994.pts 04379243/expert_verified/points_label/13c51c08c3695a09eda47978b73f5994.seg 04379243
+04379243/points/89827ac677337629ab610b0c94236463.pts 04379243/expert_verified/points_label/89827ac677337629ab610b0c94236463.seg 04379243
+04379243/points/89b478643e53d3d6285c99063fc6fcf8.pts 04379243/expert_verified/points_label/89b478643e53d3d6285c99063fc6fcf8.seg 04379243
+04379243/points/401cd99ace3b92fadf6cfab91d65bb91.pts 04379243/expert_verified/points_label/401cd99ace3b92fadf6cfab91d65bb91.seg 04379243
+04379243/points/74c3d551e32a1cca664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/74c3d551e32a1cca664b3b9b23ddfcbc.seg 04379243
+04379243/points/db64db160fd13a514e1a714ee619465a.pts 04379243/expert_verified/points_label/db64db160fd13a514e1a714ee619465a.seg 04379243
+03001627/points/8e664a0bcaf9d2a45ca1aaa0789db621.pts 03001627/expert_verified/points_label/8e664a0bcaf9d2a45ca1aaa0789db621.seg 03001627
+03001627/points/43897195d7f893d759c257be4c612509.pts 03001627/expert_verified/points_label/43897195d7f893d759c257be4c612509.seg 03001627
+04379243/points/e6d8569c0957e7453002761e7a3ba3bd.pts 04379243/expert_verified/points_label/e6d8569c0957e7453002761e7a3ba3bd.seg 04379243
+03636649/points/ead77648c9c7dbf8d42b9650f19dd425.pts 03636649/expert_verified/points_label/ead77648c9c7dbf8d42b9650f19dd425.seg 03636649
+03636649/points/c54d3a5a9c8a655e46407779dbd69b2d.pts 03636649/expert_verified/points_label/c54d3a5a9c8a655e46407779dbd69b2d.seg 03636649
+03001627/points/379f0efc898d7a7e9fe74a48bbc553d7.pts 03001627/expert_verified/points_label/379f0efc898d7a7e9fe74a48bbc553d7.seg 03001627
+04379243/points/c1d44782ac45d6fe3671949e4f99cc76.pts 04379243/expert_verified/points_label/c1d44782ac45d6fe3671949e4f99cc76.seg 04379243
+04379243/points/7b3b160dafe7e122d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/7b3b160dafe7e122d93768e7b9b1eabf.seg 04379243
+03001627/points/7f271ecbdeb7610d637adadafee6f182.pts 03001627/expert_verified/points_label/7f271ecbdeb7610d637adadafee6f182.seg 03001627
+02958343/points/df34c25a1e1abe9428044fe9244db50a.pts 02958343/expert_verified/points_label/df34c25a1e1abe9428044fe9244db50a.seg 02958343
+03948459/points/98c0bd351e275b3c96893524e607761d.pts 03948459/expert_verified/points_label/98c0bd351e275b3c96893524e607761d.seg 03948459
+03636649/points/b96c8cc6529167bfcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/b96c8cc6529167bfcb8d8c6d4df8143.seg 03636649
+03624134/points/a33847e9c32c1afc93ac017b81605788.pts 03624134/expert_verified/points_label/a33847e9c32c1afc93ac017b81605788.seg 03624134
+03001627/points/594d5b7f3e705a1ab3234e0da44b11e4.pts 03001627/expert_verified/points_label/594d5b7f3e705a1ab3234e0da44b11e4.seg 03001627
+03001627/points/f0f04644e071d9348ca588a3264b9f86.pts 03001627/expert_verified/points_label/f0f04644e071d9348ca588a3264b9f86.seg 03001627
+02691156/points/4bdb2c4fc6701174ca8607f540cc62ba.pts 02691156/expert_verified/points_label/4bdb2c4fc6701174ca8607f540cc62ba.seg 02691156
+03001627/points/fc2a1c4c332f7731e45ef4135c266a12.pts 03001627/expert_verified/points_label/fc2a1c4c332f7731e45ef4135c266a12.seg 03001627
+02691156/points/df68b8fb9f4531b42e690fa6dfd5d610.pts 02691156/expert_verified/points_label/df68b8fb9f4531b42e690fa6dfd5d610.seg 02691156
+03642806/points/517de75577ac6e8a42b9615216f9a30d.pts 03642806/expert_verified/points_label/517de75577ac6e8a42b9615216f9a30d.seg 03642806
+03001627/points/74cc57ea0e2e06dbe4106b1d06dc89b3.pts 03001627/expert_verified/points_label/74cc57ea0e2e06dbe4106b1d06dc89b3.seg 03001627
+02691156/points/d72a483cf8a0cf2bbbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d72a483cf8a0cf2bbbf3143b1cb6076a.seg 02691156
+03001627/points/9c7b2ed3770d1a6ea6fee8e2140acec9.pts 03001627/expert_verified/points_label/9c7b2ed3770d1a6ea6fee8e2140acec9.seg 03001627
+04379243/points/28fb9a81898f88c4ae8375def5e736d8.pts 04379243/expert_verified/points_label/28fb9a81898f88c4ae8375def5e736d8.seg 04379243
+03636649/points/c0b0d7e15d3dfab1733c22d8b8e1c33d.pts 03636649/expert_verified/points_label/c0b0d7e15d3dfab1733c22d8b8e1c33d.seg 03636649
+03001627/points/bb04dc0b336abf4b263915c09bc4854f.pts 03001627/expert_verified/points_label/bb04dc0b336abf4b263915c09bc4854f.seg 03001627
+03001627/points/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.pts 03001627/expert_verified/points_label/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.seg 03001627
+04379243/points/86ad91ef08c53dd77189b31b3e8c8ef3.pts 04379243/expert_verified/points_label/86ad91ef08c53dd77189b31b3e8c8ef3.seg 04379243
+03790512/points/80e717f07645a4a0b37378f3c85478b4.pts 03790512/expert_verified/points_label/80e717f07645a4a0b37378f3c85478b4.seg 03790512
+02691156/points/7d226c520a29c7705e28caa3b26a73fd.pts 02691156/expert_verified/points_label/7d226c520a29c7705e28caa3b26a73fd.seg 02691156
+04379243/points/89c095a52766ecb05d2ac47f638a4ea4.pts 04379243/expert_verified/points_label/89c095a52766ecb05d2ac47f638a4ea4.seg 04379243
+04379243/points/7b92f6facc2a27bc84cc0348a73b80c3.pts 04379243/expert_verified/points_label/7b92f6facc2a27bc84cc0348a73b80c3.seg 04379243
+04379243/points/d578287c4a9452efa9af104529ef47c3.pts 04379243/expert_verified/points_label/d578287c4a9452efa9af104529ef47c3.seg 04379243
+03636649/points/1475fe59961fc726f096eadaad23f93d.pts 03636649/expert_verified/points_label/1475fe59961fc726f096eadaad23f93d.seg 03636649
+03790512/points/7d75e8200565ffa7b37378f3c85478b4.pts 03790512/expert_verified/points_label/7d75e8200565ffa7b37378f3c85478b4.seg 03790512
+04379243/points/852826a94cce36ea9f1deb04fb8ae481.pts 04379243/expert_verified/points_label/852826a94cce36ea9f1deb04fb8ae481.seg 04379243
+03001627/points/9c50878c91aeb8126bb6bc0db07c71e8.pts 03001627/expert_verified/points_label/9c50878c91aeb8126bb6bc0db07c71e8.seg 03001627
+02691156/points/ce827e4c857d553f71d03b466c72ce41.pts 02691156/expert_verified/points_label/ce827e4c857d553f71d03b466c72ce41.seg 02691156
+03001627/points/3aab16309520fb21dc0a8cba62d9a78a.pts 03001627/expert_verified/points_label/3aab16309520fb21dc0a8cba62d9a78a.seg 03001627
+03001627/points/697cfbe6e043136b737a00f007529fbf.pts 03001627/expert_verified/points_label/697cfbe6e043136b737a00f007529fbf.seg 03001627
+04379243/points/fd7769d0eba554c53def89b32cef8e45.pts 04379243/expert_verified/points_label/fd7769d0eba554c53def89b32cef8e45.seg 04379243
+03948459/points/d7e86e0e5b1982d4bf0ab4d7096d87f2.pts 03948459/expert_verified/points_label/d7e86e0e5b1982d4bf0ab4d7096d87f2.seg 03948459
+03001627/points/70cb8d70d961ca48b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/70cb8d70d961ca48b04cb542e2c50eb4.seg 03001627
+03636649/points/c26b7862f2afb7ee4b3c42e318f3affc.pts 03636649/expert_verified/points_label/c26b7862f2afb7ee4b3c42e318f3affc.seg 03636649
+03624134/points/906b20dc0a5a5022714112b147c95c8b.pts 03624134/expert_verified/points_label/906b20dc0a5a5022714112b147c95c8b.seg 03624134
+03001627/points/f5caa9b5ada31a8b3cf15c77de45986.pts 03001627/expert_verified/points_label/f5caa9b5ada31a8b3cf15c77de45986.seg 03001627
+04379243/points/6110d87def4fa88c154c6bbaeb7d331f.pts 04379243/expert_verified/points_label/6110d87def4fa88c154c6bbaeb7d331f.seg 04379243
+03642806/points/b5f6fd84a3f44ddb1aa47689117a61e1.pts 03642806/expert_verified/points_label/b5f6fd84a3f44ddb1aa47689117a61e1.seg 03642806
+03001627/points/95317d46812e4ed4df5aea2392d894b4.pts 03001627/expert_verified/points_label/95317d46812e4ed4df5aea2392d894b4.seg 03001627
+02691156/points/471ca950dbdf0c6c5f80f808704d6409.pts 02691156/expert_verified/points_label/471ca950dbdf0c6c5f80f808704d6409.seg 02691156
+04379243/points/c9f85a671d551086d61f9b2773e1d72a.pts 04379243/expert_verified/points_label/c9f85a671d551086d61f9b2773e1d72a.seg 04379243
+04379243/points/70f1b5f74faa9bda664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/70f1b5f74faa9bda664b3b9b23ddfcbc.seg 04379243
+02691156/points/9a266b3a734e374687bf26680c510802.pts 02691156/expert_verified/points_label/9a266b3a734e374687bf26680c510802.seg 02691156
+03001627/points/4c0983329afcd06f730e89ca0d2d13c3.pts 03001627/expert_verified/points_label/4c0983329afcd06f730e89ca0d2d13c3.seg 03001627
+04379243/points/a7172fa4177661f4858699aaad4acee4.pts 04379243/expert_verified/points_label/a7172fa4177661f4858699aaad4acee4.seg 04379243
+04379243/points/504d908a55f3e0c764810cc21086da42.pts 04379243/expert_verified/points_label/504d908a55f3e0c764810cc21086da42.seg 04379243
+03948459/points/7ba9f65e926d5e3e6fe695987d47043.pts 03948459/expert_verified/points_label/7ba9f65e926d5e3e6fe695987d47043.seg 03948459
+04379243/points/5b546ef5de5d10f3ecc9201d3d846bc1.pts 04379243/expert_verified/points_label/5b546ef5de5d10f3ecc9201d3d846bc1.seg 04379243
+04379243/points/80f986ae572fce791429f9a19502375a.pts 04379243/expert_verified/points_label/80f986ae572fce791429f9a19502375a.seg 04379243
+04379243/points/fd7a579772b195532de421c2ab5cfb52.pts 04379243/expert_verified/points_label/fd7a579772b195532de421c2ab5cfb52.seg 04379243
+03001627/points/e09466e9c122dbfdf51f77a6d7299806.pts 03001627/expert_verified/points_label/e09466e9c122dbfdf51f77a6d7299806.seg 03001627
+04379243/points/2a80c95b4bbcb73d87ed2480ebb0f3d2.pts 04379243/expert_verified/points_label/2a80c95b4bbcb73d87ed2480ebb0f3d2.seg 04379243
+03467517/points/e0d74618e316b0f16d9376f644442e99.pts 03467517/expert_verified/points_label/e0d74618e316b0f16d9376f644442e99.seg 03467517
+03001627/points/587ebb2aa71acfe644dd3aaee16d3f4c.pts 03001627/expert_verified/points_label/587ebb2aa71acfe644dd3aaee16d3f4c.seg 03001627
+03467517/points/10d2c216c70b788485b61f146daff2fb.pts 03467517/expert_verified/points_label/10d2c216c70b788485b61f146daff2fb.seg 03467517
+04379243/points/3c72ddd0dca19bbedcfcef693e7ec696.pts 04379243/expert_verified/points_label/3c72ddd0dca19bbedcfcef693e7ec696.seg 04379243
+03001627/points/2742c0a5e984d92fa0dcc52ca811e565.pts 03001627/expert_verified/points_label/2742c0a5e984d92fa0dcc52ca811e565.seg 03001627
+03624134/points/792f252dcb06f042dd56c1edf3f6e336.pts 03624134/expert_verified/points_label/792f252dcb06f042dd56c1edf3f6e336.seg 03624134
+02691156/points/8fa9e2e8dbed43911f32208e53f871eb.pts 02691156/expert_verified/points_label/8fa9e2e8dbed43911f32208e53f871eb.seg 02691156
+03001627/points/d4f5c3e3eab52d0a3334fb6668ccd834.pts 03001627/expert_verified/points_label/d4f5c3e3eab52d0a3334fb6668ccd834.seg 03001627
+03642806/points/520d98e360cf44ec8139dd63d55edc44.pts 03642806/expert_verified/points_label/520d98e360cf44ec8139dd63d55edc44.seg 03642806
+03467517/points/2eba922263fc1580cc010a80df5d3c87.pts 03467517/expert_verified/points_label/2eba922263fc1580cc010a80df5d3c87.seg 03467517
+04379243/points/53c11596c3fc36a8a5094cb6d104b35.pts 04379243/expert_verified/points_label/53c11596c3fc36a8a5094cb6d104b35.seg 04379243
+03467517/points/265009e163bf5c6f69da8e7f9a803d12.pts 03467517/expert_verified/points_label/265009e163bf5c6f69da8e7f9a803d12.seg 03467517
+04379243/points/fbdf9bffeb353474c3a767747b75e56.pts 04379243/expert_verified/points_label/fbdf9bffeb353474c3a767747b75e56.seg 04379243
+03636649/points/b4af7e9a7338a9a3225afab722d9fd2.pts 03636649/expert_verified/points_label/b4af7e9a7338a9a3225afab722d9fd2.seg 03636649
+03001627/points/55eeb952519ceb87c3bd24f986301745.pts 03001627/expert_verified/points_label/55eeb952519ceb87c3bd24f986301745.seg 03001627
+04379243/points/2259e09ebd0ed2befebad4f49b26ec52.pts 04379243/expert_verified/points_label/2259e09ebd0ed2befebad4f49b26ec52.seg 04379243
+04379243/points/63fedc0334f5552dbec3a71604e140e3.pts 04379243/expert_verified/points_label/63fedc0334f5552dbec3a71604e140e3.seg 04379243
+03001627/points/70ac5cb405df84575e62305d14755686.pts 03001627/expert_verified/points_label/70ac5cb405df84575e62305d14755686.seg 03001627
+03001627/points/3f41b4339ebd59c1c397356311cbeea4.pts 03001627/expert_verified/points_label/3f41b4339ebd59c1c397356311cbeea4.seg 03001627
+04379243/points/10bb44a54a12a74e4719088c8e42c6ab.pts 04379243/expert_verified/points_label/10bb44a54a12a74e4719088c8e42c6ab.seg 04379243
+04379243/points/a83cda80e5c5a0fc3719086e0b4ab8be.pts 04379243/expert_verified/points_label/a83cda80e5c5a0fc3719086e0b4ab8be.seg 04379243
+04379243/points/74983e99e7606eb114708467db3d00e2.pts 04379243/expert_verified/points_label/74983e99e7606eb114708467db3d00e2.seg 04379243
+03001627/points/e052eaa1d5bbe795ded10515704c9720.pts 03001627/expert_verified/points_label/e052eaa1d5bbe795ded10515704c9720.seg 03001627
+02691156/points/35892510dcd7cebb87bf26680c510802.pts 02691156/expert_verified/points_label/35892510dcd7cebb87bf26680c510802.seg 02691156
+03001627/points/7f73cc6c1c9121a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/7f73cc6c1c9121a9b9f2eb77f5e247e.seg 03001627
+03001627/points/2a8554af80cfa5e719fb4103277a6b93.pts 03001627/expert_verified/points_label/2a8554af80cfa5e719fb4103277a6b93.seg 03001627
+04379243/points/f82a5f3c2a57655d825da2b9ec9c8c29.pts 04379243/expert_verified/points_label/f82a5f3c2a57655d825da2b9ec9c8c29.seg 04379243
+02691156/points/319cf93077118d19f64801ad2940cdd5.pts 02691156/expert_verified/points_label/319cf93077118d19f64801ad2940cdd5.seg 02691156
+03790512/points/5bb3597d49c58017b37378f3c85478b4.pts 03790512/expert_verified/points_label/5bb3597d49c58017b37378f3c85478b4.seg 03790512
+02958343/points/17926c1ef484b73e6758a098566bc94e.pts 02958343/expert_verified/points_label/17926c1ef484b73e6758a098566bc94e.seg 02958343
+04379243/points/345c1bb95b12ff8c013a7bed5288654.pts 04379243/expert_verified/points_label/345c1bb95b12ff8c013a7bed5288654.seg 04379243
+03001627/points/3b788994cd578990c35131da26f8061a.pts 03001627/expert_verified/points_label/3b788994cd578990c35131da26f8061a.seg 03001627
+03636649/points/c25cc72cd06852e75bbea6ee257e41cc.pts 03636649/expert_verified/points_label/c25cc72cd06852e75bbea6ee257e41cc.seg 03636649
+03001627/points/4e4570768f981ca7b95617254e8005c0.pts 03001627/expert_verified/points_label/4e4570768f981ca7b95617254e8005c0.seg 03001627
+03642806/points/ef6d92c90aeabf5becae27d182a3e41c.pts 03642806/expert_verified/points_label/ef6d92c90aeabf5becae27d182a3e41c.seg 03642806
+04379243/points/97718e2651d22b3a74740f837351e7eb.pts 04379243/expert_verified/points_label/97718e2651d22b3a74740f837351e7eb.seg 04379243
+03948459/points/1f646ff59cabdddcd810dcd63f342aca.pts 03948459/expert_verified/points_label/1f646ff59cabdddcd810dcd63f342aca.seg 03948459
+02958343/points/74f7b559d6af926012f2e446484bbaf7.pts 02958343/expert_verified/points_label/74f7b559d6af926012f2e446484bbaf7.seg 02958343
+03001627/points/8b3619396de4df10db8860d0872e9c55.pts 03001627/expert_verified/points_label/8b3619396de4df10db8860d0872e9c55.seg 03001627
+03001627/points/44ddb3d46266bb0ffebad4f49b26ec52.pts 03001627/expert_verified/points_label/44ddb3d46266bb0ffebad4f49b26ec52.seg 03001627
+03001627/points/a5f300f3975497fa9dcf2183c858e6e5.pts 03001627/expert_verified/points_label/a5f300f3975497fa9dcf2183c858e6e5.seg 03001627
+03467517/points/113b65f0e68314737c481698bd5233b4.pts 03467517/expert_verified/points_label/113b65f0e68314737c481698bd5233b4.seg 03467517
+03001627/points/49795a9ebd9a9c6d2c697f0a1454869.pts 03001627/expert_verified/points_label/49795a9ebd9a9c6d2c697f0a1454869.seg 03001627
+03001627/points/5822ae77b06bea3091da37ff8bdd2524.pts 03001627/expert_verified/points_label/5822ae77b06bea3091da37ff8bdd2524.seg 03001627
+03467517/points/15222c5926c7058cc6df7dab8e567ef6.pts 03467517/expert_verified/points_label/15222c5926c7058cc6df7dab8e567ef6.seg 03467517
+02691156/points/14d9c576d06622198f52dc705c3109b9.pts 02691156/expert_verified/points_label/14d9c576d06622198f52dc705c3109b9.seg 02691156
+04379243/points/62ae9ded861138be9d2be74cfb51ade1.pts 04379243/expert_verified/points_label/62ae9ded861138be9d2be74cfb51ade1.seg 04379243
+02958343/points/7b067be3aa39b1a124853ec273f6c1d2.pts 02958343/expert_verified/points_label/7b067be3aa39b1a124853ec273f6c1d2.seg 02958343
+03636649/points/66cf69a98ff895e2b55fde51a411949f.pts 03636649/expert_verified/points_label/66cf69a98ff895e2b55fde51a411949f.seg 03636649
+04379243/points/3253f2c59e6bd2a119fb4103277a6b93.pts 04379243/expert_verified/points_label/3253f2c59e6bd2a119fb4103277a6b93.seg 04379243
+02691156/points/fe0c4db38fb6399990b1d6deb98feec6.pts 02691156/expert_verified/points_label/fe0c4db38fb6399990b1d6deb98feec6.seg 02691156
+02691156/points/6d93492543d1087eb87697d3904b168b.pts 02691156/expert_verified/points_label/6d93492543d1087eb87697d3904b168b.seg 02691156
+03636649/points/402f7ce2b87e7d1ac066b9622c005c53.pts 03636649/expert_verified/points_label/402f7ce2b87e7d1ac066b9622c005c53.seg 03636649
+04379243/points/272a4cf3cfff3eb1e173cee47fbaa88.pts 04379243/expert_verified/points_label/272a4cf3cfff3eb1e173cee47fbaa88.seg 04379243
+02691156/points/6420a3ff5e526d59e16519c843f95ce0.pts 02691156/expert_verified/points_label/6420a3ff5e526d59e16519c843f95ce0.seg 02691156
+03001627/points/487040c5fdc68fdfe6cfc789522bfbab.pts 03001627/expert_verified/points_label/487040c5fdc68fdfe6cfc789522bfbab.seg 03001627
+04379243/points/8f48ccd17a15baf5ce01c07526cf2aa4.pts 04379243/expert_verified/points_label/8f48ccd17a15baf5ce01c07526cf2aa4.seg 04379243
+03001627/points/40e5d8e71ee3902a31358207d42bcb21.pts 03001627/expert_verified/points_label/40e5d8e71ee3902a31358207d42bcb21.seg 03001627
+03636649/points/68491d576b5d35aade8e7376ce4e111f.pts 03636649/expert_verified/points_label/68491d576b5d35aade8e7376ce4e111f.seg 03636649
+03467517/points/80aa2f0d66100844925eded29d6897b9.pts 03467517/expert_verified/points_label/80aa2f0d66100844925eded29d6897b9.seg 03467517
+03001627/points/7929676e756dcd41577b5d737869717e.pts 03001627/expert_verified/points_label/7929676e756dcd41577b5d737869717e.seg 03001627
+03001627/points/2cf7ccf97b09187fcb7547c95fbdff26.pts 03001627/expert_verified/points_label/2cf7ccf97b09187fcb7547c95fbdff26.seg 03001627
+02691156/points/e8409b544c626028a9b2becd26dc2fc1.pts 02691156/expert_verified/points_label/e8409b544c626028a9b2becd26dc2fc1.seg 02691156
+02691156/points/1e2de00cf19a0a33554ccf8c30febe7.pts 02691156/expert_verified/points_label/1e2de00cf19a0a33554ccf8c30febe7.seg 02691156
+02691156/points/8f40518bd30467151e5ae32cb9e3711f.pts 02691156/expert_verified/points_label/8f40518bd30467151e5ae32cb9e3711f.seg 02691156
+02958343/points/4f0147c8a158087a4c19dab9f2c7c52d.pts 02958343/expert_verified/points_label/4f0147c8a158087a4c19dab9f2c7c52d.seg 02958343
+03624134/points/954fb0819736737a1b9c8e2fdbfc1118.pts 03624134/expert_verified/points_label/954fb0819736737a1b9c8e2fdbfc1118.seg 03624134
+04379243/points/415a08a66b8527519f803a8da27dd9a9.pts 04379243/expert_verified/points_label/415a08a66b8527519f803a8da27dd9a9.seg 04379243
+03001627/points/4bdbecfbc925219157915a20ae9ec6b6.pts 03001627/expert_verified/points_label/4bdbecfbc925219157915a20ae9ec6b6.seg 03001627
+03624134/points/2f74196bd5cb462727c767f081f1365a.pts 03624134/expert_verified/points_label/2f74196bd5cb462727c767f081f1365a.seg 03624134
+02958343/points/b5b6b09711cbee6daa44bfa127abe4bb.pts 02958343/expert_verified/points_label/b5b6b09711cbee6daa44bfa127abe4bb.seg 02958343
+03001627/points/43e74f15a986eb626a90f735365ac29e.pts 03001627/expert_verified/points_label/43e74f15a986eb626a90f735365ac29e.seg 03001627
+03624134/points/385bb539629cd6991dd89e5fcd05911a.pts 03624134/expert_verified/points_label/385bb539629cd6991dd89e5fcd05911a.seg 03624134
+03642806/points/fdec2b8af5dd988cef56c22fd326c67.pts 03642806/expert_verified/points_label/fdec2b8af5dd988cef56c22fd326c67.seg 03642806
+02958343/points/244a8476648bd073834daea73aa18748.pts 02958343/expert_verified/points_label/244a8476648bd073834daea73aa18748.seg 02958343
+03467517/points/d91b0745e57f6508dc6782957fd2f5d2.pts 03467517/expert_verified/points_label/d91b0745e57f6508dc6782957fd2f5d2.seg 03467517
+04379243/points/83f1ff21744e71ad2690c0a5b39562ad.pts 04379243/expert_verified/points_label/83f1ff21744e71ad2690c0a5b39562ad.seg 04379243
+03001627/points/49aa713bec70ee1f1104b8f54582c707.pts 03001627/expert_verified/points_label/49aa713bec70ee1f1104b8f54582c707.seg 03001627
+03001627/points/9231ef07326eae09b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/9231ef07326eae09b04cb542e2c50eb4.seg 03001627
+03642806/points/b211cfb105e9f97e6436916a86a90ed7.pts 03642806/expert_verified/points_label/b211cfb105e9f97e6436916a86a90ed7.seg 03642806
+03001627/points/fdfedb5bb8cd35374233148ffd345970.pts 03001627/expert_verified/points_label/fdfedb5bb8cd35374233148ffd345970.seg 03001627
+04379243/points/3037fac5bc67207e23fa92d98173c06f.pts 04379243/expert_verified/points_label/3037fac5bc67207e23fa92d98173c06f.seg 04379243
+04379243/points/40d0dd3fe786e120d75c27ddd792e41a.pts 04379243/expert_verified/points_label/40d0dd3fe786e120d75c27ddd792e41a.seg 04379243
+03001627/points/e6ea5e70c2f29d881e8fd793667dc14f.pts 03001627/expert_verified/points_label/e6ea5e70c2f29d881e8fd793667dc14f.seg 03001627
+04379243/points/9502eecc3a057115b129901f80d24b7b.pts 04379243/expert_verified/points_label/9502eecc3a057115b129901f80d24b7b.seg 04379243
+03001627/points/e68bb6f55e2454fac7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e68bb6f55e2454fac7f1f7c0570e288d.seg 03001627
+02691156/points/9bd8d0fa75bc21c5e3375a6b96a1d765.pts 02691156/expert_verified/points_label/9bd8d0fa75bc21c5e3375a6b96a1d765.seg 02691156
+02958343/points/1714b6e57c8c4983fb1aad5dae793ff4.pts 02958343/expert_verified/points_label/1714b6e57c8c4983fb1aad5dae793ff4.seg 02958343
+02691156/points/8a84a26158da1db7668586dcfb752ad.pts 02691156/expert_verified/points_label/8a84a26158da1db7668586dcfb752ad.seg 02691156
+02691156/points/36d8c865f766e3e097872638b21438e3.pts 02691156/expert_verified/points_label/36d8c865f766e3e097872638b21438e3.seg 02691156
+03001627/points/96e8a51b1680b756e99481ddc3bbddfb.pts 03001627/expert_verified/points_label/96e8a51b1680b756e99481ddc3bbddfb.seg 03001627
+02958343/points/37ad66d0433beb633df8f4ac45647158.pts 02958343/expert_verified/points_label/37ad66d0433beb633df8f4ac45647158.seg 02958343
+04379243/points/56a57ef7c3385c9f2f38c0d2792fb5e.pts 04379243/expert_verified/points_label/56a57ef7c3385c9f2f38c0d2792fb5e.seg 04379243
+03467517/points/dbdf45cab0adbded1f260c1b356c52ce.pts 03467517/expert_verified/points_label/dbdf45cab0adbded1f260c1b356c52ce.seg 03467517
+04379243/points/868bab5194e93577858699aaad4acee4.pts 04379243/expert_verified/points_label/868bab5194e93577858699aaad4acee4.seg 04379243
+04379243/points/2bbd62449b56abee659dda512294c744.pts 04379243/expert_verified/points_label/2bbd62449b56abee659dda512294c744.seg 04379243
+04379243/points/a18aa2d20d516333daf1f22b6daf05ed.pts 04379243/expert_verified/points_label/a18aa2d20d516333daf1f22b6daf05ed.seg 04379243
+03636649/points/7a2362fbddbee9a4d197f67767b32741.pts 03636649/expert_verified/points_label/7a2362fbddbee9a4d197f67767b32741.seg 03636649
+03636649/points/f9259d31df38bd5decd204cd7180226d.pts 03636649/expert_verified/points_label/f9259d31df38bd5decd204cd7180226d.seg 03636649
+04379243/points/54e85b248576c4eb57cd80d4b17e7e11.pts 04379243/expert_verified/points_label/54e85b248576c4eb57cd80d4b17e7e11.seg 04379243
+04379243/points/1299579419252fa954b02959579aa6bb.pts 04379243/expert_verified/points_label/1299579419252fa954b02959579aa6bb.seg 04379243
+04379243/points/49ad167497a2af8c9672e39f89e4622e.pts 04379243/expert_verified/points_label/49ad167497a2af8c9672e39f89e4622e.seg 04379243
+04379243/points/55221b101eec29dc656a19d1d18fdbac.pts 04379243/expert_verified/points_label/55221b101eec29dc656a19d1d18fdbac.seg 04379243
+04379243/points/e8870f3190f6b8d4bd1025bd755a15aa.pts 04379243/expert_verified/points_label/e8870f3190f6b8d4bd1025bd755a15aa.seg 04379243
+02691156/points/9818f0b88fed05b24b0a1bcf2fb497ec.pts 02691156/expert_verified/points_label/9818f0b88fed05b24b0a1bcf2fb497ec.seg 02691156
+02691156/points/9ba460913d86466f62347b4731688b0f.pts 02691156/expert_verified/points_label/9ba460913d86466f62347b4731688b0f.seg 02691156
+04379243/points/574447022c4473d455f46d55537192b6.pts 04379243/expert_verified/points_label/574447022c4473d455f46d55537192b6.seg 04379243
+04379243/points/7b5b7bfa8580e913e2580b23e60e4674.pts 04379243/expert_verified/points_label/7b5b7bfa8580e913e2580b23e60e4674.seg 04379243
+04225987/points/48f26ddc704fec2f379c6a1d59ef7283.pts 04225987/expert_verified/points_label/48f26ddc704fec2f379c6a1d59ef7283.seg 04225987
+04379243/points/b7821e69687d767aab610b0c94236463.pts 04379243/expert_verified/points_label/b7821e69687d767aab610b0c94236463.seg 04379243
+02691156/points/e42443669339a6c1a5a118bd15e6e34f.pts 02691156/expert_verified/points_label/e42443669339a6c1a5a118bd15e6e34f.seg 02691156
+04379243/points/2444551d00693a0fab610b0c94236463.pts 04379243/expert_verified/points_label/2444551d00693a0fab610b0c94236463.seg 04379243
+03467517/points/5e452914684ea7fc398707f20de9db08.pts 03467517/expert_verified/points_label/5e452914684ea7fc398707f20de9db08.seg 03467517
+03001627/points/cc6840207c0cf55db30e42459dcb06f.pts 03001627/expert_verified/points_label/cc6840207c0cf55db30e42459dcb06f.seg 03001627
+04379243/points/9046b2e610065fe5a5d95e73eecd308a.pts 04379243/expert_verified/points_label/9046b2e610065fe5a5d95e73eecd308a.seg 04379243
+03467517/points/c651a91562b86ed8edb9371445f615ae.pts 03467517/expert_verified/points_label/c651a91562b86ed8edb9371445f615ae.seg 03467517
+03001627/points/9bb6d3d76d4f5ba94b3c42e318f3affc.pts 03001627/expert_verified/points_label/9bb6d3d76d4f5ba94b3c42e318f3affc.seg 03001627
+03001627/points/7fb336186da77367962800be79c6e52.pts 03001627/expert_verified/points_label/7fb336186da77367962800be79c6e52.seg 03001627
+04379243/points/b69b2ff85d0ec661d8f9dd7647048a0c.pts 04379243/expert_verified/points_label/b69b2ff85d0ec661d8f9dd7647048a0c.seg 04379243
+03001627/points/d2815e678f173616e6cfc789522bfbab.pts 03001627/expert_verified/points_label/d2815e678f173616e6cfc789522bfbab.seg 03001627
+03636649/points/b8350fcf08ff0b2ca950bf8f33cff658.pts 03636649/expert_verified/points_label/b8350fcf08ff0b2ca950bf8f33cff658.seg 03636649
+04379243/points/202e7b5c3ec079e299e8bf807e902261.pts 04379243/expert_verified/points_label/202e7b5c3ec079e299e8bf807e902261.seg 04379243
+03001627/points/c8938f54fecab41e77cd061c90fcdb44.pts 03001627/expert_verified/points_label/c8938f54fecab41e77cd061c90fcdb44.seg 03001627
+04379243/points/894e095c7036c8411933ffef19678834.pts 04379243/expert_verified/points_label/894e095c7036c8411933ffef19678834.seg 04379243
+03001627/points/4362e715455f42ba9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/4362e715455f42ba9b9f2eb77f5e247e.seg 03001627
+04379243/points/8963760f8bec0fee7f807d3c406ee.pts 04379243/expert_verified/points_label/8963760f8bec0fee7f807d3c406ee.seg 04379243
+03948459/points/4acb6494e3aaeb39998978df244b5bd.pts 03948459/expert_verified/points_label/4acb6494e3aaeb39998978df244b5bd.seg 03948459
+03636649/points/c1b939cc403a0662664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/c1b939cc403a0662664b3b9b23ddfcbc.seg 03636649
+04379243/points/e64876f5590e6fb7c3bd24f986301745.pts 04379243/expert_verified/points_label/e64876f5590e6fb7c3bd24f986301745.seg 04379243
+02691156/points/b8ce3803485b620b2c674305897e1782.pts 02691156/expert_verified/points_label/b8ce3803485b620b2c674305897e1782.seg 02691156
+03636649/points/a60c6cf7d4893f2ba26bf7a8fd4719ad.pts 03636649/expert_verified/points_label/a60c6cf7d4893f2ba26bf7a8fd4719ad.seg 03636649
+04379243/points/6ca66a443e651c1423500a5b036df62e.pts 04379243/expert_verified/points_label/6ca66a443e651c1423500a5b036df62e.seg 04379243
+04379243/points/51930b149cf6125373fa072a624ce947.pts 04379243/expert_verified/points_label/51930b149cf6125373fa072a624ce947.seg 04379243
+02691156/points/eb658ff31f0becea1d0f8853f6d023e3.pts 02691156/expert_verified/points_label/eb658ff31f0becea1d0f8853f6d023e3.seg 02691156
+03642806/points/3f45cde6f7a13138e256fb3794905772.pts 03642806/expert_verified/points_label/3f45cde6f7a13138e256fb3794905772.seg 03642806
+03001627/points/ea572cc193b804399c66df0f068d2a36.pts 03001627/expert_verified/points_label/ea572cc193b804399c66df0f068d2a36.seg 03001627
+03001627/points/9e0a0ad80be6df7789d2595edb5088ee.pts 03001627/expert_verified/points_label/9e0a0ad80be6df7789d2595edb5088ee.seg 03001627
+04379243/points/8eed35fd5b777acf58316b27df6c8e87.pts 04379243/expert_verified/points_label/8eed35fd5b777acf58316b27df6c8e87.seg 04379243
+03642806/points/5baaa726f51cd09b507f3bf1d3472684.pts 03642806/expert_verified/points_label/5baaa726f51cd09b507f3bf1d3472684.seg 03642806
+02691156/points/789f032dccc6092977b7d0d4764c121d.pts 02691156/expert_verified/points_label/789f032dccc6092977b7d0d4764c121d.seg 02691156
+03001627/points/9682d28e03acd2e3735013f3db728e20.pts 03001627/expert_verified/points_label/9682d28e03acd2e3735013f3db728e20.seg 03001627
+02958343/points/b50f9931670e25ef44ccce632b473b8c.pts 02958343/expert_verified/points_label/b50f9931670e25ef44ccce632b473b8c.seg 02958343
+03467517/points/d3972d599036251369da8e7f9a803d12.pts 03467517/expert_verified/points_label/d3972d599036251369da8e7f9a803d12.seg 03467517
+02691156/points/329987191cce68bfe64acd170567d820.pts 02691156/expert_verified/points_label/329987191cce68bfe64acd170567d820.seg 02691156
+03636649/points/ab3e153cd23e992b576a354bb9319732.pts 03636649/expert_verified/points_label/ab3e153cd23e992b576a354bb9319732.seg 03636649
+04379243/points/f850a69b0d308fbc19fb4103277a6b93.pts 04379243/expert_verified/points_label/f850a69b0d308fbc19fb4103277a6b93.seg 04379243
+04379243/points/1645b28322131b6258c407efcf93be6b.pts 04379243/expert_verified/points_label/1645b28322131b6258c407efcf93be6b.seg 04379243
+03001627/points/195464ae11f6bfe1cba091e036bf65ed.pts 03001627/expert_verified/points_label/195464ae11f6bfe1cba091e036bf65ed.seg 03001627
+02691156/points/edd9583988b62c90328f15e6c60d0e90.pts 02691156/expert_verified/points_label/edd9583988b62c90328f15e6c60d0e90.seg 02691156
+04225987/points/36aaae334d636ec28043db94fbc8c982.pts 04225987/expert_verified/points_label/36aaae334d636ec28043db94fbc8c982.seg 04225987
+04379243/points/c3c467718eb9b2a313f96345312df593.pts 04379243/expert_verified/points_label/c3c467718eb9b2a313f96345312df593.seg 04379243
+02691156/points/a1848a4a69b14704ca8607f540cc62ba.pts 02691156/expert_verified/points_label/a1848a4a69b14704ca8607f540cc62ba.seg 02691156
+02958343/points/c8bd4d0ac34266ffaaa232d0915adae9.pts 02958343/expert_verified/points_label/c8bd4d0ac34266ffaaa232d0915adae9.seg 02958343
+04379243/points/ad61a5bc7cba29b88cc413950b617e8f.pts 04379243/expert_verified/points_label/ad61a5bc7cba29b88cc413950b617e8f.seg 04379243
+03642806/points/466ea85bb4653ba3a715ae636b111d77.pts 03642806/expert_verified/points_label/466ea85bb4653ba3a715ae636b111d77.seg 03642806
+03001627/points/e93714e5553f63619215045784774049.pts 03001627/expert_verified/points_label/e93714e5553f63619215045784774049.seg 03001627
+03636649/points/b88c9a7aaab268fb42b08fbc749346d6.pts 03636649/expert_verified/points_label/b88c9a7aaab268fb42b08fbc749346d6.seg 03636649
+03636649/points/6ba931adfa36c7965208aab875b932bc.pts 03636649/expert_verified/points_label/6ba931adfa36c7965208aab875b932bc.seg 03636649
+03001627/points/e3479f55f5894bb3c7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e3479f55f5894bb3c7f1f7c0570e288d.seg 03001627
+03467517/points/4c5288cc18896f8f352e5d4d2615db5b.pts 03467517/expert_verified/points_label/4c5288cc18896f8f352e5d4d2615db5b.seg 03467517
+03001627/points/631e102e9a689339b0ec386df15ab64f.pts 03001627/expert_verified/points_label/631e102e9a689339b0ec386df15ab64f.seg 03001627
+04379243/points/6daed91ae491c9cbe22ea6d770699e4b.pts 04379243/expert_verified/points_label/6daed91ae491c9cbe22ea6d770699e4b.seg 04379243
+03001627/points/40e73a326cf95d0361c93c4994c91bd1.pts 03001627/expert_verified/points_label/40e73a326cf95d0361c93c4994c91bd1.seg 03001627
+03467517/points/dc7708c870000008a24eeca91f583600.pts 03467517/expert_verified/points_label/dc7708c870000008a24eeca91f583600.seg 03467517
+03001627/points/1ac6531a337de85f2f7628d6bf38bcc4.pts 03001627/expert_verified/points_label/1ac6531a337de85f2f7628d6bf38bcc4.seg 03001627
+04379243/points/5191d64e9a1b9664bfdcc70dcc16baa1.pts 04379243/expert_verified/points_label/5191d64e9a1b9664bfdcc70dcc16baa1.seg 04379243
+03636649/points/c4dc0ac169c91ff29f8c3d2002c77ddb.pts 03636649/expert_verified/points_label/c4dc0ac169c91ff29f8c3d2002c77ddb.seg 03636649
+03624134/points/b8648ae17fb9937949f73a97204d432b.pts 03624134/expert_verified/points_label/b8648ae17fb9937949f73a97204d432b.seg 03624134
+04379243/points/a465210c23b0136d7afee304cce81d6f.pts 04379243/expert_verified/points_label/a465210c23b0136d7afee304cce81d6f.seg 04379243
+03001627/points/513686d6d63a1d8e577b5d737869717e.pts 03001627/expert_verified/points_label/513686d6d63a1d8e577b5d737869717e.seg 03001627
+03624134/points/bee1a473472639e25ca3862a7efa6401.pts 03624134/expert_verified/points_label/bee1a473472639e25ca3862a7efa6401.seg 03624134
+02691156/points/adb3ea03d7b954255e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/adb3ea03d7b954255e9e2656aff7dd5b.seg 02691156
+02691156/points/959f28c6724979ef9a6e43b878d5b335.pts 02691156/expert_verified/points_label/959f28c6724979ef9a6e43b878d5b335.seg 02691156
+04379243/points/dec1d2cf8a4563d36cb02543e4df83bf.pts 04379243/expert_verified/points_label/dec1d2cf8a4563d36cb02543e4df83bf.seg 04379243
+03790512/points/a9c432d1dc4034762a45a87054fa7272.pts 03790512/expert_verified/points_label/a9c432d1dc4034762a45a87054fa7272.seg 03790512
+03001627/points/1b5e876f3559c231532a8e162f399205.pts 03001627/expert_verified/points_label/1b5e876f3559c231532a8e162f399205.seg 03001627
+04379243/points/82e5309809e455d5f15fed2243deb166.pts 04379243/expert_verified/points_label/82e5309809e455d5f15fed2243deb166.seg 04379243
+03467517/points/8f1f54d337bf6ccac782e6226a4f593e.pts 03467517/expert_verified/points_label/8f1f54d337bf6ccac782e6226a4f593e.seg 03467517
+04379243/points/67d97102f9c54cc95512673aa47c7e3d.pts 04379243/expert_verified/points_label/67d97102f9c54cc95512673aa47c7e3d.seg 04379243
+02691156/points/e0cc4f538a8da2d65d3bbd70fc7759b7.pts 02691156/expert_verified/points_label/e0cc4f538a8da2d65d3bbd70fc7759b7.seg 02691156
+04379243/points/d0008b042256fb5f7ab911835312d4f1.pts 04379243/expert_verified/points_label/d0008b042256fb5f7ab911835312d4f1.seg 04379243
+03467517/points/44c05e219618a6395b3335548350bdee.pts 03467517/expert_verified/points_label/44c05e219618a6395b3335548350bdee.seg 03467517
+03001627/points/3f7808c221b01668b4d174e5c61f344.pts 03001627/expert_verified/points_label/3f7808c221b01668b4d174e5c61f344.seg 03001627
+03467517/points/51abcb617b2faf3a24eeca91f583600.pts 03467517/expert_verified/points_label/51abcb617b2faf3a24eeca91f583600.seg 03467517
+03636649/points/f38370fc4c112017a6e7138fdd58748.pts 03636649/expert_verified/points_label/f38370fc4c112017a6e7138fdd58748.seg 03636649
+03001627/points/37607ea19e352af4fffc97a61124b1a9.pts 03001627/expert_verified/points_label/37607ea19e352af4fffc97a61124b1a9.seg 03001627
+02958343/points/2cb6de89f5b6e702b626f6a649199824.pts 02958343/expert_verified/points_label/2cb6de89f5b6e702b626f6a649199824.seg 02958343
+04099429/points/d781243cc1d1d2e91a0ec553feb1c2c3.pts 04099429/expert_verified/points_label/d781243cc1d1d2e91a0ec553feb1c2c3.seg 04099429
+04379243/points/900afcc9f0f5fbfd858699aaad4acee4.pts 04379243/expert_verified/points_label/900afcc9f0f5fbfd858699aaad4acee4.seg 04379243
+03001627/points/d13eb19745344ae5fb0eb7e753c06942.pts 03001627/expert_verified/points_label/d13eb19745344ae5fb0eb7e753c06942.seg 03001627
+02958343/points/5785192c95cdd67b704715417c0f83c1.pts 02958343/expert_verified/points_label/5785192c95cdd67b704715417c0f83c1.seg 02958343
+03001627/points/5bb5b15807158f71504721639e19f609.pts 03001627/expert_verified/points_label/5bb5b15807158f71504721639e19f609.seg 03001627
+03636649/points/ba05f660341b7b7b70be09f44cb2fef5.pts 03636649/expert_verified/points_label/ba05f660341b7b7b70be09f44cb2fef5.seg 03636649
+02691156/points/97066012fbca5983c74417871493eae8.pts 02691156/expert_verified/points_label/97066012fbca5983c74417871493eae8.seg 02691156
+03001627/points/4499729e53c858ae71a782a4379556c7.pts 03001627/expert_verified/points_label/4499729e53c858ae71a782a4379556c7.seg 03001627
+04379243/points/41d280b7db61ebddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/41d280b7db61ebddfebad4f49b26ec52.seg 04379243
+02773838/points/30bf69aa24dbb3fc9de193e488fc4dce.pts 02773838/expert_verified/points_label/30bf69aa24dbb3fc9de193e488fc4dce.seg 02773838
+03467517/points/6c9a9c0e2af9d5b35f713e773d664ec2.pts 03467517/expert_verified/points_label/6c9a9c0e2af9d5b35f713e773d664ec2.seg 03467517
+04379243/points/f979c7a650d29ea819fb4103277a6b93.pts 04379243/expert_verified/points_label/f979c7a650d29ea819fb4103277a6b93.seg 04379243
+03001627/points/b631b78c2dcc748cba5342d638d0c267.pts 03001627/expert_verified/points_label/b631b78c2dcc748cba5342d638d0c267.seg 03001627
+03467517/points/d2ad57f36e00c602baba3b7560fe62f4.pts 03467517/expert_verified/points_label/d2ad57f36e00c602baba3b7560fe62f4.seg 03467517
+04379243/points/5771d5a3084b3ca3a2d7b309863cb1b.pts 04379243/expert_verified/points_label/5771d5a3084b3ca3a2d7b309863cb1b.seg 04379243
+03636649/points/2d638c6b6b2feb9248da169d95204ce2.pts 03636649/expert_verified/points_label/2d638c6b6b2feb9248da169d95204ce2.seg 03636649
+02958343/points/63a4e46bbbd855fc2b63d3b2a8c4e8b.pts 02958343/expert_verified/points_label/63a4e46bbbd855fc2b63d3b2a8c4e8b.seg 02958343
+04379243/points/8c67fd5a15e8d9defebad4f49b26ec52.pts 04379243/expert_verified/points_label/8c67fd5a15e8d9defebad4f49b26ec52.seg 04379243
+03467517/points/28c3903b29f6b38363e148e250c0340d.pts 03467517/expert_verified/points_label/28c3903b29f6b38363e148e250c0340d.seg 03467517
+04379243/points/ab2967188299bea54cb0654f4cfa9684.pts 04379243/expert_verified/points_label/ab2967188299bea54cb0654f4cfa9684.seg 04379243
+02691156/points/a9a7f21271b3efbaf446f92b52bbd82a.pts 02691156/expert_verified/points_label/a9a7f21271b3efbaf446f92b52bbd82a.seg 02691156
+04379243/points/c3e43144fd61c56f19fb4103277a6b93.pts 04379243/expert_verified/points_label/c3e43144fd61c56f19fb4103277a6b93.seg 04379243
+03001627/points/7fcde5fc8e023dd2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/7fcde5fc8e023dd2a6fee8e2140acec9.seg 03001627
+03790512/points/70d9cc5115bfedeeab548456bc75847f.pts 03790512/expert_verified/points_label/70d9cc5115bfedeeab548456bc75847f.seg 03790512
+03001627/points/3c0dd3719baecf3319fb4103277a6b93.pts 03001627/expert_verified/points_label/3c0dd3719baecf3319fb4103277a6b93.seg 03001627
+03636649/points/55077c2175d97b8889ab11a408196888.pts 03636649/expert_verified/points_label/55077c2175d97b8889ab11a408196888.seg 03636649
+04379243/points/71fc8c7cdb48978282fa4d4f2c19b2ce.pts 04379243/expert_verified/points_label/71fc8c7cdb48978282fa4d4f2c19b2ce.seg 04379243
+04379243/points/f0d5eefef970fa4b9f2349486c570dd4.pts 04379243/expert_verified/points_label/f0d5eefef970fa4b9f2349486c570dd4.seg 04379243
+03642806/points/90c01fd78513bb99c9b20aa1b8066c46.pts 03642806/expert_verified/points_label/90c01fd78513bb99c9b20aa1b8066c46.seg 03642806
+04379243/points/ca6c07357ba5125b8e2adb29857f8a1.pts 04379243/expert_verified/points_label/ca6c07357ba5125b8e2adb29857f8a1.seg 04379243
+04379243/points/634bcd3197e337aafe4e4de1adda2150.pts 04379243/expert_verified/points_label/634bcd3197e337aafe4e4de1adda2150.seg 04379243
+04379243/points/7b411de42d4960eb6e25f3efedf6785f.pts 04379243/expert_verified/points_label/7b411de42d4960eb6e25f3efedf6785f.seg 04379243
+04379243/points/878414eb6e86494d9a8ef44e1d2c5b75.pts 04379243/expert_verified/points_label/878414eb6e86494d9a8ef44e1d2c5b75.seg 04379243
+03001627/points/f3fa7bd00b76f6a87a8a6b9421844d96.pts 03001627/expert_verified/points_label/f3fa7bd00b76f6a87a8a6b9421844d96.seg 03001627
+03467517/points/a2c1ee6a7ddb50a493f0194265a9746c.pts 03467517/expert_verified/points_label/a2c1ee6a7ddb50a493f0194265a9746c.seg 03467517
+04379243/points/25bc205f6de491f4ccde40b1205ec7ff.pts 04379243/expert_verified/points_label/25bc205f6de491f4ccde40b1205ec7ff.seg 04379243
+03636649/points/771d4def2e44bc169eb34048e600e1ea.pts 03636649/expert_verified/points_label/771d4def2e44bc169eb34048e600e1ea.seg 03636649
+03624134/points/6ebe2a22b8d9d70862a95b942081dfee.pts 03624134/expert_verified/points_label/6ebe2a22b8d9d70862a95b942081dfee.seg 03624134
+02691156/points/9b1fc3881a5335cb44012f72ba1e15a8.pts 02691156/expert_verified/points_label/9b1fc3881a5335cb44012f72ba1e15a8.seg 02691156
+03001627/points/3dc252fd90d82b18c9be65dfbd21428b.pts 03001627/expert_verified/points_label/3dc252fd90d82b18c9be65dfbd21428b.seg 03001627
+04379243/points/f6f180c3e72caacb5077539b37310c29.pts 04379243/expert_verified/points_label/f6f180c3e72caacb5077539b37310c29.seg 04379243
+03642806/points/25bc168b214b54799e28e9cf32e5157.pts 03642806/expert_verified/points_label/25bc168b214b54799e28e9cf32e5157.seg 03642806
+04379243/points/ac9fae8af57729945eee45c00c4de9d3.pts 04379243/expert_verified/points_label/ac9fae8af57729945eee45c00c4de9d3.seg 04379243
+03001627/points/e8126f9e2d106620d2f33aaf794b5932.pts 03001627/expert_verified/points_label/e8126f9e2d106620d2f33aaf794b5932.seg 03001627
+03624134/points/3dc5a6d79ed591bda709dec9a148b2fe.pts 03624134/expert_verified/points_label/3dc5a6d79ed591bda709dec9a148b2fe.seg 03624134
+04379243/points/8f73278956fecb80327289c00b6dc9ca.pts 04379243/expert_verified/points_label/8f73278956fecb80327289c00b6dc9ca.seg 04379243
+03948459/points/5f46578efd2c65e5d4ac2f5fcaa742ac.pts 03948459/expert_verified/points_label/5f46578efd2c65e5d4ac2f5fcaa742ac.seg 03948459
+03624134/points/a05ea45d396c86784e52b614e584a543.pts 03624134/expert_verified/points_label/a05ea45d396c86784e52b614e584a543.seg 03624134
+03001627/points/cd939609247df917d9d3572bbd9cf789.pts 03001627/expert_verified/points_label/cd939609247df917d9d3572bbd9cf789.seg 03001627
+03261776/points/17c9866b42ae1831df4cfe396cee719e.pts 03261776/expert_verified/points_label/17c9866b42ae1831df4cfe396cee719e.seg 03261776
+03797390/points/3d3e993f7baa4d7ef1ff24a8b1564a36.pts 03797390/expert_verified/points_label/3d3e993f7baa4d7ef1ff24a8b1564a36.seg 03797390
+03467517/points/36b49aff54f6d7e893f0194265a9746c.pts 03467517/expert_verified/points_label/36b49aff54f6d7e893f0194265a9746c.seg 03467517
+02691156/points/48df2496242053da4ee0fb6a51564c3.pts 02691156/expert_verified/points_label/48df2496242053da4ee0fb6a51564c3.seg 02691156
+04379243/points/7ad23def902ea4f37b7a2c2624e46d0a.pts 04379243/expert_verified/points_label/7ad23def902ea4f37b7a2c2624e46d0a.seg 04379243
+04379243/points/1a8fe5baa2d4b5f7ee84261b3d20656.pts 04379243/expert_verified/points_label/1a8fe5baa2d4b5f7ee84261b3d20656.seg 04379243
+03467517/points/d685415d4fcd3205a24eeca91f583600.pts 03467517/expert_verified/points_label/d685415d4fcd3205a24eeca91f583600.seg 03467517
+02958343/points/8e308d28d463427f43f0e92e826556b8.pts 02958343/expert_verified/points_label/8e308d28d463427f43f0e92e826556b8.seg 02958343
+04379243/points/dc68436ab1a576f6573d2c9ac4b23fdf.pts 04379243/expert_verified/points_label/dc68436ab1a576f6573d2c9ac4b23fdf.seg 04379243
+04379243/points/1a153612bcdab3e23cc149415a408229.pts 04379243/expert_verified/points_label/1a153612bcdab3e23cc149415a408229.seg 04379243
+03001627/points/19ce953da9aa8065d747a43c11e738e9.pts 03001627/expert_verified/points_label/19ce953da9aa8065d747a43c11e738e9.seg 03001627
+04379243/points/db2d4f781756e687d8864caa856253b.pts 04379243/expert_verified/points_label/db2d4f781756e687d8864caa856253b.seg 04379243
+04379243/points/d8f851bbc98dccc23fa92d98173c06f.pts 04379243/expert_verified/points_label/d8f851bbc98dccc23fa92d98173c06f.seg 04379243
+03467517/points/e585e31db7568c4cf0e1c0df18936d05.pts 03467517/expert_verified/points_label/e585e31db7568c4cf0e1c0df18936d05.seg 03467517
+03001627/points/98ac0106ad244505e04fc3fcc1c852e0.pts 03001627/expert_verified/points_label/98ac0106ad244505e04fc3fcc1c852e0.seg 03001627
+03001627/points/1b81441b7e597235d61420a53a0cb96d.pts 03001627/expert_verified/points_label/1b81441b7e597235d61420a53a0cb96d.seg 03001627
+03001627/points/918145be863f7aeaf050758b903e6054.pts 03001627/expert_verified/points_label/918145be863f7aeaf050758b903e6054.seg 03001627
+02691156/points/1af4b32eafffb0f7ee60c37cbf99c1c.pts 02691156/expert_verified/points_label/1af4b32eafffb0f7ee60c37cbf99c1c.seg 02691156
+03636649/points/f4e1a4032b1686cec35131da26f8061a.pts 03636649/expert_verified/points_label/f4e1a4032b1686cec35131da26f8061a.seg 03636649
+04379243/points/9c4dfafdbd7f9b76c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/9c4dfafdbd7f9b76c955e5ed03ef3a2f.seg 04379243
+02691156/points/80b8f4da6b77eb66d208f79049825a82.pts 02691156/expert_verified/points_label/80b8f4da6b77eb66d208f79049825a82.seg 02691156
+03642806/points/de2e95eac460c361e862e3cac45aa769.pts 03642806/expert_verified/points_label/de2e95eac460c361e862e3cac45aa769.seg 03642806
+04379243/points/e2571e4eba2d9f5eab610b0c94236463.pts 04379243/expert_verified/points_label/e2571e4eba2d9f5eab610b0c94236463.seg 04379243
+04379243/points/a0445e4888d56666b9d7c2fc41e80228.pts 04379243/expert_verified/points_label/a0445e4888d56666b9d7c2fc41e80228.seg 04379243
+03001627/points/873c017f35957717b56a13a4b2372aa4.pts 03001627/expert_verified/points_label/873c017f35957717b56a13a4b2372aa4.seg 03001627
+03001627/points/3af90da238ac4ddbf91663a74ccd2338.pts 03001627/expert_verified/points_label/3af90da238ac4ddbf91663a74ccd2338.seg 03001627
+02958343/points/9698be0fd3516f01fbeda5389ab05f5f.pts 02958343/expert_verified/points_label/9698be0fd3516f01fbeda5389ab05f5f.seg 02958343
+03790512/points/655b9dd9425cc3a12a45a87054fa7272.pts 03790512/expert_verified/points_label/655b9dd9425cc3a12a45a87054fa7272.seg 03790512
+04379243/points/ec1c92efffb9ee78beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/ec1c92efffb9ee78beedb4c8fd29e2d1.seg 04379243
+04379243/points/3b7fc97192e483ebb0bf045ee98272fc.pts 04379243/expert_verified/points_label/3b7fc97192e483ebb0bf045ee98272fc.seg 04379243
+03467517/points/8c3d3e69d03d3443e84e459fb01822f.pts 03467517/expert_verified/points_label/8c3d3e69d03d3443e84e459fb01822f.seg 03467517
+02691156/points/e0058b4948f87d3b87697d3904b168b.pts 02691156/expert_verified/points_label/e0058b4948f87d3b87697d3904b168b.seg 02691156
+03001627/points/4428b7dc4b6696812905b6e26038a78.pts 03001627/expert_verified/points_label/4428b7dc4b6696812905b6e26038a78.seg 03001627
+03636649/points/f7093dd024fd09fc7219d6d5c4afbaff.pts 03636649/expert_verified/points_label/f7093dd024fd09fc7219d6d5c4afbaff.seg 03636649
+04379243/points/7d0c5e28089c2b7bd99e852ee772dfa4.pts 04379243/expert_verified/points_label/7d0c5e28089c2b7bd99e852ee772dfa4.seg 04379243
+03636649/points/4916f793d87dd184d42b9650f19dd425.pts 03636649/expert_verified/points_label/4916f793d87dd184d42b9650f19dd425.seg 03636649
+04379243/points/1ffcbc064f473b7de7c13848b2d8f5ec.pts 04379243/expert_verified/points_label/1ffcbc064f473b7de7c13848b2d8f5ec.seg 04379243
+03636649/points/e180510d07b65fff571108a6d1e94edd.pts 03636649/expert_verified/points_label/e180510d07b65fff571108a6d1e94edd.seg 03636649
+03636649/points/d9f6bd064c9fd456fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/d9f6bd064c9fd456fcb8d8c6d4df8143.seg 03636649
+04379243/points/ec81c49ee12e8a70fd06de9ba37d44bd.pts 04379243/expert_verified/points_label/ec81c49ee12e8a70fd06de9ba37d44bd.seg 04379243
+03636649/points/4a868756ae6404a5c0bc57897eddf6f.pts 03636649/expert_verified/points_label/4a868756ae6404a5c0bc57897eddf6f.seg 03636649
+02958343/points/9c827e532de4967285089a13cc567dbd.pts 02958343/expert_verified/points_label/9c827e532de4967285089a13cc567dbd.seg 02958343
+03797390/points/1c9f9e25c654cbca3c71bf3f4dd78475.pts 03797390/expert_verified/points_label/1c9f9e25c654cbca3c71bf3f4dd78475.seg 03797390
+03001627/points/ca3670f77268f899febad4f49b26ec52.pts 03001627/expert_verified/points_label/ca3670f77268f899febad4f49b26ec52.seg 03001627
+04379243/points/9b8e6eb835f0c8bcf37af16b2893f1d4.pts 04379243/expert_verified/points_label/9b8e6eb835f0c8bcf37af16b2893f1d4.seg 04379243
+03001627/points/5c9d582488732ee0d7f7a4c4609b0913.pts 03001627/expert_verified/points_label/5c9d582488732ee0d7f7a4c4609b0913.seg 03001627
+04379243/points/684ccc0f629ee45cab610b0c94236463.pts 04379243/expert_verified/points_label/684ccc0f629ee45cab610b0c94236463.seg 04379243
+03001627/points/4913388a4c94547a81806e3880250dff.pts 03001627/expert_verified/points_label/4913388a4c94547a81806e3880250dff.seg 03001627
+03636649/points/73378b714c5bfed2b922d818b19db1e.pts 03636649/expert_verified/points_label/73378b714c5bfed2b922d818b19db1e.seg 03636649
+03001627/points/4a89a789f817ab5414038d588fd1342f.pts 03001627/expert_verified/points_label/4a89a789f817ab5414038d588fd1342f.seg 03001627
+04379243/points/df7761a3b4ac638c9eaceb124b71b7be.pts 04379243/expert_verified/points_label/df7761a3b4ac638c9eaceb124b71b7be.seg 04379243
+03001627/points/46557f689f4cf5dd2acd2bb6205825cb.pts 03001627/expert_verified/points_label/46557f689f4cf5dd2acd2bb6205825cb.seg 03001627
+04379243/points/2db1f557e247ded7e907b6d9dc1d71b7.pts 04379243/expert_verified/points_label/2db1f557e247ded7e907b6d9dc1d71b7.seg 04379243
+04379243/points/b69d9e876e7a80a29f2349486c570dd4.pts 04379243/expert_verified/points_label/b69d9e876e7a80a29f2349486c570dd4.seg 04379243
+04379243/points/a94ea7183f27073248c0c0980e363341.pts 04379243/expert_verified/points_label/a94ea7183f27073248c0c0980e363341.seg 04379243
+03636649/points/8f85c2195890ccf671f0940f5ed452dc.pts 03636649/expert_verified/points_label/8f85c2195890ccf671f0940f5ed452dc.seg 03636649
+02691156/points/cc80380c511ec8e2c91a9d486db717.pts 02691156/expert_verified/points_label/cc80380c511ec8e2c91a9d486db717.seg 02691156
+03642806/points/6b61ef17b4f45050b598e8984f11eb0c.pts 03642806/expert_verified/points_label/6b61ef17b4f45050b598e8984f11eb0c.seg 03642806
+04379243/points/d9ce0b512e0420f8be95ff480950e9ef.pts 04379243/expert_verified/points_label/d9ce0b512e0420f8be95ff480950e9ef.seg 04379243
+04379243/points/c27a1c6a26642c907ecc778b34d42f32.pts 04379243/expert_verified/points_label/c27a1c6a26642c907ecc778b34d42f32.seg 04379243
+04379243/points/debd06d3176a5b728cbb8bac2032149c.pts 04379243/expert_verified/points_label/debd06d3176a5b728cbb8bac2032149c.seg 04379243
+04099429/points/fa07813a89527d195d1df55cbe0874aa.pts 04099429/expert_verified/points_label/fa07813a89527d195d1df55cbe0874aa.seg 04099429
+03001627/points/2a98a638f675f46e7d44dc16af152638.pts 03001627/expert_verified/points_label/2a98a638f675f46e7d44dc16af152638.seg 03001627
+03624134/points/ec1eb959cc203f1de5a365227cfe63ec.pts 03624134/expert_verified/points_label/ec1eb959cc203f1de5a365227cfe63ec.seg 03624134
+04379243/points/db0c430a51ac45c19d2be74cfb51ade1.pts 04379243/expert_verified/points_label/db0c430a51ac45c19d2be74cfb51ade1.seg 04379243
+04379243/points/26b2a15646f6a3a06f1e07a56c129dfc.pts 04379243/expert_verified/points_label/26b2a15646f6a3a06f1e07a56c129dfc.seg 04379243
+04379243/points/90343e416528b576f41d9ea5f63b1b05.pts 04379243/expert_verified/points_label/90343e416528b576f41d9ea5f63b1b05.seg 04379243
+03001627/points/43d38ad2f5d103adf9b9977a2406713a.pts 03001627/expert_verified/points_label/43d38ad2f5d103adf9b9977a2406713a.seg 03001627
+03001627/points/e279758e8a5b6a8d492d9da2668ec34c.pts 03001627/expert_verified/points_label/e279758e8a5b6a8d492d9da2668ec34c.seg 03001627
+03642806/points/71907a4a567dce3bb0de1e7a6809fd90.pts 03642806/expert_verified/points_label/71907a4a567dce3bb0de1e7a6809fd90.seg 03642806
+03636649/points/2958cd9fd799bf02cfbcbf340cec6da1.pts 03636649/expert_verified/points_label/2958cd9fd799bf02cfbcbf340cec6da1.seg 03636649
+04379243/points/bd7c71ca15b0d4e56c252f74b6220e29.pts 04379243/expert_verified/points_label/bd7c71ca15b0d4e56c252f74b6220e29.seg 04379243
+04379243/points/51c6a7298408c3f19730cb37c9a5f63b.pts 04379243/expert_verified/points_label/51c6a7298408c3f19730cb37c9a5f63b.seg 04379243
+02691156/points/e3de366a0cfb59ed38294c37c250d7cd.pts 02691156/expert_verified/points_label/e3de366a0cfb59ed38294c37c250d7cd.seg 02691156
+03467517/points/f288cd2146b8f4c1f0e1c0df18936d05.pts 03467517/expert_verified/points_label/f288cd2146b8f4c1f0e1c0df18936d05.seg 03467517
+04379243/points/270430ab9efb9d85c0f947750540fb22.pts 04379243/expert_verified/points_label/270430ab9efb9d85c0f947750540fb22.seg 04379243
+04379243/points/f5ad10e6a938aa80e85c7a030ebdf69a.pts 04379243/expert_verified/points_label/f5ad10e6a938aa80e85c7a030ebdf69a.seg 04379243
+04379243/points/8343d98e3710f5bee1b32bbe69d5bc15.pts 04379243/expert_verified/points_label/8343d98e3710f5bee1b32bbe69d5bc15.seg 04379243
+03790512/points/40b7a63fd9ede0cf48272812609617e2.pts 03790512/expert_verified/points_label/40b7a63fd9ede0cf48272812609617e2.seg 03790512
+03467517/points/16bc13ee237ebeb38460585fe283a1c9.pts 03467517/expert_verified/points_label/16bc13ee237ebeb38460585fe283a1c9.seg 03467517
+02691156/points/a56143efe74ee89ebbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a56143efe74ee89ebbf3143b1cb6076a.seg 02691156
+04379243/points/9a6ab25d91c92a5a35acfdef2ece21c0.pts 04379243/expert_verified/points_label/9a6ab25d91c92a5a35acfdef2ece21c0.seg 04379243
+03467517/points/c9b60abdc17708fb78ad94b294a9faa6.pts 03467517/expert_verified/points_label/c9b60abdc17708fb78ad94b294a9faa6.seg 03467517
+04379243/points/cde67434193a2a6f19fb4103277a6b93.pts 04379243/expert_verified/points_label/cde67434193a2a6f19fb4103277a6b93.seg 04379243
+04379243/points/6b62c85b16e300557005dacb6907e37d.pts 04379243/expert_verified/points_label/6b62c85b16e300557005dacb6907e37d.seg 04379243
+04379243/points/7956ac7aba6295d1c2fd07f66cbad0f7.pts 04379243/expert_verified/points_label/7956ac7aba6295d1c2fd07f66cbad0f7.seg 04379243
+04379243/points/dcda90e411cb4e35506d1e1cc84da713.pts 04379243/expert_verified/points_label/dcda90e411cb4e35506d1e1cc84da713.seg 04379243
+02691156/points/c494f446954523a8a32748a9f843a0bf.pts 02691156/expert_verified/points_label/c494f446954523a8a32748a9f843a0bf.seg 02691156
+02691156/points/18e6f319062ccb49ca8607f540cc62ba.pts 02691156/expert_verified/points_label/18e6f319062ccb49ca8607f540cc62ba.seg 02691156
+04379243/points/b7cead95e18b570d2c97486f63c12d76.pts 04379243/expert_verified/points_label/b7cead95e18b570d2c97486f63c12d76.seg 04379243
+03948459/points/f6d52684720d52a01ab78426351eea4a.pts 03948459/expert_verified/points_label/f6d52684720d52a01ab78426351eea4a.seg 03948459
+04379243/points/7eeceefed2b3aa2794f3bda96cf548cc.pts 04379243/expert_verified/points_label/7eeceefed2b3aa2794f3bda96cf548cc.seg 04379243
+03001627/points/5eaa2730f10054d0f6cabe1df6f4c9d9.pts 03001627/expert_verified/points_label/5eaa2730f10054d0f6cabe1df6f4c9d9.seg 03001627
+03001627/points/92f79b8e45269847f0efa341b439d741.pts 03001627/expert_verified/points_label/92f79b8e45269847f0efa341b439d741.seg 03001627
+03001627/points/cbaca6a6edfa2d512b520984c067934c.pts 03001627/expert_verified/points_label/cbaca6a6edfa2d512b520984c067934c.seg 03001627
+04379243/points/390e0db80fe12ef65fa6da97b9eb4a2f.pts 04379243/expert_verified/points_label/390e0db80fe12ef65fa6da97b9eb4a2f.seg 04379243
+04379243/points/2ec33e8b457ac0fa278d386bfa54545.pts 04379243/expert_verified/points_label/2ec33e8b457ac0fa278d386bfa54545.seg 04379243
+04225987/points/ac2b6924a60a7a87aa4f69d519551495.pts 04225987/expert_verified/points_label/ac2b6924a60a7a87aa4f69d519551495.seg 04225987
+02958343/points/468780ef4ace9a422e877e82c90c24d.pts 02958343/expert_verified/points_label/468780ef4ace9a422e877e82c90c24d.seg 02958343
+03001627/points/78c9204b2eac432b65b77a565916c7f.pts 03001627/expert_verified/points_label/78c9204b2eac432b65b77a565916c7f.seg 03001627
+04379243/points/b278b58e294a7d2bac242c3aebc81b2f.pts 04379243/expert_verified/points_label/b278b58e294a7d2bac242c3aebc81b2f.seg 04379243
+04379243/points/fc95d34ab1afb92b9118eee0b123125f.pts 04379243/expert_verified/points_label/fc95d34ab1afb92b9118eee0b123125f.seg 04379243
+03790512/points/54f016b47a5864cd5dde04c96fd8146.pts 03790512/expert_verified/points_label/54f016b47a5864cd5dde04c96fd8146.seg 03790512
+04379243/points/9afa121e3aec8bd7c387f328a37d8ece.pts 04379243/expert_verified/points_label/9afa121e3aec8bd7c387f328a37d8ece.seg 04379243
+04379243/points/382889dbc86b5dd919fb4103277a6b93.pts 04379243/expert_verified/points_label/382889dbc86b5dd919fb4103277a6b93.seg 04379243
+03467517/points/b83a81b2476ec59e59610f6f40382499.pts 03467517/expert_verified/points_label/b83a81b2476ec59e59610f6f40382499.seg 03467517
+03001627/points/5d959b0f79a22e8c67c9124d122355ab.pts 03001627/expert_verified/points_label/5d959b0f79a22e8c67c9124d122355ab.seg 03001627
+02691156/points/c4111dbb21e1f17043afdb9c81ff2967.pts 02691156/expert_verified/points_label/c4111dbb21e1f17043afdb9c81ff2967.seg 02691156
+02691156/points/46829981c5c25285bfc0a2c490b4c222.pts 02691156/expert_verified/points_label/46829981c5c25285bfc0a2c490b4c222.seg 02691156
+04379243/points/497659c4723fbc4fe90ff84c89de437.pts 04379243/expert_verified/points_label/497659c4723fbc4fe90ff84c89de437.seg 04379243
+02691156/points/a805c30d4b09f11f62347b4731688b0f.pts 02691156/expert_verified/points_label/a805c30d4b09f11f62347b4731688b0f.seg 02691156
+03636649/points/e485053f3e0d18252cd2160e449d45ae.pts 03636649/expert_verified/points_label/e485053f3e0d18252cd2160e449d45ae.seg 03636649
+02958343/points/2fb5fe84c28b8b35cc02882a83047172.pts 02958343/expert_verified/points_label/2fb5fe84c28b8b35cc02882a83047172.seg 02958343
+03636649/points/f7a4590c54e2ac7ce62fad6b4f42c880.pts 03636649/expert_verified/points_label/f7a4590c54e2ac7ce62fad6b4f42c880.seg 03636649
+03642806/points/9fc5b76d363ca64ed03066fc8168e9c6.pts 03642806/expert_verified/points_label/9fc5b76d363ca64ed03066fc8168e9c6.seg 03642806
+02691156/points/be080a797406422843afdb9c81ff2967.pts 02691156/expert_verified/points_label/be080a797406422843afdb9c81ff2967.seg 02691156
+04379243/points/81a84fcb2b247a3348eaa510713cb074.pts 04379243/expert_verified/points_label/81a84fcb2b247a3348eaa510713cb074.seg 04379243
+03001627/points/47c540c2e9c3483ce79a6b87656a120a.pts 03001627/expert_verified/points_label/47c540c2e9c3483ce79a6b87656a120a.seg 03001627
+03001627/points/5073d7a546b9a4d0e810eba61b778ebb.pts 03001627/expert_verified/points_label/5073d7a546b9a4d0e810eba61b778ebb.seg 03001627
+03001627/points/e4a890f2330ebd7e4a11872aa986426d.pts 03001627/expert_verified/points_label/e4a890f2330ebd7e4a11872aa986426d.seg 03001627
+03001627/points/a7200578bd7bea065dc3653f8341633a.pts 03001627/expert_verified/points_label/a7200578bd7bea065dc3653f8341633a.seg 03001627
+03467517/points/b004331ee5cc39caa24eeca91f583600.pts 03467517/expert_verified/points_label/b004331ee5cc39caa24eeca91f583600.seg 03467517
+04379243/points/f01768b8b8ba025ee45ef4135c266a12.pts 04379243/expert_verified/points_label/f01768b8b8ba025ee45ef4135c266a12.seg 04379243
+03642806/points/5173aa7f75ff3cf1b55fde51a411949f.pts 03642806/expert_verified/points_label/5173aa7f75ff3cf1b55fde51a411949f.seg 03642806
+03636649/points/e7e45a8f0b0ab311c754474f0ac106.pts 03636649/expert_verified/points_label/e7e45a8f0b0ab311c754474f0ac106.seg 03636649
+03642806/points/1b67b4bfed6688ba5b22feddf58c05e1.pts 03642806/expert_verified/points_label/1b67b4bfed6688ba5b22feddf58c05e1.seg 03642806
+03797390/points/f1e439307b834015770a0ff1161fa15a.pts 03797390/expert_verified/points_label/f1e439307b834015770a0ff1161fa15a.seg 03797390
+03001627/points/b6c9495629c00419940806ade53ef2f.pts 03001627/expert_verified/points_label/b6c9495629c00419940806ade53ef2f.seg 03001627
+03001627/points/8e19d2ec95c45186a6fd617b2ff5d2d.pts 03001627/expert_verified/points_label/8e19d2ec95c45186a6fd617b2ff5d2d.seg 03001627
+03001627/points/d7b8189fe69cebedc41b07b1627c4b43.pts 03001627/expert_verified/points_label/d7b8189fe69cebedc41b07b1627c4b43.seg 03001627
+02691156/points/a7a0e7eddf4ffb8c19378fd691582500.pts 02691156/expert_verified/points_label/a7a0e7eddf4ffb8c19378fd691582500.seg 02691156
+03001627/points/2b6cbad4ba1e9a0645881d7eab1353ba.pts 03001627/expert_verified/points_label/2b6cbad4ba1e9a0645881d7eab1353ba.seg 03001627
+04379243/points/dade0594e68e2250be6c545952e7fa4a.pts 04379243/expert_verified/points_label/dade0594e68e2250be6c545952e7fa4a.seg 04379243
+03001627/points/9850d225049f987e9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9850d225049f987e9b9f2eb77f5e247e.seg 03001627
+03948459/points/e9e6426605eb6d5952d52701459b1f0.pts 03948459/expert_verified/points_label/e9e6426605eb6d5952d52701459b1f0.seg 03948459
+03636649/points/e507bc77c03a1b3afcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e507bc77c03a1b3afcb8d8c6d4df8143.seg 03636649
+03797390/points/a6d9f9ae39728831808951ff5fb582ac.pts 03797390/expert_verified/points_label/a6d9f9ae39728831808951ff5fb582ac.seg 03797390
+04379243/points/3144ba0c286cc61f490ad276cd2af3a4.pts 04379243/expert_verified/points_label/3144ba0c286cc61f490ad276cd2af3a4.seg 04379243
+04379243/points/9be565678aab11cba0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/9be565678aab11cba0ab1d82ef09f78f.seg 04379243
+04379243/points/a4b2870ce7a54b8eec11c6b035aac769.pts 04379243/expert_verified/points_label/a4b2870ce7a54b8eec11c6b035aac769.seg 04379243
+03636649/points/78b95abd1d1158ffef3a2c64cef919d0.pts 03636649/expert_verified/points_label/78b95abd1d1158ffef3a2c64cef919d0.seg 03636649
+04379243/points/2182028f013e7eb530bbd4cddd04c77b.pts 04379243/expert_verified/points_label/2182028f013e7eb530bbd4cddd04c77b.seg 04379243
+02691156/points/e00b89bc338348caa42c49797afd1f5c.pts 02691156/expert_verified/points_label/e00b89bc338348caa42c49797afd1f5c.seg 02691156
+03001627/points/9d28a066df22319cca2e16d6cd76503c.pts 03001627/expert_verified/points_label/9d28a066df22319cca2e16d6cd76503c.seg 03001627
+03636649/points/3c4d8c4ebe9dedbc2cd2160e449d45ae.pts 03636649/expert_verified/points_label/3c4d8c4ebe9dedbc2cd2160e449d45ae.seg 03636649
+02691156/points/97d662e5e6345b46bd46d022fd7d80aa.pts 02691156/expert_verified/points_label/97d662e5e6345b46bd46d022fd7d80aa.seg 02691156
+03001627/points/9dac39c51680daa2f71e06115e9c3b3e.pts 03001627/expert_verified/points_label/9dac39c51680daa2f71e06115e9c3b3e.seg 03001627
+03624134/points/1ecb37ea8f0c4abc20fc54d2500eb7f1.pts 03624134/expert_verified/points_label/1ecb37ea8f0c4abc20fc54d2500eb7f1.seg 03624134
+03624134/points/3a0f48139bfd3a4ea152d2e823b9fe06.pts 03624134/expert_verified/points_label/3a0f48139bfd3a4ea152d2e823b9fe06.seg 03624134
+04379243/points/1264d88ae599df3fbeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/1264d88ae599df3fbeedb4c8fd29e2d1.seg 04379243
+03001627/points/97bbc8970b05c4a3fcde6bcb709edd9a.pts 03001627/expert_verified/points_label/97bbc8970b05c4a3fcde6bcb709edd9a.seg 03001627
+03636649/points/1f58b59a1b6b06df766fc93a239bada0.pts 03636649/expert_verified/points_label/1f58b59a1b6b06df766fc93a239bada0.seg 03636649
+03001627/points/eb51e814c3f44a07914ced7dab3536b9.pts 03001627/expert_verified/points_label/eb51e814c3f44a07914ced7dab3536b9.seg 03001627
+03636649/points/a138582b1d0b9cbb137af984a9f45d65.pts 03636649/expert_verified/points_label/a138582b1d0b9cbb137af984a9f45d65.seg 03636649
+03790512/points/9f9de88a95b56660b37378f3c85478b4.pts 03790512/expert_verified/points_label/9f9de88a95b56660b37378f3c85478b4.seg 03790512
+03001627/points/a521fba02ca7f9aa822215026d1e8d82.pts 03001627/expert_verified/points_label/a521fba02ca7f9aa822215026d1e8d82.seg 03001627
+04225987/points/d303055e96cd59949da15808191f1405.pts 04225987/expert_verified/points_label/d303055e96cd59949da15808191f1405.seg 04225987
+04379243/points/7e3022a7bd00eb4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/7e3022a7bd00eb4195b8ea6a366e14d.seg 04379243
+02691156/points/d83300deab42c100eb9db4e832a6dd82.pts 02691156/expert_verified/points_label/d83300deab42c100eb9db4e832a6dd82.seg 02691156
+03642806/points/a4b410734514306ac401e233323032d6.pts 03642806/expert_verified/points_label/a4b410734514306ac401e233323032d6.seg 03642806
+03790512/points/532e6f88a9975a27b37378f3c85478b4.pts 03790512/expert_verified/points_label/532e6f88a9975a27b37378f3c85478b4.seg 03790512
+03642806/points/cc691d9e8e189ce47a381a112bfd785.pts 03642806/expert_verified/points_label/cc691d9e8e189ce47a381a112bfd785.seg 03642806
+02691156/points/aa07239e9397cf189601fb40d0d298b9.pts 02691156/expert_verified/points_label/aa07239e9397cf189601fb40d0d298b9.seg 02691156
+03642806/points/cc0535a34cdc7d676bf98d15712168f.pts 03642806/expert_verified/points_label/cc0535a34cdc7d676bf98d15712168f.seg 03642806
+02691156/points/ddec69970cbc4d29112a90660b187a10.pts 02691156/expert_verified/points_label/ddec69970cbc4d29112a90660b187a10.seg 02691156
+04379243/points/268e68f1819a225c1b4b790955c17432.pts 04379243/expert_verified/points_label/268e68f1819a225c1b4b790955c17432.seg 04379243
+03624134/points/1943c87f92ac76e112cad8be168fe72d.pts 03624134/expert_verified/points_label/1943c87f92ac76e112cad8be168fe72d.seg 03624134
+04379243/points/b9fc2f624533bb8119fb4103277a6b93.pts 04379243/expert_verified/points_label/b9fc2f624533bb8119fb4103277a6b93.seg 04379243
+03001627/points/1c45b266d3c879dab36dcc661f3905d.pts 03001627/expert_verified/points_label/1c45b266d3c879dab36dcc661f3905d.seg 03001627
+03948459/points/1660ef4b3f20b1e2a94b922b533051b7.pts 03948459/expert_verified/points_label/1660ef4b3f20b1e2a94b922b533051b7.seg 03948459
+02691156/points/167250e2014c72dbb87697d3904b168b.pts 02691156/expert_verified/points_label/167250e2014c72dbb87697d3904b168b.seg 02691156
+02691156/points/dfe65f8a20df11c5d1df55cbe0874aa.pts 02691156/expert_verified/points_label/dfe65f8a20df11c5d1df55cbe0874aa.seg 02691156
+03001627/points/44a2a3952ea2315ff51f77a6d7299806.pts 03001627/expert_verified/points_label/44a2a3952ea2315ff51f77a6d7299806.seg 03001627
+04379243/points/a1896691fe875eccb9968f25875bdef4.pts 04379243/expert_verified/points_label/a1896691fe875eccb9968f25875bdef4.seg 04379243
+04379243/points/6f3506c9c5202101c4e8952b27b5f370.pts 04379243/expert_verified/points_label/6f3506c9c5202101c4e8952b27b5f370.seg 04379243
+04379243/points/fead7e0c30a347b1710801cae5dc529.pts 04379243/expert_verified/points_label/fead7e0c30a347b1710801cae5dc529.seg 04379243
+04379243/points/384bf53e12744e2019fb4103277a6b93.pts 04379243/expert_verified/points_label/384bf53e12744e2019fb4103277a6b93.seg 04379243
+03001627/points/30378faa6bf5b245fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/30378faa6bf5b245fdef1c01cbd4ae0c.seg 03001627
+04379243/points/5690d17b330f73adfeb8ceb93793cb5.pts 04379243/expert_verified/points_label/5690d17b330f73adfeb8ceb93793cb5.seg 04379243
+03467517/points/2e4ec0874ea34a50812ca0ac90db1c07.pts 03467517/expert_verified/points_label/2e4ec0874ea34a50812ca0ac90db1c07.seg 03467517
+03001627/points/a007a3cd5b8ca7fb19fb4103277a6b93.pts 03001627/expert_verified/points_label/a007a3cd5b8ca7fb19fb4103277a6b93.seg 03001627
+03001627/points/bc21c95f766502a78b03575bb54dfd4.pts 03001627/expert_verified/points_label/bc21c95f766502a78b03575bb54dfd4.seg 03001627
+04379243/points/6a3ee73d42228f8581654cb17c02fd.pts 04379243/expert_verified/points_label/6a3ee73d42228f8581654cb17c02fd.seg 04379243
+04379243/points/4b399cdce8337c29285e0e27752e54a8.pts 04379243/expert_verified/points_label/4b399cdce8337c29285e0e27752e54a8.seg 04379243
+04379243/points/7f9d2da43d6aba67afb6676a5cd782b6.pts 04379243/expert_verified/points_label/7f9d2da43d6aba67afb6676a5cd782b6.seg 04379243
+03001627/points/72669be1815b2bb81e4fe86c4ad3ec90.pts 03001627/expert_verified/points_label/72669be1815b2bb81e4fe86c4ad3ec90.seg 03001627
+04379243/points/223fbcc813831d8c6e526771d2f7444e.pts 04379243/expert_verified/points_label/223fbcc813831d8c6e526771d2f7444e.seg 04379243
+02691156/points/adeb5d68e8d65cc419ba010ddb4974fe.pts 02691156/expert_verified/points_label/adeb5d68e8d65cc419ba010ddb4974fe.seg 02691156
+03001627/points/8a9d8dad6800d55ff37af16b2893f1d4.pts 03001627/expert_verified/points_label/8a9d8dad6800d55ff37af16b2893f1d4.seg 03001627
+04379243/points/db406d9b2a94bce5622d7484764b58f.pts 04379243/expert_verified/points_label/db406d9b2a94bce5622d7484764b58f.seg 04379243
+03001627/points/68b88c0be088c21d5e0096fb2d3266a.pts 03001627/expert_verified/points_label/68b88c0be088c21d5e0096fb2d3266a.seg 03001627
+03790512/points/973d75ed9c12836f3d033e6cf82ec72c.pts 03790512/expert_verified/points_label/973d75ed9c12836f3d033e6cf82ec72c.seg 03790512
+04379243/points/20292fba71362950c59c53f7df509858.pts 04379243/expert_verified/points_label/20292fba71362950c59c53f7df509858.seg 04379243
+03001627/points/21fb308ca737174e22f2f93459bd863e.pts 03001627/expert_verified/points_label/21fb308ca737174e22f2f93459bd863e.seg 03001627
+03001627/points/be9d5105e48ae27e713decb1a0563b12.pts 03001627/expert_verified/points_label/be9d5105e48ae27e713decb1a0563b12.seg 03001627
+02958343/points/c6441f127d51e478f0fb72d24c42a39.pts 02958343/expert_verified/points_label/c6441f127d51e478f0fb72d24c42a39.seg 02958343
+03001627/points/f29cbdb2c7bb10f9953d950bcd7de7a.pts 03001627/expert_verified/points_label/f29cbdb2c7bb10f9953d950bcd7de7a.seg 03001627
+02691156/points/65654b5c4e488e0c961fa14fc879444e.pts 02691156/expert_verified/points_label/65654b5c4e488e0c961fa14fc879444e.seg 02691156
+04379243/points/8654b644c766dd23d1dcc55e36186e4e.pts 04379243/expert_verified/points_label/8654b644c766dd23d1dcc55e36186e4e.seg 04379243
+04379243/points/56bb7376dfa9cb5c8cf069d506f8b5ac.pts 04379243/expert_verified/points_label/56bb7376dfa9cb5c8cf069d506f8b5ac.seg 04379243
+04379243/points/d291243cfb51ea7dcb25d116843b43a4.pts 04379243/expert_verified/points_label/d291243cfb51ea7dcb25d116843b43a4.seg 04379243
+03790512/points/49edb54e97458de8d373c34785838ee4.pts 03790512/expert_verified/points_label/49edb54e97458de8d373c34785838ee4.seg 03790512
+04379243/points/216da8313bc7b192ab610b0c94236463.pts 04379243/expert_verified/points_label/216da8313bc7b192ab610b0c94236463.seg 04379243
+03001627/points/5ac8b44ff77e5490c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/5ac8b44ff77e5490c8687ff9b0b4e4ac.seg 03001627
+03001627/points/956063d67b939431f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/956063d67b939431f56aa11cd5e0c3e.seg 03001627
+04379243/points/8dd8370dcaa8d770ea5682a3b818969a.pts 04379243/expert_verified/points_label/8dd8370dcaa8d770ea5682a3b818969a.seg 04379243
+03636649/points/3b64d5033c580d2ef76898f881b76a.pts 03636649/expert_verified/points_label/3b64d5033c580d2ef76898f881b76a.seg 03636649
+03001627/points/3d9dce1953180fe6f9c9f9697d1ec60.pts 03001627/expert_verified/points_label/3d9dce1953180fe6f9c9f9697d1ec60.seg 03001627
+03001627/points/d1b03eeb33fd441d8189e5e3786f2290.pts 03001627/expert_verified/points_label/d1b03eeb33fd441d8189e5e3786f2290.seg 03001627
+02691156/points/5294c39d2a57bd7e5cad6226edb8e82.pts 02691156/expert_verified/points_label/5294c39d2a57bd7e5cad6226edb8e82.seg 02691156
+04379243/points/7bc93a4cc26fab5c8c12b667670a35f2.pts 04379243/expert_verified/points_label/7bc93a4cc26fab5c8c12b667670a35f2.seg 04379243
+04379243/points/813d34995b5c4406b65b71636c46ae49.pts 04379243/expert_verified/points_label/813d34995b5c4406b65b71636c46ae49.seg 04379243
+03001627/points/6782b941de7b2199a344c33f76676fbd.pts 03001627/expert_verified/points_label/6782b941de7b2199a344c33f76676fbd.seg 03001627
+03636649/points/ea5ae3cfd142c3b923f93f957094a824.pts 03636649/expert_verified/points_label/ea5ae3cfd142c3b923f93f957094a824.seg 03636649
+03001627/points/47caca00f993bc4e4b3c42e318f3affc.pts 03001627/expert_verified/points_label/47caca00f993bc4e4b3c42e318f3affc.seg 03001627
+02691156/points/b702e35f4a59e81f64801ad2940cdd5.pts 02691156/expert_verified/points_label/b702e35f4a59e81f64801ad2940cdd5.seg 02691156
+03636649/points/3b5f0c01c2b914fc6f16f167d27a7dab.pts 03636649/expert_verified/points_label/3b5f0c01c2b914fc6f16f167d27a7dab.seg 03636649
+04379243/points/ad63116007d98a6d19758238d4c7aff2.pts 04379243/expert_verified/points_label/ad63116007d98a6d19758238d4c7aff2.seg 04379243
+03797390/points/8f6c86feaa74698d5c91ee20ade72edc.pts 03797390/expert_verified/points_label/8f6c86feaa74698d5c91ee20ade72edc.seg 03797390
+04379243/points/48baef3ab18d2d43d2afe8d5254a0d04.pts 04379243/expert_verified/points_label/48baef3ab18d2d43d2afe8d5254a0d04.seg 04379243
+03001627/points/fe5310a3457bf0e5c4e8952b27b5f370.pts 03001627/expert_verified/points_label/fe5310a3457bf0e5c4e8952b27b5f370.seg 03001627
+04379243/points/d4c330d27bbef3808f6610bf672cd686.pts 04379243/expert_verified/points_label/d4c330d27bbef3808f6610bf672cd686.seg 04379243
+04379243/points/adcb67b58024afb99910b7ec4c4e599b.pts 04379243/expert_verified/points_label/adcb67b58024afb99910b7ec4c4e599b.seg 04379243
+02958343/points/65d6433043c40046b82c0841410a924f.pts 02958343/expert_verified/points_label/65d6433043c40046b82c0841410a924f.seg 02958343
+04379243/points/1a00aa6b75362cc5b324368d54a7416f.pts 04379243/expert_verified/points_label/1a00aa6b75362cc5b324368d54a7416f.seg 04379243
+04379243/points/7982e2f2984978c6f4b6538438a0b930.pts 04379243/expert_verified/points_label/7982e2f2984978c6f4b6538438a0b930.seg 04379243
+03467517/points/26e1801ea747f72f14fe0da28e4f8384.pts 03467517/expert_verified/points_label/26e1801ea747f72f14fe0da28e4f8384.seg 03467517
+04379243/points/c8ee4a8b703180992985858e6f5832da.pts 04379243/expert_verified/points_label/c8ee4a8b703180992985858e6f5832da.seg 04379243
+02691156/points/f24daae76836e249f0878b58b4e887bf.pts 02691156/expert_verified/points_label/f24daae76836e249f0878b58b4e887bf.seg 02691156
+04379243/points/f29863d2fe8863d4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/f29863d2fe8863d4195b8ea6a366e14d.seg 04379243
+04379243/points/babb0963a0e17bb59cd0aef0207ac8c6.pts 04379243/expert_verified/points_label/babb0963a0e17bb59cd0aef0207ac8c6.seg 04379243
+03001627/points/39911f927331db1c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/39911f927331db1c8687ff9b0b4e4ac.seg 03001627
+03001627/points/4a9d3ce54c09a2da696b74614952b2d0.pts 03001627/expert_verified/points_label/4a9d3ce54c09a2da696b74614952b2d0.seg 03001627
+03642806/points/caa4afd404f24d21275c1147a304ed86.pts 03642806/expert_verified/points_label/caa4afd404f24d21275c1147a304ed86.seg 03642806
+02691156/points/ff6e377e8e5b3757cc34b900bb2492e.pts 02691156/expert_verified/points_label/ff6e377e8e5b3757cc34b900bb2492e.seg 02691156
+03001627/points/483cfed0659965ed73c478529c40c4e6.pts 03001627/expert_verified/points_label/483cfed0659965ed73c478529c40c4e6.seg 03001627
+03797390/points/4b7888feea81219ab5f4a9188bfa0ef6.pts 03797390/expert_verified/points_label/4b7888feea81219ab5f4a9188bfa0ef6.seg 03797390
+03790512/points/40d84e407c46e8d8b31e74d456742c7.pts 03790512/expert_verified/points_label/40d84e407c46e8d8b31e74d456742c7.seg 03790512
+04379243/points/176e3b32d749ac94d79f2fc0b8d8ffad.pts 04379243/expert_verified/points_label/176e3b32d749ac94d79f2fc0b8d8ffad.seg 04379243
+03001627/points/657790bc7fd16326c132086242d50af2.pts 03001627/expert_verified/points_label/657790bc7fd16326c132086242d50af2.seg 03001627
+04379243/points/94c0ab5650ea392ddcfcef693e7ec696.pts 04379243/expert_verified/points_label/94c0ab5650ea392ddcfcef693e7ec696.seg 04379243
+03624134/points/bf5cae3922d3cb2bca7250d90eb506cf.pts 03624134/expert_verified/points_label/bf5cae3922d3cb2bca7250d90eb506cf.seg 03624134
+03001627/points/49a3b0242c13f92da6fee8e2140acec9.pts 03001627/expert_verified/points_label/49a3b0242c13f92da6fee8e2140acec9.seg 03001627
+03636649/points/e4c9bb21fe5bfeb3e21f078602e2eda8.pts 03636649/expert_verified/points_label/e4c9bb21fe5bfeb3e21f078602e2eda8.seg 03636649
+03636649/points/6595ee36783d261ed3281970e2c44dbe.pts 03636649/expert_verified/points_label/6595ee36783d261ed3281970e2c44dbe.seg 03636649
+02958343/points/9a152b11907b11074549b3c52ae0632e.pts 02958343/expert_verified/points_label/9a152b11907b11074549b3c52ae0632e.seg 02958343
+04379243/points/68a7bad2b06bc1a9d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/68a7bad2b06bc1a9d93768e7b9b1eabf.seg 04379243
+04379243/points/b9c756b2ff5d66ddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/b9c756b2ff5d66ddfebad4f49b26ec52.seg 04379243
+03797390/points/2d10421716b16580e45ef4135c266a12.pts 03797390/expert_verified/points_label/2d10421716b16580e45ef4135c266a12.seg 03797390
+03001627/points/2c76aaa00e55c26836c07750784b6bc6.pts 03001627/expert_verified/points_label/2c76aaa00e55c26836c07750784b6bc6.seg 03001627
+03636649/points/5cca570916f420e64b3c42e318f3affc.pts 03636649/expert_verified/points_label/5cca570916f420e64b3c42e318f3affc.seg 03636649
+03001627/points/9225e57e34334ee019cb07ecb5b4102.pts 03001627/expert_verified/points_label/9225e57e34334ee019cb07ecb5b4102.seg 03001627
+03001627/points/17aeeadccf0e560e274b862d3a151946.pts 03001627/expert_verified/points_label/17aeeadccf0e560e274b862d3a151946.seg 03001627
+03636649/points/427806f30c61059c22e05b5d2ce39e3b.pts 03636649/expert_verified/points_label/427806f30c61059c22e05b5d2ce39e3b.seg 03636649
+03636649/points/17349d6d35aac0685ed28d6c8a1bdfe5.pts 03636649/expert_verified/points_label/17349d6d35aac0685ed28d6c8a1bdfe5.seg 03636649
+04379243/points/5ee4cbe45bdc4cd571a782a4379556c7.pts 04379243/expert_verified/points_label/5ee4cbe45bdc4cd571a782a4379556c7.seg 04379243
+03636649/points/5eda619e5f36499fc1537287b5c50d9d.pts 03636649/expert_verified/points_label/5eda619e5f36499fc1537287b5c50d9d.seg 03636649
+02691156/points/f57c74e194cd2b2bc8727b27ee96a4b7.pts 02691156/expert_verified/points_label/f57c74e194cd2b2bc8727b27ee96a4b7.seg 02691156
+02958343/points/27d42437168ccd7ddd75f724c0ccbe00.pts 02958343/expert_verified/points_label/27d42437168ccd7ddd75f724c0ccbe00.seg 02958343
+04379243/points/c8cf1c77bbb79d214719088c8e42c6ab.pts 04379243/expert_verified/points_label/c8cf1c77bbb79d214719088c8e42c6ab.seg 04379243
+04379243/points/40b48121d1879be2ee0605a41c3320d6.pts 04379243/expert_verified/points_label/40b48121d1879be2ee0605a41c3320d6.seg 04379243
+02691156/points/4f9b12d07dce21ac9d93a50cb0355558.pts 02691156/expert_verified/points_label/4f9b12d07dce21ac9d93a50cb0355558.seg 02691156
+02691156/points/25bd1569261bc545e8323edc0fe816a8.pts 02691156/expert_verified/points_label/25bd1569261bc545e8323edc0fe816a8.seg 02691156
+02691156/points/fbc429365ab7136be1a9c234926c21e2.pts 02691156/expert_verified/points_label/fbc429365ab7136be1a9c234926c21e2.seg 02691156
+04379243/points/798c315f86d8f02f931e98da3a93e73e.pts 04379243/expert_verified/points_label/798c315f86d8f02f931e98da3a93e73e.seg 04379243
+03790512/points/a0a40a9d5aabd6a7d5dde04c96fd8146.pts 03790512/expert_verified/points_label/a0a40a9d5aabd6a7d5dde04c96fd8146.seg 03790512
+04379243/points/884f15cfc6a3eea3dcfcef693e7ec696.pts 04379243/expert_verified/points_label/884f15cfc6a3eea3dcfcef693e7ec696.seg 04379243
+04379243/points/f16f939baeb7722e664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/f16f939baeb7722e664b3b9b23ddfcbc.seg 04379243
+03001627/points/1e0580f443a9e6d2593ebeeedbff73b.pts 03001627/expert_verified/points_label/1e0580f443a9e6d2593ebeeedbff73b.seg 03001627
+03636649/points/927e0654427c4d0b82241d99b4e87f38.pts 03636649/expert_verified/points_label/927e0654427c4d0b82241d99b4e87f38.seg 03636649
+03001627/points/bdd29e651e5f6fb2b079317292bdc5d4.pts 03001627/expert_verified/points_label/bdd29e651e5f6fb2b079317292bdc5d4.seg 03001627
+03642806/points/cb1e3a990782678b4b6682da890df381.pts 03642806/expert_verified/points_label/cb1e3a990782678b4b6682da890df381.seg 03642806
+03001627/points/fd5ac9b342fe518b9d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/fd5ac9b342fe518b9d3ea1c6b57a0095.seg 03001627
+02958343/points/6bbcd5608ddf871a4cdd04162f008888.pts 02958343/expert_verified/points_label/6bbcd5608ddf871a4cdd04162f008888.seg 02958343
+04379243/points/76338ed3326689b249524cfd5973a145.pts 04379243/expert_verified/points_label/76338ed3326689b249524cfd5973a145.seg 04379243
+03001627/points/9a0571ae6169a6ebfebad4f49b26ec52.pts 03001627/expert_verified/points_label/9a0571ae6169a6ebfebad4f49b26ec52.seg 03001627
+03948459/points/49429e1d1e90c1ca202be79d8b285c1e.pts 03948459/expert_verified/points_label/49429e1d1e90c1ca202be79d8b285c1e.seg 03948459
+02691156/points/45a4ec99ed13ed773c2498c4c2f13ca.pts 02691156/expert_verified/points_label/45a4ec99ed13ed773c2498c4c2f13ca.seg 02691156
+04379243/points/70995336d06fc07ae9f3e9c758fef992.pts 04379243/expert_verified/points_label/70995336d06fc07ae9f3e9c758fef992.seg 04379243
+03001627/points/6fd76577d0df60669b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6fd76577d0df60669b9f2eb77f5e247e.seg 03001627
+03001627/points/66f18d05d960ffe0bcd12732b5a4b789.pts 03001627/expert_verified/points_label/66f18d05d960ffe0bcd12732b5a4b789.seg 03001627
+03001627/points/e401be99c5a51d8bef8e9284f76f3024.pts 03001627/expert_verified/points_label/e401be99c5a51d8bef8e9284f76f3024.seg 03001627
+03001627/points/4a0b61d33846824ab1f04c301b6ccc90.pts 03001627/expert_verified/points_label/4a0b61d33846824ab1f04c301b6ccc90.seg 03001627
+04379243/points/9a5cb4122d518111b339f790b1757e92.pts 04379243/expert_verified/points_label/9a5cb4122d518111b339f790b1757e92.seg 04379243
+04379243/points/6281381ce38aa988de98d10ab5975b59.pts 04379243/expert_verified/points_label/6281381ce38aa988de98d10ab5975b59.seg 04379243
+04379243/points/d382d9e34f365544278d386bfa54545.pts 04379243/expert_verified/points_label/d382d9e34f365544278d386bfa54545.seg 04379243
+03948459/points/6de6e56c6f7d43692866658c90231a1a.pts 03948459/expert_verified/points_label/6de6e56c6f7d43692866658c90231a1a.seg 03948459
+02691156/points/494a1698eb82572c3df325aac2f73830.pts 02691156/expert_verified/points_label/494a1698eb82572c3df325aac2f73830.seg 02691156
+02691156/points/c581942f40cbb60819ba010ddb4974fe.pts 02691156/expert_verified/points_label/c581942f40cbb60819ba010ddb4974fe.seg 02691156
+04379243/points/e9038664b7d35e6b436e6787c76ef3f0.pts 04379243/expert_verified/points_label/e9038664b7d35e6b436e6787c76ef3f0.seg 04379243
+04099429/points/56c13d294f8afb1ffb88d148e845f82e.pts 04099429/expert_verified/points_label/56c13d294f8afb1ffb88d148e845f82e.seg 04099429
+02958343/points/86fa16c6da908e6b44221994b043fd86.pts 02958343/expert_verified/points_label/86fa16c6da908e6b44221994b043fd86.seg 02958343
+04379243/points/3249c3ad90085a9e98d5fc0473d00a1c.pts 04379243/expert_verified/points_label/3249c3ad90085a9e98d5fc0473d00a1c.seg 04379243
+03636649/points/8581a3ae1f77319ac066b9622c005c53.pts 03636649/expert_verified/points_label/8581a3ae1f77319ac066b9622c005c53.seg 03636649
+03790512/points/6e1397773a4d15db429f1c522640e6f0.pts 03790512/expert_verified/points_label/6e1397773a4d15db429f1c522640e6f0.seg 03790512
+03624134/points/c1ab7029de67351cf97a65c35ea619f0.pts 03624134/expert_verified/points_label/c1ab7029de67351cf97a65c35ea619f0.seg 03624134
+04379243/points/16e874e6165e836b30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/16e874e6165e836b30bbd4cddd04c77b.seg 04379243
+03636649/points/ff08713d837d87edf2098a9f7fc86999.pts 03636649/expert_verified/points_label/ff08713d837d87edf2098a9f7fc86999.seg 03636649
+03790512/points/b649be9c09e2b332429f1c522640e6f0.pts 03790512/expert_verified/points_label/b649be9c09e2b332429f1c522640e6f0.seg 03790512
+03001627/points/85b16941984902f8facfa12c7d71c89f.pts 03001627/expert_verified/points_label/85b16941984902f8facfa12c7d71c89f.seg 03001627
+04379243/points/cf1a7653c10aaa0eab610b0c94236463.pts 04379243/expert_verified/points_label/cf1a7653c10aaa0eab610b0c94236463.seg 04379243
+03001627/points/a42aa59fa23b4a4d9c0ca344f487323e.pts 03001627/expert_verified/points_label/a42aa59fa23b4a4d9c0ca344f487323e.seg 03001627
+03001627/points/3f4f1d18c61a07f134b707eb14b2a4a5.pts 03001627/expert_verified/points_label/3f4f1d18c61a07f134b707eb14b2a4a5.seg 03001627
+03001627/points/d2b9e98373e96afec8d65ca96e6b18ef.pts 03001627/expert_verified/points_label/d2b9e98373e96afec8d65ca96e6b18ef.seg 03001627
+03636649/points/71dffdee89efe07cdff00b2637ddcbde.pts 03636649/expert_verified/points_label/71dffdee89efe07cdff00b2637ddcbde.seg 03636649
+02691156/points/5ac0cd21410b2a6a341877ff7a6c751f.pts 02691156/expert_verified/points_label/5ac0cd21410b2a6a341877ff7a6c751f.seg 02691156
+03636649/points/76eb7436c40e083384d184bdc625781a.pts 03636649/expert_verified/points_label/76eb7436c40e083384d184bdc625781a.seg 03636649
+03642806/points/13330d1e7b199dd82530b9c2b65d3f86.pts 03642806/expert_verified/points_label/13330d1e7b199dd82530b9c2b65d3f86.seg 03642806
+02691156/points/e726c8e6897130439a6e43b878d5b335.pts 02691156/expert_verified/points_label/e726c8e6897130439a6e43b878d5b335.seg 02691156
+04379243/points/40a402e1d949364a104ceb84075e40d6.pts 04379243/expert_verified/points_label/40a402e1d949364a104ceb84075e40d6.seg 04379243
+03001627/points/42140baad25c8598baa1a4ff2c45ffc9.pts 03001627/expert_verified/points_label/42140baad25c8598baa1a4ff2c45ffc9.seg 03001627
+03001627/points/5283a98b5c693e64ebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/5283a98b5c693e64ebefe6b1d594ad2e.seg 03001627
+02691156/points/15898fef6fec88c53ada73811bb576de.pts 02691156/expert_verified/points_label/15898fef6fec88c53ada73811bb576de.seg 02691156
+03001627/points/3f8d0d53e2bd74124b3c42e318f3affc.pts 03001627/expert_verified/points_label/3f8d0d53e2bd74124b3c42e318f3affc.seg 03001627
+04379243/points/cd106955d3bdf8e751c4deb11af7079e.pts 04379243/expert_verified/points_label/cd106955d3bdf8e751c4deb11af7079e.seg 04379243
+03001627/points/11506b96d41f7d3dd7c4a943f33e0384.pts 03001627/expert_verified/points_label/11506b96d41f7d3dd7c4a943f33e0384.seg 03001627
+03001627/points/f51ab8433184dfd2c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/f51ab8433184dfd2c8687ff9b0b4e4ac.seg 03001627
+02691156/points/c9a6dcf87d1f15bca8607f540cc62ba.pts 02691156/expert_verified/points_label/c9a6dcf87d1f15bca8607f540cc62ba.seg 02691156
+04379243/points/d9c75799ff9ff74664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/d9c75799ff9ff74664b3b9b23ddfcbc.seg 04379243
+04379243/points/93e81005c19a74b8664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/93e81005c19a74b8664b3b9b23ddfcbc.seg 04379243
+02958343/points/5057c9dbf72e0352728fa2df514c65d4.pts 02958343/expert_verified/points_label/5057c9dbf72e0352728fa2df514c65d4.seg 02958343
+04379243/points/8ad88ee4442fd0fd8a6ba7ebad3985bb.pts 04379243/expert_verified/points_label/8ad88ee4442fd0fd8a6ba7ebad3985bb.seg 04379243
+04379243/points/a2554ec7e2331a8fab610b0c94236463.pts 04379243/expert_verified/points_label/a2554ec7e2331a8fab610b0c94236463.seg 04379243
+04379243/points/482a76d14781e55e25374da32e705c.pts 04379243/expert_verified/points_label/482a76d14781e55e25374da32e705c.seg 04379243
+02691156/points/d06105ee2a2ae27c51008e496c6cfd2e.pts 02691156/expert_verified/points_label/d06105ee2a2ae27c51008e496c6cfd2e.seg 02691156
+04379243/points/45a09b1ce3111e4f22f4fabdf1ee0670.pts 04379243/expert_verified/points_label/45a09b1ce3111e4f22f4fabdf1ee0670.seg 04379243
+03467517/points/9aaad035af7e6ab1ed724609df3eb104.pts 03467517/expert_verified/points_label/9aaad035af7e6ab1ed724609df3eb104.seg 03467517
+02691156/points/cf0cdaa94220ee3f4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cf0cdaa94220ee3f4c3a35cee92bb95b.seg 02691156
+02691156/points/48cb2de06f46cde25ed29e0a9f14425.pts 02691156/expert_verified/points_label/48cb2de06f46cde25ed29e0a9f14425.seg 02691156
+03001627/points/2f0a94efe6d1da7f8616812464c86290.pts 03001627/expert_verified/points_label/2f0a94efe6d1da7f8616812464c86290.seg 03001627
+02691156/points/e0385af10bddc6a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/e0385af10bddc6a0ca8607f540cc62ba.seg 02691156
+03467517/points/71139bd2ff6c4257280ec2e5049bb369.pts 03467517/expert_verified/points_label/71139bd2ff6c4257280ec2e5049bb369.seg 03467517
+03001627/points/6251b398004a02fffebad4f49b26ec52.pts 03001627/expert_verified/points_label/6251b398004a02fffebad4f49b26ec52.seg 03001627
+03467517/points/7eba657565cc69e913f86abea5e4b9e0.pts 03467517/expert_verified/points_label/7eba657565cc69e913f86abea5e4b9e0.seg 03467517
+03001627/points/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.pts 03001627/expert_verified/points_label/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.seg 03001627
+03001627/points/ffa1e25f499e586694e98ee4fdfd7464.pts 03001627/expert_verified/points_label/ffa1e25f499e586694e98ee4fdfd7464.seg 03001627
+03797390/points/9af98540f45411467246665d3d3724c.pts 03797390/expert_verified/points_label/9af98540f45411467246665d3d3724c.seg 03797390
+02691156/points/b9fabfa6d5fedbc3a8e091cb544689d5.pts 02691156/expert_verified/points_label/b9fabfa6d5fedbc3a8e091cb544689d5.seg 02691156
+04379243/points/a2561614d015f2fdfebad4f49b26ec52.pts 04379243/expert_verified/points_label/a2561614d015f2fdfebad4f49b26ec52.seg 04379243
+03642806/points/2134ad3fc25a6284193a4c984002ed32.pts 03642806/expert_verified/points_label/2134ad3fc25a6284193a4c984002ed32.seg 03642806
+03001627/points/d3302b7fa6504cab1a461b43b8f257f.pts 03001627/expert_verified/points_label/d3302b7fa6504cab1a461b43b8f257f.seg 03001627
+03467517/points/bf7026f9814230414269db3f92b7aa5e.pts 03467517/expert_verified/points_label/bf7026f9814230414269db3f92b7aa5e.seg 03467517
+03636649/points/9aff9fdad0e3555c7eecb4e0df212ad9.pts 03636649/expert_verified/points_label/9aff9fdad0e3555c7eecb4e0df212ad9.seg 03636649
+03797390/points/a3cd44bbd3ba5b019a4cbf5d3b79df06.pts 03797390/expert_verified/points_label/a3cd44bbd3ba5b019a4cbf5d3b79df06.seg 03797390
+04099429/points/eff3a27a085e02e5146be45f8a3c1ff8.pts 04099429/expert_verified/points_label/eff3a27a085e02e5146be45f8a3c1ff8.seg 04099429
+02958343/points/1e3f494626a24badf35b4953d8add91f.pts 02958343/expert_verified/points_label/1e3f494626a24badf35b4953d8add91f.seg 02958343
+04379243/points/1f3e217cbc871152d7465eca206fda6f.pts 04379243/expert_verified/points_label/1f3e217cbc871152d7465eca206fda6f.seg 04379243
+03636649/points/cef6757831b4d9738c8f019f17f4687c.pts 03636649/expert_verified/points_label/cef6757831b4d9738c8f019f17f4687c.seg 03636649
+04379243/points/e8689b8b1610bf2841bb8a7ba579a58.pts 04379243/expert_verified/points_label/e8689b8b1610bf2841bb8a7ba579a58.seg 04379243
+03001627/points/40168f46019eb867be7e1d42d63ca9f0.pts 03001627/expert_verified/points_label/40168f46019eb867be7e1d42d63ca9f0.seg 03001627
+03624134/points/7aed22a7074f16431cf05d6e4dbb95af.pts 03624134/expert_verified/points_label/7aed22a7074f16431cf05d6e4dbb95af.seg 03624134
+04379243/points/5d53ed3005f4dc6856786b90799c4fdb.pts 04379243/expert_verified/points_label/5d53ed3005f4dc6856786b90799c4fdb.seg 04379243
+04379243/points/beebc267ea0c16a5c7f6a57f6f73d8a6.pts 04379243/expert_verified/points_label/beebc267ea0c16a5c7f6a57f6f73d8a6.seg 04379243
+04379243/points/943d786e2df9251ec76aead7da70af41.pts 04379243/expert_verified/points_label/943d786e2df9251ec76aead7da70af41.seg 04379243
+04379243/points/90d87b4d9a5a1e78f4b6538438a0b930.pts 04379243/expert_verified/points_label/90d87b4d9a5a1e78f4b6538438a0b930.seg 04379243
+02958343/points/d47353fc60390df85d918097f81825e3.pts 02958343/expert_verified/points_label/d47353fc60390df85d918097f81825e3.seg 02958343
+03624134/points/90021da7c71f6bcbf02ee453ff283e26.pts 03624134/expert_verified/points_label/90021da7c71f6bcbf02ee453ff283e26.seg 03624134
+02958343/points/d1acd4916d3d3b57c48db2ed8f5e994c.pts 02958343/expert_verified/points_label/d1acd4916d3d3b57c48db2ed8f5e994c.seg 02958343
+03001627/points/1d1c829a54f0ae426cdb122727dd360f.pts 03001627/expert_verified/points_label/1d1c829a54f0ae426cdb122727dd360f.seg 03001627
+04379243/points/c35a14f84985f92a9856fa70a578baeb.pts 04379243/expert_verified/points_label/c35a14f84985f92a9856fa70a578baeb.seg 04379243
+03636649/points/5c5119a226e1ce9934804d261199e1bf.pts 03636649/expert_verified/points_label/5c5119a226e1ce9934804d261199e1bf.seg 03636649
+03636649/points/6bb8020fa82b27dde11a3e838aa2c287.pts 03636649/expert_verified/points_label/6bb8020fa82b27dde11a3e838aa2c287.seg 03636649
+03797390/points/fad118b32085f3f2c2c72e575af174cd.pts 03797390/expert_verified/points_label/fad118b32085f3f2c2c72e575af174cd.seg 03797390
+04379243/points/a82387cf9d9d253aa06f94abffad1304.pts 04379243/expert_verified/points_label/a82387cf9d9d253aa06f94abffad1304.seg 04379243
+03948459/points/a7a340a901d63486260a770f90456bf7.pts 03948459/expert_verified/points_label/a7a340a901d63486260a770f90456bf7.seg 03948459
+03624134/points/60e7b05ddeeb48eb37fa2c3ecb75f337.pts 03624134/expert_verified/points_label/60e7b05ddeeb48eb37fa2c3ecb75f337.seg 03624134
+02958343/points/3e2c3cb4f4c65b9cde9d4070fcdfa604.pts 02958343/expert_verified/points_label/3e2c3cb4f4c65b9cde9d4070fcdfa604.seg 02958343
+03001627/points/d58df0968070bf3b4b3c42e318f3affc.pts 03001627/expert_verified/points_label/d58df0968070bf3b4b3c42e318f3affc.seg 03001627
+04379243/points/4a3641784a9ecca04fa8d6439169bda4.pts 04379243/expert_verified/points_label/4a3641784a9ecca04fa8d6439169bda4.seg 04379243
+04225987/points/d31aaca67fd8ef1827d17dabad15093.pts 04225987/expert_verified/points_label/d31aaca67fd8ef1827d17dabad15093.seg 04225987
+03001627/points/c51937167dd0db45f7628281ecb18112.pts 03001627/expert_verified/points_label/c51937167dd0db45f7628281ecb18112.seg 03001627
+04379243/points/768cb2332a16fd63855931d119219022.pts 04379243/expert_verified/points_label/768cb2332a16fd63855931d119219022.seg 04379243
+03001627/points/8c76176c82e3e42d283b00891f680579.pts 03001627/expert_verified/points_label/8c76176c82e3e42d283b00891f680579.seg 03001627
+03001627/points/d4d9b991ff7d31e8c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/d4d9b991ff7d31e8c8687ff9b0b4e4ac.seg 03001627
+03797390/points/162201dfe14b73f0281365259d1cf342.pts 03797390/expert_verified/points_label/162201dfe14b73f0281365259d1cf342.seg 03797390
+04379243/points/ed1e06e886b5514fe8f49d7c9e73ab9.pts 04379243/expert_verified/points_label/ed1e06e886b5514fe8f49d7c9e73ab9.seg 04379243
+03636649/points/90651b3febfc3afe15226aa76eb7c3e.pts 03636649/expert_verified/points_label/90651b3febfc3afe15226aa76eb7c3e.seg 03636649
+04379243/points/24b208dd138d8af36210db75a4cd581b.pts 04379243/expert_verified/points_label/24b208dd138d8af36210db75a4cd581b.seg 04379243
+03001627/points/439418b35f600f4bb10dc0fca58d0b2c.pts 03001627/expert_verified/points_label/439418b35f600f4bb10dc0fca58d0b2c.seg 03001627
+03636649/points/88257c5a48d94b1e2b151d8b52c53b90.pts 03636649/expert_verified/points_label/88257c5a48d94b1e2b151d8b52c53b90.seg 03636649
+02691156/points/ad546b049b2246bd609e2d916fa0da27.pts 02691156/expert_verified/points_label/ad546b049b2246bd609e2d916fa0da27.seg 02691156
+03001627/points/7efeece3b5cf2853d706779c93538ee1.pts 03001627/expert_verified/points_label/7efeece3b5cf2853d706779c93538ee1.seg 03001627
+04379243/points/30dd74f09af6b1c2fe5c8ffd0f5eba47.pts 04379243/expert_verified/points_label/30dd74f09af6b1c2fe5c8ffd0f5eba47.seg 04379243
+02691156/points/752d9a010346862551cfdb4c9f126c12.pts 02691156/expert_verified/points_label/752d9a010346862551cfdb4c9f126c12.seg 02691156
+03001627/points/d1237422881f4d22ff25b0c2db862d19.pts 03001627/expert_verified/points_label/d1237422881f4d22ff25b0c2db862d19.seg 03001627
+04379243/points/95af60aa8cb9be066a76e23e6f966dea.pts 04379243/expert_verified/points_label/95af60aa8cb9be066a76e23e6f966dea.seg 04379243
+02691156/points/556d2b99469e62e623a346a784afd6ba.pts 02691156/expert_verified/points_label/556d2b99469e62e623a346a784afd6ba.seg 02691156
+04379243/points/6e23179a3559775a65eacc25f128a1c5.pts 04379243/expert_verified/points_label/6e23179a3559775a65eacc25f128a1c5.seg 04379243
+02691156/points/3b82e575165383903c83f6e156ad107a.pts 02691156/expert_verified/points_label/3b82e575165383903c83f6e156ad107a.seg 02691156
+02773838/points/71ead7f072106c63ed13f430b2941481.pts 02773838/expert_verified/points_label/71ead7f072106c63ed13f430b2941481.seg 02773838
+03001627/points/c9d68e1e5309ac25ac57e7d566628472.pts 03001627/expert_verified/points_label/c9d68e1e5309ac25ac57e7d566628472.seg 03001627
+02691156/points/b3a59a941500e76535592b447835a16e.pts 02691156/expert_verified/points_label/b3a59a941500e76535592b447835a16e.seg 02691156
+03797390/points/4d9764afa3fbeb1b6c69dceb67157a66.pts 03797390/expert_verified/points_label/4d9764afa3fbeb1b6c69dceb67157a66.seg 03797390
+04379243/points/68ea1f319a9d724ec3bd24f986301745.pts 04379243/expert_verified/points_label/68ea1f319a9d724ec3bd24f986301745.seg 04379243
+03001627/points/30363681727c804095937f6e581cbd41.pts 03001627/expert_verified/points_label/30363681727c804095937f6e581cbd41.seg 03001627
+03001627/points/f4f1aba65ebe48eb70930286c914896b.pts 03001627/expert_verified/points_label/f4f1aba65ebe48eb70930286c914896b.seg 03001627
+02691156/points/a3fc9ef9f611a783525e60273896d30a.pts 02691156/expert_verified/points_label/a3fc9ef9f611a783525e60273896d30a.seg 02691156
+03636649/points/b0871c4ac8505d9c3d39d8012919dd25.pts 03636649/expert_verified/points_label/b0871c4ac8505d9c3d39d8012919dd25.seg 03636649
+03001627/points/d7e26a070ee3b35cdf6cfab91d65bb91.pts 03001627/expert_verified/points_label/d7e26a070ee3b35cdf6cfab91d65bb91.seg 03001627
+04379243/points/9012c6ca245c1bf4e6c5cd45aa112726.pts 04379243/expert_verified/points_label/9012c6ca245c1bf4e6c5cd45aa112726.seg 04379243
+03636649/points/3ab9e4300cee0259f72e8839e840c146.pts 03636649/expert_verified/points_label/3ab9e4300cee0259f72e8839e840c146.seg 03636649
+04379243/points/6e0fed54fcae8a62edccc47bf0dcf5d3.pts 04379243/expert_verified/points_label/6e0fed54fcae8a62edccc47bf0dcf5d3.seg 04379243
+04379243/points/aafc579804cc095cbababe11fcea8796.pts 04379243/expert_verified/points_label/aafc579804cc095cbababe11fcea8796.seg 04379243
+03636649/points/9adee08c737c7c134c6deb9ede0648df.pts 03636649/expert_verified/points_label/9adee08c737c7c134c6deb9ede0648df.seg 03636649
+02691156/points/f39985959d394f8c863ab010b80d9ed.pts 02691156/expert_verified/points_label/f39985959d394f8c863ab010b80d9ed.seg 02691156
+04379243/points/23d4170c7a0a2a014b3c42e318f3affc.pts 04379243/expert_verified/points_label/23d4170c7a0a2a014b3c42e318f3affc.seg 04379243
+04379243/points/a1593fbe3a78c7858795000a72749c36.pts 04379243/expert_verified/points_label/a1593fbe3a78c7858795000a72749c36.seg 04379243
+03001627/points/4b2ede169dcc83ce4591019e9d133858.pts 03001627/expert_verified/points_label/4b2ede169dcc83ce4591019e9d133858.seg 03001627
+03001627/points/3fa1eeed2e8e2534febad4f49b26ec52.pts 03001627/expert_verified/points_label/3fa1eeed2e8e2534febad4f49b26ec52.seg 03001627
+04379243/points/e8ba9621aef9395a3019620286259e2c.pts 04379243/expert_verified/points_label/e8ba9621aef9395a3019620286259e2c.seg 04379243
+03001627/points/875925d42780159ffebad4f49b26ec52.pts 03001627/expert_verified/points_label/875925d42780159ffebad4f49b26ec52.seg 03001627
+03001627/points/548ab6b6e8b2dc505ff61a3a2a0e2484.pts 03001627/expert_verified/points_label/548ab6b6e8b2dc505ff61a3a2a0e2484.seg 03001627
+03467517/points/4f401d78068a9d348ee96618ee16ca27.pts 03467517/expert_verified/points_label/4f401d78068a9d348ee96618ee16ca27.seg 03467517
+04379243/points/f7600660924857c0d31d0d81bfe9c743.pts 04379243/expert_verified/points_label/f7600660924857c0d31d0d81bfe9c743.seg 04379243
+04379243/points/edba7eb533ae3578ece232edf44331c7.pts 04379243/expert_verified/points_label/edba7eb533ae3578ece232edf44331c7.seg 04379243
+03001627/points/8b8fa92f9c677b0713decb1a0563b12.pts 03001627/expert_verified/points_label/8b8fa92f9c677b0713decb1a0563b12.seg 03001627
+02691156/points/81e6b629264dad5daf2c6c19cc41708a.pts 02691156/expert_verified/points_label/81e6b629264dad5daf2c6c19cc41708a.seg 02691156
+02691156/points/a0a7e673a1e1bca78699933784576e73.pts 02691156/expert_verified/points_label/a0a7e673a1e1bca78699933784576e73.seg 02691156
+03636649/points/f01358d4f45cae23ce670f026edf07e5.pts 03636649/expert_verified/points_label/f01358d4f45cae23ce670f026edf07e5.seg 03636649
+03001627/points/808fa82fe9ad86d9f1cc184b6fa3e1f9.pts 03001627/expert_verified/points_label/808fa82fe9ad86d9f1cc184b6fa3e1f9.seg 03001627
+02691156/points/57937c7ab42260ebf119374ee5d5f944.pts 02691156/expert_verified/points_label/57937c7ab42260ebf119374ee5d5f944.seg 02691156
+03001627/points/fbddac94cfa74a7b5c0228148b88226c.pts 03001627/expert_verified/points_label/fbddac94cfa74a7b5c0228148b88226c.seg 03001627
+04379243/points/ad92bfc65465091c48d90eef8384210.pts 04379243/expert_verified/points_label/ad92bfc65465091c48d90eef8384210.seg 04379243
+03467517/points/6ce23c82af30b629e8f705eb96ba3376.pts 03467517/expert_verified/points_label/6ce23c82af30b629e8f705eb96ba3376.seg 03467517
+03001627/points/bd1787066323c7a64424fc4d3c9cb157.pts 03001627/expert_verified/points_label/bd1787066323c7a64424fc4d3c9cb157.seg 03001627
+03001627/points/uca24feec-f0c0-454c-baaf-561530686f40.pts 03001627/expert_verified/points_label/uca24feec-f0c0-454c-baaf-561530686f40.seg 03001627
+03001627/points/226704c72560008421ceb39dc3069834.pts 03001627/expert_verified/points_label/226704c72560008421ceb39dc3069834.seg 03001627
+02691156/points/2c49289098e4492bca8607f540cc62ba.pts 02691156/expert_verified/points_label/2c49289098e4492bca8607f540cc62ba.seg 02691156
+03001627/points/cff9a523a9e20eaeb40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/cff9a523a9e20eaeb40f0ac0fb9a650d.seg 03001627
+04379243/points/38e90183c838f443b43753a53e4593db.pts 04379243/expert_verified/points_label/38e90183c838f443b43753a53e4593db.seg 04379243
+04379243/points/8b4ec70a3c1283b1fb5f8baea920e189.pts 04379243/expert_verified/points_label/8b4ec70a3c1283b1fb5f8baea920e189.seg 04379243
+04379243/points/59a1703cb9320c018f49a52c8d710d0f.pts 04379243/expert_verified/points_label/59a1703cb9320c018f49a52c8d710d0f.seg 04379243
+03636649/points/4ba237c2c40313f373b3ec02b97cb0f.pts 03636649/expert_verified/points_label/4ba237c2c40313f373b3ec02b97cb0f.seg 03636649
+04379243/points/bb027ed892722b1f3399de188dc5ee56.pts 04379243/expert_verified/points_label/bb027ed892722b1f3399de188dc5ee56.seg 04379243
+03467517/points/8b1d0f73e54ef59c93f0194265a9746c.pts 03467517/expert_verified/points_label/8b1d0f73e54ef59c93f0194265a9746c.seg 03467517
+03467517/points/1300e8bafb819c8e1887f40a4f62df44.pts 03467517/expert_verified/points_label/1300e8bafb819c8e1887f40a4f62df44.seg 03467517
+03642806/points/9fa387d7f442b96e75e60c00fabe2744.pts 03642806/expert_verified/points_label/9fa387d7f442b96e75e60c00fabe2744.seg 03642806
+04379243/points/e153f757330a4ea3cdd1f51ef2b8f2ed.pts 04379243/expert_verified/points_label/e153f757330a4ea3cdd1f51ef2b8f2ed.seg 04379243
+03636649/points/d00157a022079bdef3655a2ce983ab1f.pts 03636649/expert_verified/points_label/d00157a022079bdef3655a2ce983ab1f.seg 03636649
+04379243/points/9eeea5f7b030ff6ac155f88004a92bc8.pts 04379243/expert_verified/points_label/9eeea5f7b030ff6ac155f88004a92bc8.seg 04379243
+04379243/points/10ed64b4c7eb6d9311ee7ca4f000feba.pts 04379243/expert_verified/points_label/10ed64b4c7eb6d9311ee7ca4f000feba.seg 04379243
+03001627/points/6db2255a51caf84e823e7e244bf84209.pts 03001627/expert_verified/points_label/6db2255a51caf84e823e7e244bf84209.seg 03001627
+03001627/points/8ddaa112e6ba36b5b1e23c7675c49239.pts 03001627/expert_verified/points_label/8ddaa112e6ba36b5b1e23c7675c49239.seg 03001627
+04379243/points/7813f4e4c0a58118cbb8bac2032149c.pts 04379243/expert_verified/points_label/7813f4e4c0a58118cbb8bac2032149c.seg 04379243
+03797390/points/336122c3105440d193e42e2720468bf0.pts 03797390/expert_verified/points_label/336122c3105440d193e42e2720468bf0.seg 03797390
+03001627/points/f2e2993abf4c952b2e69a7e134f91051.pts 03001627/expert_verified/points_label/f2e2993abf4c952b2e69a7e134f91051.seg 03001627
+04379243/points/627248fa64c1db5fab610b0c94236463.pts 04379243/expert_verified/points_label/627248fa64c1db5fab610b0c94236463.seg 04379243
+04379243/points/3b465822b34ed20ca05d3424fd8d541a.pts 04379243/expert_verified/points_label/3b465822b34ed20ca05d3424fd8d541a.seg 04379243
+03467517/points/a7ddf2e5b9dc278293f0194265a9746c.pts 03467517/expert_verified/points_label/a7ddf2e5b9dc278293f0194265a9746c.seg 03467517
+03636649/points/b36bfbbc98cb45431735ea0e092a805a.pts 03636649/expert_verified/points_label/b36bfbbc98cb45431735ea0e092a805a.seg 03636649
+04379243/points/7d14ae7d0b7338bda0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/7d14ae7d0b7338bda0ab1d82ef09f78f.seg 04379243
+03467517/points/f7645b3c690d954682c2412261cb8600.pts 03467517/expert_verified/points_label/f7645b3c690d954682c2412261cb8600.seg 03467517
+02958343/points/41a6deadd39b4c754d0f9a1ef5f184fe.pts 02958343/expert_verified/points_label/41a6deadd39b4c754d0f9a1ef5f184fe.seg 02958343
+02691156/points/f74cbd91e6fb40dfce5965228d7e8c9f.pts 02691156/expert_verified/points_label/f74cbd91e6fb40dfce5965228d7e8c9f.seg 02691156
+04379243/points/6c4c3bfe275e66b1b75e606711562bfc.pts 04379243/expert_verified/points_label/6c4c3bfe275e66b1b75e606711562bfc.seg 04379243
+04379243/points/7d358a01c9467815a9505c473725122e.pts 04379243/expert_verified/points_label/7d358a01c9467815a9505c473725122e.seg 04379243
+04379243/points/5fe3476df92392e1397aad305ec14786.pts 04379243/expert_verified/points_label/5fe3476df92392e1397aad305ec14786.seg 04379243
+03001627/points/34d3960d35d8d5219b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/34d3960d35d8d5219b9f2eb77f5e247e.seg 03001627
+03001627/points/1b67a3a1101a9acb905477d2a8504646.pts 03001627/expert_verified/points_label/1b67a3a1101a9acb905477d2a8504646.seg 03001627
+03001627/points/ee4858f78dc33591100e9bd5c4b0af54.pts 03001627/expert_verified/points_label/ee4858f78dc33591100e9bd5c4b0af54.seg 03001627
+03001627/points/a578b0027e7d9ec7b2ca3ea77e53abe.pts 03001627/expert_verified/points_label/a578b0027e7d9ec7b2ca3ea77e53abe.seg 03001627
+02691156/points/916950e40ca7aabc8b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/916950e40ca7aabc8b96ae1a0a8b84ec.seg 02691156
+04379243/points/1abfb0c03c81fc2219fb4103277a6b93.pts 04379243/expert_verified/points_label/1abfb0c03c81fc2219fb4103277a6b93.seg 04379243
+02691156/points/a702da03d770f5096e2738fc9da60e6f.pts 02691156/expert_verified/points_label/a702da03d770f5096e2738fc9da60e6f.seg 02691156
+04379243/points/2e2894138df855b26f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/2e2894138df855b26f88aa1b7f7cc6c6.seg 04379243
+03001627/points/589cd6a1f4367fd834b707eb14b2a4a5.pts 03001627/expert_verified/points_label/589cd6a1f4367fd834b707eb14b2a4a5.seg 03001627
+03636649/points/f8534299ecce5c16eaf14273fa406ffc.pts 03636649/expert_verified/points_label/f8534299ecce5c16eaf14273fa406ffc.seg 03636649
+04379243/points/ea96b8a866121d1abed1bd9593e318c.pts 04379243/expert_verified/points_label/ea96b8a866121d1abed1bd9593e318c.seg 04379243
+03624134/points/9746101f20473d346bbd83c2bc4c3b2e.pts 03624134/expert_verified/points_label/9746101f20473d346bbd83c2bc4c3b2e.seg 03624134
+02958343/points/9c4a3879c71df693af0f25977186b501.pts 02958343/expert_verified/points_label/9c4a3879c71df693af0f25977186b501.seg 02958343
+03001627/points/6621723f7af35f2dcd344c2b2cefcda6.pts 03001627/expert_verified/points_label/6621723f7af35f2dcd344c2b2cefcda6.seg 03001627
+03948459/points/8c9e592c95f95e7c9a6e43b878d5b335.pts 03948459/expert_verified/points_label/8c9e592c95f95e7c9a6e43b878d5b335.seg 03948459
+04379243/points/36a6d851dbe02410ad16260d4d73b56.pts 04379243/expert_verified/points_label/36a6d851dbe02410ad16260d4d73b56.seg 04379243
+04379243/points/b1ca280d9567270ade98d10ab5975b59.pts 04379243/expert_verified/points_label/b1ca280d9567270ade98d10ab5975b59.seg 04379243
+03467517/points/5ed99a0b793e1f5ee52744498b9b3051.pts 03467517/expert_verified/points_label/5ed99a0b793e1f5ee52744498b9b3051.seg 03467517
+03001627/points/18fd8342fa5d1d4f5268b70948af88b2.pts 03001627/expert_verified/points_label/18fd8342fa5d1d4f5268b70948af88b2.seg 03001627
+02691156/points/cc60baa1a796f5c14c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cc60baa1a796f5c14c3a35cee92bb95b.seg 02691156
+03642806/points/3237f5cd4bca555955357c338ec9641.pts 03642806/expert_verified/points_label/3237f5cd4bca555955357c338ec9641.seg 03642806
+03001627/points/fee248777c9c4807f8bc1f8036e08e44.pts 03001627/expert_verified/points_label/fee248777c9c4807f8bc1f8036e08e44.seg 03001627
+04379243/points/2d90a1998eca8778dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2d90a1998eca8778dcfcef693e7ec696.seg 04379243
+02958343/points/3ef7cfbc172840b2393bf61b30c528bb.pts 02958343/expert_verified/points_label/3ef7cfbc172840b2393bf61b30c528bb.seg 02958343
+02691156/points/240fd3c1fd804ec1b8cf782e8c539948.pts 02691156/expert_verified/points_label/240fd3c1fd804ec1b8cf782e8c539948.seg 02691156
+04379243/points/60c931dcc6d0982944bda2555d37e46.pts 04379243/expert_verified/points_label/60c931dcc6d0982944bda2555d37e46.seg 04379243
+04379243/points/93040a14fad5588ed889130b88839a0c.pts 04379243/expert_verified/points_label/93040a14fad5588ed889130b88839a0c.seg 04379243
+02958343/points/a75ff576da012340468bac13e007a6e9.pts 02958343/expert_verified/points_label/a75ff576da012340468bac13e007a6e9.seg 02958343
+03467517/points/57286d92604c9ebea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/57286d92604c9ebea3d3eb77b119df6d.seg 03467517
+03636649/points/913ba6b6ac6aea3356c82fefb25b338b.pts 03636649/expert_verified/points_label/913ba6b6ac6aea3356c82fefb25b338b.seg 03636649
+03001627/points/cce9ffdcc7ca8ddea300840c9d7bfa74.pts 03001627/expert_verified/points_label/cce9ffdcc7ca8ddea300840c9d7bfa74.seg 03001627
+04379243/points/913c0ff011ad0658dcfcef693e7ec696.pts 04379243/expert_verified/points_label/913c0ff011ad0658dcfcef693e7ec696.seg 04379243
+03001627/points/9d0b25421c13008e35836c728d324152.pts 03001627/expert_verified/points_label/9d0b25421c13008e35836c728d324152.seg 03001627
+03797390/points/a8f7a0edd3edc3299e54b4084dc33544.pts 03797390/expert_verified/points_label/a8f7a0edd3edc3299e54b4084dc33544.seg 03797390
+04379243/points/5b9a7b7952996844d802aa676be38da2.pts 04379243/expert_verified/points_label/5b9a7b7952996844d802aa676be38da2.seg 04379243
+02954340/points/4bd0b6df02772d8f59c9250a427b57f.pts 02954340/expert_verified/points_label/4bd0b6df02772d8f59c9250a427b57f.seg 02954340
+02958343/points/a72134cd499fd1c4f79e091fa09130a.pts 02958343/expert_verified/points_label/a72134cd499fd1c4f79e091fa09130a.seg 02958343
+04379243/points/cc6fbdc6f2aa5ea3d889130b88839a0c.pts 04379243/expert_verified/points_label/cc6fbdc6f2aa5ea3d889130b88839a0c.seg 04379243
+03624134/points/85ced924eedc6ff566b5b592ed1ddee0.pts 03624134/expert_verified/points_label/85ced924eedc6ff566b5b592ed1ddee0.seg 03624134
+03001627/points/60622d74c0712934a5817f81a1efa3cc.pts 03001627/expert_verified/points_label/60622d74c0712934a5817f81a1efa3cc.seg 03001627
+04379243/points/2633f011b236a8979070b65ce7b4b532.pts 04379243/expert_verified/points_label/2633f011b236a8979070b65ce7b4b532.seg 04379243
+03001627/points/9d9d69e5f2bc80a867903707764646db.pts 03001627/expert_verified/points_label/9d9d69e5f2bc80a867903707764646db.seg 03001627
+03001627/points/ce463d63d8771c5ccf19858fd1963d10.pts 03001627/expert_verified/points_label/ce463d63d8771c5ccf19858fd1963d10.seg 03001627
+04379243/points/ad17445446e4fd3adcfcef693e7ec696.pts 04379243/expert_verified/points_label/ad17445446e4fd3adcfcef693e7ec696.seg 04379243
+03001627/points/71372c1f20b6a04c43c40c5aa3d5c5b7.pts 03001627/expert_verified/points_label/71372c1f20b6a04c43c40c5aa3d5c5b7.seg 03001627
+02691156/points/9436273fc1a5e3ca7af159eaf7625abf.pts 02691156/expert_verified/points_label/9436273fc1a5e3ca7af159eaf7625abf.seg 02691156
+03797390/points/b98fa11a567f644344b25d683fe71de.pts 03797390/expert_verified/points_label/b98fa11a567f644344b25d683fe71de.seg 03797390
+02691156/points/53eee66291c47a91bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/53eee66291c47a91bc0909d98a1ff2b4.seg 02691156
+03642806/points/e55ececde88255b93e73f3893a7337bb.pts 03642806/expert_verified/points_label/e55ececde88255b93e73f3893a7337bb.seg 03642806
+02958343/points/1079efee042629d4ce28f0f1b509eda.pts 02958343/expert_verified/points_label/1079efee042629d4ce28f0f1b509eda.seg 02958343
+03001627/points/c826c65111c867ab45a1df43bcd9e471.pts 03001627/expert_verified/points_label/c826c65111c867ab45a1df43bcd9e471.seg 03001627
+02958343/points/39201299cf83ec2577763486d77d1cb.pts 02958343/expert_verified/points_label/39201299cf83ec2577763486d77d1cb.seg 02958343
+04379243/points/e8c01f71fd941af11190e285a2cbc9c.pts 04379243/expert_verified/points_label/e8c01f71fd941af11190e285a2cbc9c.seg 04379243
+03001627/points/948f1555282e27da190c615a2115d2f7.pts 03001627/expert_verified/points_label/948f1555282e27da190c615a2115d2f7.seg 03001627
+02691156/points/ca4ec545363b3b8e8c2814a4ead9cb90.pts 02691156/expert_verified/points_label/ca4ec545363b3b8e8c2814a4ead9cb90.seg 02691156
+03001627/points/b8f4ce34b44620cc9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/b8f4ce34b44620cc9b9f2eb77f5e247e.seg 03001627
+02958343/points/188621bbfc7d9477ce27281f3b76d1f5.pts 02958343/expert_verified/points_label/188621bbfc7d9477ce27281f3b76d1f5.seg 02958343
+04379243/points/9a71b92445cd3f023a9bc242c86fb7a0.pts 04379243/expert_verified/points_label/9a71b92445cd3f023a9bc242c86fb7a0.seg 04379243
+03001627/points/4372b33dfc84c2f56a9ab6fc87e1604e.pts 03001627/expert_verified/points_label/4372b33dfc84c2f56a9ab6fc87e1604e.seg 03001627
+03001627/points/b16f1858c1a7c0a65001cb19c4a0eee4.pts 03001627/expert_verified/points_label/b16f1858c1a7c0a65001cb19c4a0eee4.seg 03001627
+03467517/points/5238adec0790595930c206f77b5cb4d0.pts 03467517/expert_verified/points_label/5238adec0790595930c206f77b5cb4d0.seg 03467517
+02958343/points/3ec7f0347638f7a891eea2fc80d4a25f.pts 02958343/expert_verified/points_label/3ec7f0347638f7a891eea2fc80d4a25f.seg 02958343
+02691156/points/32e7224d196e5866bd564bd76cf3cbec.pts 02691156/expert_verified/points_label/32e7224d196e5866bd564bd76cf3cbec.seg 02691156
+04379243/points/f9beeefdebf70350f4b6538438a0b930.pts 04379243/expert_verified/points_label/f9beeefdebf70350f4b6538438a0b930.seg 04379243
+04379243/points/acbc99e153b9d4d419fb4103277a6b93.pts 04379243/expert_verified/points_label/acbc99e153b9d4d419fb4103277a6b93.seg 04379243
+03467517/points/8ebc3d48afeceec752561cc0fb924c36.pts 03467517/expert_verified/points_label/8ebc3d48afeceec752561cc0fb924c36.seg 03467517
+04379243/points/966cef675324e416cd415550f639925.pts 04379243/expert_verified/points_label/966cef675324e416cd415550f639925.seg 04379243
+03636649/points/85f71a4724fa37c33d39d8012919dd25.pts 03636649/expert_verified/points_label/85f71a4724fa37c33d39d8012919dd25.seg 03636649
+03636649/points/370623095c9773e42ce7d46577f8a9bd.pts 03636649/expert_verified/points_label/370623095c9773e42ce7d46577f8a9bd.seg 03636649
+03624134/points/bbe934c9cdca9c1839ec49305bb07d3d.pts 03624134/expert_verified/points_label/bbe934c9cdca9c1839ec49305bb07d3d.seg 03624134
+02958343/points/d22a2d20acbdca70c972ff3f74d38438.pts 02958343/expert_verified/points_label/d22a2d20acbdca70c972ff3f74d38438.seg 02958343
+02958343/points/ff3c8e21a48ed17cc1bcae9def1986da.pts 02958343/expert_verified/points_label/ff3c8e21a48ed17cc1bcae9def1986da.seg 02958343
+03001627/points/fd5ca05b59b30241d838ae16242881dc.pts 03001627/expert_verified/points_label/fd5ca05b59b30241d838ae16242881dc.seg 03001627
+02691156/points/e3aff5ae3e8f2a7c4c2c88971423d0be.pts 02691156/expert_verified/points_label/e3aff5ae3e8f2a7c4c2c88971423d0be.seg 02691156
+02691156/points/b4575e5e6161fd497b164268a44f7712.pts 02691156/expert_verified/points_label/b4575e5e6161fd497b164268a44f7712.seg 02691156
+03467517/points/153e7883f6cf0e66d57700c05b1862d8.pts 03467517/expert_verified/points_label/153e7883f6cf0e66d57700c05b1862d8.seg 03467517
+03642806/points/4fc3d56243d2d8801ef1ccfaf50f2048.pts 03642806/expert_verified/points_label/4fc3d56243d2d8801ef1ccfaf50f2048.seg 03642806
+04379243/points/ec9861c234daf6bc915f51b5f5e95ffa.pts 04379243/expert_verified/points_label/ec9861c234daf6bc915f51b5f5e95ffa.seg 04379243
+03001627/points/7114ef00fe68d053cccbd142483bf2e7.pts 03001627/expert_verified/points_label/7114ef00fe68d053cccbd142483bf2e7.seg 03001627
+02691156/points/e812f54386acd072d44f37c9e0fb10d0.pts 02691156/expert_verified/points_label/e812f54386acd072d44f37c9e0fb10d0.seg 02691156
+03001627/points/5490efbdadce792f524f4eb395a8604.pts 03001627/expert_verified/points_label/5490efbdadce792f524f4eb395a8604.seg 03001627
+03948459/points/42740af029297f1d9874fa4c7b1a4298.pts 03948459/expert_verified/points_label/42740af029297f1d9874fa4c7b1a4298.seg 03948459
+03001627/points/d1ec6e9b8063b7efd7f7a4c4609b0913.pts 03001627/expert_verified/points_label/d1ec6e9b8063b7efd7f7a4c4609b0913.seg 03001627
+04379243/points/4b11be42b0c0482dd94faaee2b20e2bf.pts 04379243/expert_verified/points_label/4b11be42b0c0482dd94faaee2b20e2bf.seg 04379243
+03001627/points/d29971cef754cc91cd8c5d1ba690a2c3.pts 03001627/expert_verified/points_label/d29971cef754cc91cd8c5d1ba690a2c3.seg 03001627
+04379243/points/8cc8485f249a37f595b25bd3accf45b5.pts 04379243/expert_verified/points_label/8cc8485f249a37f595b25bd3accf45b5.seg 04379243
+04379243/points/bb5dbf708d5eb7f82099f9e22ca45b04.pts 04379243/expert_verified/points_label/bb5dbf708d5eb7f82099f9e22ca45b04.seg 04379243
+03001627/points/c1b64fef5f3efa0a129905ebfd12d5cd.pts 03001627/expert_verified/points_label/c1b64fef5f3efa0a129905ebfd12d5cd.seg 03001627
+04379243/points/e58e958428584b2b79972b30518c97e2.pts 04379243/expert_verified/points_label/e58e958428584b2b79972b30518c97e2.seg 04379243
+03790512/points/90a521e0def2631fd5dde04c96fd8146.pts 03790512/expert_verified/points_label/90a521e0def2631fd5dde04c96fd8146.seg 03790512
+03467517/points/fcab134da044e5fc77f469126771fc30.pts 03467517/expert_verified/points_label/fcab134da044e5fc77f469126771fc30.seg 03467517
+03001627/points/1d6faeb6d77d1f2cf95cd8df6bebbc3a.pts 03001627/expert_verified/points_label/1d6faeb6d77d1f2cf95cd8df6bebbc3a.seg 03001627
+04379243/points/e993ddaf6d03003071a782a4379556c7.pts 04379243/expert_verified/points_label/e993ddaf6d03003071a782a4379556c7.seg 04379243
+03001627/points/702cebffa33a19f019f079d1b712f46f.pts 03001627/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 03001627
+03790512/points/7b4eb8cbc470d0d6d5dde04c96fd8146.pts 03790512/expert_verified/points_label/7b4eb8cbc470d0d6d5dde04c96fd8146.seg 03790512
+03001627/points/9515e377c1ec86529b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9515e377c1ec86529b9f2eb77f5e247e.seg 03001627
+03001627/points/9c3d7b65c739a618285330f26226f8fb.pts 03001627/expert_verified/points_label/9c3d7b65c739a618285330f26226f8fb.seg 03001627
+03790512/points/8ed4bdaf0c8b88ea8b31e74d456742c7.pts 03790512/expert_verified/points_label/8ed4bdaf0c8b88ea8b31e74d456742c7.seg 03790512
+02958343/points/6ed2957beeb7940a9fbaa69916aaebda.pts 02958343/expert_verified/points_label/6ed2957beeb7940a9fbaa69916aaebda.seg 02958343
+03001627/points/37e2b82d5e9dde21cbde89e0c48a01bf.pts 03001627/expert_verified/points_label/37e2b82d5e9dde21cbde89e0c48a01bf.seg 03001627
+04379243/points/1b6bd64fda74bdc4d6983f351200ac6a.pts 04379243/expert_verified/points_label/1b6bd64fda74bdc4d6983f351200ac6a.seg 04379243
+04379243/points/531381f5bbc69e485769b3af36a2ff9f.pts 04379243/expert_verified/points_label/531381f5bbc69e485769b3af36a2ff9f.seg 04379243
+03790512/points/992fbae5178edcbc4e31d0cb4d7568.pts 03790512/expert_verified/points_label/992fbae5178edcbc4e31d0cb4d7568.seg 03790512
+04379243/points/65e7fd8d158658106a76e23e6f966dea.pts 04379243/expert_verified/points_label/65e7fd8d158658106a76e23e6f966dea.seg 04379243
+02691156/points/2229bc4e646f506679f56e78e8640bfb.pts 02691156/expert_verified/points_label/2229bc4e646f506679f56e78e8640bfb.seg 02691156
+02954340/points/f40b47fcbf83b962f0d11ae402ef940e.pts 02954340/expert_verified/points_label/f40b47fcbf83b962f0d11ae402ef940e.seg 02954340
+02773838/points/cbc2328cadf8dc573394926146371698.pts 02773838/expert_verified/points_label/cbc2328cadf8dc573394926146371698.seg 02773838
+02958343/points/3c6d7c6ce950917b3a93df79ef2b80ef.pts 02958343/expert_verified/points_label/3c6d7c6ce950917b3a93df79ef2b80ef.seg 02958343
+02958343/points/2ccaaa66525d7f095473e57e894e0ef5.pts 02958343/expert_verified/points_label/2ccaaa66525d7f095473e57e894e0ef5.seg 02958343
+02691156/points/70d9304de59792a9515d73fcb34092fc.pts 02691156/expert_verified/points_label/70d9304de59792a9515d73fcb34092fc.seg 02691156
+03001627/points/2ed8d45343a442097869557127addfc0.pts 03001627/expert_verified/points_label/2ed8d45343a442097869557127addfc0.seg 03001627
+04379243/points/84f5e52756fc84f86df14337f24e49f4.pts 04379243/expert_verified/points_label/84f5e52756fc84f86df14337f24e49f4.seg 04379243
+03001627/points/b33a3b1627ad61eb8ca4809dcf42fe1.pts 03001627/expert_verified/points_label/b33a3b1627ad61eb8ca4809dcf42fe1.seg 03001627
+04379243/points/369c19c0971221f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/369c19c0971221f3664b3b9b23ddfcbc.seg 04379243
+03642806/points/5a13f7551c20eb29f3ebfe51dc60263e.pts 03642806/expert_verified/points_label/5a13f7551c20eb29f3ebfe51dc60263e.seg 03642806
+04379243/points/1b01ef65920c342323bdffac38e6b250.pts 04379243/expert_verified/points_label/1b01ef65920c342323bdffac38e6b250.seg 04379243
+02691156/points/9b687f9cff46d43d89c2da356f872ebc.pts 02691156/expert_verified/points_label/9b687f9cff46d43d89c2da356f872ebc.seg 02691156
+04379243/points/746ceaf694d85eb5d5192f88466da1dc.pts 04379243/expert_verified/points_label/746ceaf694d85eb5d5192f88466da1dc.seg 04379243
+04379243/points/9f4eb0d734a2b7a4ab610b0c94236463.pts 04379243/expert_verified/points_label/9f4eb0d734a2b7a4ab610b0c94236463.seg 04379243
+03001627/points/a1213da0e7efffcafebad4f49b26ec52.pts 03001627/expert_verified/points_label/a1213da0e7efffcafebad4f49b26ec52.seg 03001627
+02958343/points/71b00ea32b1810ac373af83f3f2fe606.pts 02958343/expert_verified/points_label/71b00ea32b1810ac373af83f3f2fe606.seg 02958343
+02691156/points/52a84fea7c314f4c3dfc741b4df74043.pts 02691156/expert_verified/points_label/52a84fea7c314f4c3dfc741b4df74043.seg 02691156
+02958343/points/9f3c463272d13d39eb7780cdb3ece367.pts 02958343/expert_verified/points_label/9f3c463272d13d39eb7780cdb3ece367.seg 02958343
+03001627/points/def03f645b3fbd665bb93149cc0adf0.pts 03001627/expert_verified/points_label/def03f645b3fbd665bb93149cc0adf0.seg 03001627
+03001627/points/f9e386d968653602d68fb8f5d99affa0.pts 03001627/expert_verified/points_label/f9e386d968653602d68fb8f5d99affa0.seg 03001627
+03467517/points/9c399ebc617349dcd016bd20f13ab302.pts 03467517/expert_verified/points_label/9c399ebc617349dcd016bd20f13ab302.seg 03467517
+04379243/points/aaaba1bbe037d3b1e406974af41e8842.pts 04379243/expert_verified/points_label/aaaba1bbe037d3b1e406974af41e8842.seg 04379243
+03001627/points/4030ea84b560b857febad4f49b26ec52.pts 03001627/expert_verified/points_label/4030ea84b560b857febad4f49b26ec52.seg 03001627
+04379243/points/a38405108fb416d8356ca1f9220b9968.pts 04379243/expert_verified/points_label/a38405108fb416d8356ca1f9220b9968.seg 04379243
+04379243/points/f864677894410315ab610b0c94236463.pts 04379243/expert_verified/points_label/f864677894410315ab610b0c94236463.seg 04379243
+02954340/points/da5e5ec4c486d6c03baa6271927f050e.pts 02954340/expert_verified/points_label/da5e5ec4c486d6c03baa6271927f050e.seg 02954340
+02691156/points/eed299b690be51ffbd931fcaa69140.pts 02691156/expert_verified/points_label/eed299b690be51ffbd931fcaa69140.seg 02691156
+03797390/points/b4ae56d6638d5338de671f28c83d2dcb.pts 03797390/expert_verified/points_label/b4ae56d6638d5338de671f28c83d2dcb.seg 03797390
+04379243/points/10cc8c941fc8aeaa71a782a4379556c7.pts 04379243/expert_verified/points_label/10cc8c941fc8aeaa71a782a4379556c7.seg 04379243
+03636649/points/61b57e8b5da8fb13d527a9a6f5a872b9.pts 03636649/expert_verified/points_label/61b57e8b5da8fb13d527a9a6f5a872b9.seg 03636649
+02691156/points/ae4a9574248395b671d03b466c72ce41.pts 02691156/expert_verified/points_label/ae4a9574248395b671d03b466c72ce41.seg 02691156
+04379243/points/8cfe3ff92244310534506cc3910614fe.pts 04379243/expert_verified/points_label/8cfe3ff92244310534506cc3910614fe.seg 04379243
+03001627/points/597cb92a5bfb580eed98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/597cb92a5bfb580eed98cca8f0ccd5f7.seg 03001627
+03001627/points/4231883e92a3c1a21c62d11641ffbd35.pts 03001627/expert_verified/points_label/4231883e92a3c1a21c62d11641ffbd35.seg 03001627
+03636649/points/28793511c46b4fa030f6e0ede20c4525.pts 03636649/expert_verified/points_label/28793511c46b4fa030f6e0ede20c4525.seg 03636649
+02958343/points/4c60f32b6efdc7217dfb1ee6a4b12bf8.pts 02958343/expert_verified/points_label/4c60f32b6efdc7217dfb1ee6a4b12bf8.seg 02958343
+04379243/points/397c56f15e547fad1bb088904f7cb154.pts 04379243/expert_verified/points_label/397c56f15e547fad1bb088904f7cb154.seg 04379243
+04379243/points/9bb816d6a3517a5ca74c2333655a11dd.pts 04379243/expert_verified/points_label/9bb816d6a3517a5ca74c2333655a11dd.seg 04379243
+03790512/points/bae59e64a50d3aa2f68f798d07e007b6.pts 03790512/expert_verified/points_label/bae59e64a50d3aa2f68f798d07e007b6.seg 03790512
+04379243/points/8b094873d775f6e21130871dbfe24c18.pts 04379243/expert_verified/points_label/8b094873d775f6e21130871dbfe24c18.seg 04379243
+04379243/points/4d2f7c689e77df6b6dc1766995c17a41.pts 04379243/expert_verified/points_label/4d2f7c689e77df6b6dc1766995c17a41.seg 04379243
+03467517/points/16916a50a064304bf6ed0b697979412e.pts 03467517/expert_verified/points_label/16916a50a064304bf6ed0b697979412e.seg 03467517
+03636649/points/c802fa4c82498450af6016f34c89d087.pts 03636649/expert_verified/points_label/c802fa4c82498450af6016f34c89d087.seg 03636649
+03001627/points/1ec5a88141aefca9cf6e4dd7ee69d71f.pts 03001627/expert_verified/points_label/1ec5a88141aefca9cf6e4dd7ee69d71f.seg 03001627
+04379243/points/bdefbb1f281434e39961e1085a81acc5.pts 04379243/expert_verified/points_label/bdefbb1f281434e39961e1085a81acc5.seg 04379243
+04379243/points/acf57dbafe8966f577fb15a8d7923976.pts 04379243/expert_verified/points_label/acf57dbafe8966f577fb15a8d7923976.seg 04379243
+03642806/points/cc67f6608c41743ec1830f8ca7a3cbed.pts 03642806/expert_verified/points_label/cc67f6608c41743ec1830f8ca7a3cbed.seg 03642806
+03001627/points/95e1571acdd75922afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/95e1571acdd75922afdb9a672b7d3b8a.seg 03001627
+04379243/points/2ebe5dfb7bd9a50c6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/2ebe5dfb7bd9a50c6effbd64ad6b71b8.seg 04379243
+03001627/points/a6420c4ed13cf628945a77b945b7b70f.pts 03001627/expert_verified/points_label/a6420c4ed13cf628945a77b945b7b70f.seg 03001627
+04379243/points/1de679dd26d8c69cae44c65a6d0f0732.pts 04379243/expert_verified/points_label/1de679dd26d8c69cae44c65a6d0f0732.seg 04379243
+03001627/points/271012d5de261d08101accd22c701b9.pts 03001627/expert_verified/points_label/271012d5de261d08101accd22c701b9.seg 03001627
+04379243/points/5e409a2627f7cd7d63ecd64ef0e6814c.pts 04379243/expert_verified/points_label/5e409a2627f7cd7d63ecd64ef0e6814c.seg 04379243
+02691156/points/c9aeb20d7cd1b3b45e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/c9aeb20d7cd1b3b45e9e2656aff7dd5b.seg 02691156
+04379243/points/45b23ac79688170893ba1eeaf62819a2.pts 04379243/expert_verified/points_label/45b23ac79688170893ba1eeaf62819a2.seg 04379243
+02691156/points/9ac292686a2fcebbe719b5362fe06bbb.pts 02691156/expert_verified/points_label/9ac292686a2fcebbe719b5362fe06bbb.seg 02691156
+04379243/points/3b0c62bde7b24de85ce578b5b4bfae3c.pts 04379243/expert_verified/points_label/3b0c62bde7b24de85ce578b5b4bfae3c.seg 04379243
+02958343/points/c487e9850891e1ec2d15396b7bcc6366.pts 02958343/expert_verified/points_label/c487e9850891e1ec2d15396b7bcc6366.seg 02958343
+03636649/points/b8e25e0825cb5db7765609a3f435fe9d.pts 03636649/expert_verified/points_label/b8e25e0825cb5db7765609a3f435fe9d.seg 03636649
+03001627/points/9fd6bb18dc21c70766ef9dd2f3ef27d3.pts 03001627/expert_verified/points_label/9fd6bb18dc21c70766ef9dd2f3ef27d3.seg 03001627
+02958343/points/bf37249fc8e16fd8f9a88cc63b910f3.pts 02958343/expert_verified/points_label/bf37249fc8e16fd8f9a88cc63b910f3.seg 02958343
+04225987/points/58ae991bd0350810b9ac379f661f5c75.pts 04225987/expert_verified/points_label/58ae991bd0350810b9ac379f661f5c75.seg 04225987
+03001627/points/508306f8ddf1b54c41cc9e8c39b4e399.pts 03001627/expert_verified/points_label/508306f8ddf1b54c41cc9e8c39b4e399.seg 03001627
+03642806/points/ef5b312fc20f1b20aab089a6db538ba7.pts 03642806/expert_verified/points_label/ef5b312fc20f1b20aab089a6db538ba7.seg 03642806
+03001627/points/d97c5945e9449a58737e4e0df09d751.pts 03001627/expert_verified/points_label/d97c5945e9449a58737e4e0df09d751.seg 03001627
+03001627/points/e1897a4391784bc2e8b2b8dc0c816caf.pts 03001627/expert_verified/points_label/e1897a4391784bc2e8b2b8dc0c816caf.seg 03001627
+04379243/points/a624ebf0bf0451a8d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/a624ebf0bf0451a8d93768e7b9b1eabf.seg 04379243
+03636649/points/1e5e1ff56c27c0d2adc5f5aafedb1c38.pts 03636649/expert_verified/points_label/1e5e1ff56c27c0d2adc5f5aafedb1c38.seg 03636649
+03642806/points/2ce3a50ca6087f30d8e007cc6755cce9.pts 03642806/expert_verified/points_label/2ce3a50ca6087f30d8e007cc6755cce9.seg 03642806
+02691156/points/d615a8217b70af06bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/d615a8217b70af06bc0909d98a1ff2b4.seg 02691156
+02691156/points/6f72a0d86494b551a834b9c8bfc8647a.pts 02691156/expert_verified/points_label/6f72a0d86494b551a834b9c8bfc8647a.seg 02691156
+03001627/points/20fbab2b8770a1cbf51f77a6d7299806.pts 03001627/expert_verified/points_label/20fbab2b8770a1cbf51f77a6d7299806.seg 03001627
+03001627/points/d239d38424429a9a4626612b5d655dc.pts 03001627/expert_verified/points_label/d239d38424429a9a4626612b5d655dc.seg 03001627
+03001627/points/4c97f421c4ea4396d8ac5d7ad0953104.pts 03001627/expert_verified/points_label/4c97f421c4ea4396d8ac5d7ad0953104.seg 03001627
+03001627/points/5b68a6c2baf0ad61d0de9c949c366777.pts 03001627/expert_verified/points_label/5b68a6c2baf0ad61d0de9c949c366777.seg 03001627
+04379243/points/9bd1c242bd66d2fbb63c01786992bd2f.pts 04379243/expert_verified/points_label/9bd1c242bd66d2fbb63c01786992bd2f.seg 04379243
+03001627/points/e2dbe84030167f1ca5aad165050e534c.pts 03001627/expert_verified/points_label/e2dbe84030167f1ca5aad165050e534c.seg 03001627
+03001627/points/1c17cc67b8c747c3febad4f49b26ec52.pts 03001627/expert_verified/points_label/1c17cc67b8c747c3febad4f49b26ec52.seg 03001627
+04379243/points/2766a883126503cac3bd24f986301745.pts 04379243/expert_verified/points_label/2766a883126503cac3bd24f986301745.seg 04379243
+04225987/points/755dc44dae7791761082f2ea630bf69e.pts 04225987/expert_verified/points_label/755dc44dae7791761082f2ea630bf69e.seg 04225987
+04379243/points/c38ba6c06d2b813230c589758b4b5646.pts 04379243/expert_verified/points_label/c38ba6c06d2b813230c589758b4b5646.seg 04379243
+02691156/points/44c0cb6571f6f000ca8607f540cc62ba.pts 02691156/expert_verified/points_label/44c0cb6571f6f000ca8607f540cc62ba.seg 02691156
+03636649/points/522bc10920249e67141c66e2b49d221.pts 03636649/expert_verified/points_label/522bc10920249e67141c66e2b49d221.seg 03636649
+03790512/points/4548d86cf7f1c11ad373c34785838ee4.pts 03790512/expert_verified/points_label/4548d86cf7f1c11ad373c34785838ee4.seg 03790512
+02958343/points/37c5ac3d5b34761add75f724c0ccbe00.pts 02958343/expert_verified/points_label/37c5ac3d5b34761add75f724c0ccbe00.seg 02958343
+04379243/points/a15f31e2302f6ae5d67a73ffd62ba73f.pts 04379243/expert_verified/points_label/a15f31e2302f6ae5d67a73ffd62ba73f.seg 04379243
+02958343/points/6d714f7b7170a581da8e502a3c6cb4fb.pts 02958343/expert_verified/points_label/6d714f7b7170a581da8e502a3c6cb4fb.seg 02958343
+03624134/points/17c4163247e9237d4b7644126b1d71e0.pts 03624134/expert_verified/points_label/17c4163247e9237d4b7644126b1d71e0.seg 03624134
+03636649/points/7972fd0fe5755b4ad42b9650f19dd425.pts 03636649/expert_verified/points_label/7972fd0fe5755b4ad42b9650f19dd425.seg 03636649
+03001627/points/8ff4ba87d700054546992ce9fde1b2c2.pts 03001627/expert_verified/points_label/8ff4ba87d700054546992ce9fde1b2c2.seg 03001627
+03636649/points/a654df55875a2104d663817442d5278.pts 03636649/expert_verified/points_label/a654df55875a2104d663817442d5278.seg 03636649
+04379243/points/9c12fada31224bdf58c4e7e56d799d97.pts 04379243/expert_verified/points_label/9c12fada31224bdf58c4e7e56d799d97.seg 04379243
+03636649/points/9dad7ce60aa168d72cd2160e449d45ae.pts 03636649/expert_verified/points_label/9dad7ce60aa168d72cd2160e449d45ae.seg 03636649
+02691156/points/cfb555a4d82a600aca8607f540cc62ba.pts 02691156/expert_verified/points_label/cfb555a4d82a600aca8607f540cc62ba.seg 02691156
+04379243/points/415c174ecdc612fb6f5c30e29039b12d.pts 04379243/expert_verified/points_label/415c174ecdc612fb6f5c30e29039b12d.seg 04379243
+03467517/points/a5e2f05386e4ba55a894e1aba5d3799a.pts 03467517/expert_verified/points_label/a5e2f05386e4ba55a894e1aba5d3799a.seg 03467517
+03001627/points/a91b2c89e543a4b3aa3d970c5602cd4a.pts 03001627/expert_verified/points_label/a91b2c89e543a4b3aa3d970c5602cd4a.seg 03001627
+03624134/points/97ed13011e2d85e16029317225a75a9f.pts 03624134/expert_verified/points_label/97ed13011e2d85e16029317225a75a9f.seg 03624134
+04379243/points/388ea3f8ba27da8b777b6246417c94ff.pts 04379243/expert_verified/points_label/388ea3f8ba27da8b777b6246417c94ff.seg 04379243
+04379243/points/983cd9caf65adf1ddf6cfab91d65bb91.pts 04379243/expert_verified/points_label/983cd9caf65adf1ddf6cfab91d65bb91.seg 04379243
+03001627/points/e65d2f0ed75a786a37b2bb75885cfc44.pts 03001627/expert_verified/points_label/e65d2f0ed75a786a37b2bb75885cfc44.seg 03001627
+03624134/points/dce941899bcb752dfe474f09e3f3ac9a.pts 03624134/expert_verified/points_label/dce941899bcb752dfe474f09e3f3ac9a.seg 03624134
+04379243/points/ea3bcd9e6c4205031964126395b17c2a.pts 04379243/expert_verified/points_label/ea3bcd9e6c4205031964126395b17c2a.seg 04379243
+02691156/points/d13d131a649c5df38b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/d13d131a649c5df38b96ae1a0a8b84ec.seg 02691156
+04379243/points/f917474a20558aa33bbab77a66bc3671.pts 04379243/expert_verified/points_label/f917474a20558aa33bbab77a66bc3671.seg 04379243
+03001627/points/4a24652fbf2bed7e93583c67df8faf1.pts 03001627/expert_verified/points_label/4a24652fbf2bed7e93583c67df8faf1.seg 03001627
+02691156/points/5dd2324cd6ebf52e293fdbda4e7beec9.pts 02691156/expert_verified/points_label/5dd2324cd6ebf52e293fdbda4e7beec9.seg 02691156
+03642806/points/a59d3d87068d313c2656684d670220c2.pts 03642806/expert_verified/points_label/a59d3d87068d313c2656684d670220c2.seg 03642806
+04379243/points/5354ecb0e3aa1da074a16879fb3ac81f.pts 04379243/expert_verified/points_label/5354ecb0e3aa1da074a16879fb3ac81f.seg 04379243
+03642806/points/6c6a96e4486cc02cda66ecbb2c411f37.pts 03642806/expert_verified/points_label/6c6a96e4486cc02cda66ecbb2c411f37.seg 03642806
+04225987/points/fd3627deb2476b0f1f942c57ac0e8959.pts 04225987/expert_verified/points_label/fd3627deb2476b0f1f942c57ac0e8959.seg 04225987
+04379243/points/91bf48934d3b52ea36658c6705d0c08.pts 04379243/expert_verified/points_label/91bf48934d3b52ea36658c6705d0c08.seg 04379243
+04379243/points/18be1556eb4da5af7ccf848ce05c84be.pts 04379243/expert_verified/points_label/18be1556eb4da5af7ccf848ce05c84be.seg 04379243
+02958343/points/33211aabfefa14603b05c2ad25b4380f.pts 02958343/expert_verified/points_label/33211aabfefa14603b05c2ad25b4380f.seg 02958343
+04379243/points/3243ddb2aa4d1659beb83c64f2162734.pts 04379243/expert_verified/points_label/3243ddb2aa4d1659beb83c64f2162734.seg 04379243
+04379243/points/4ce90fe70faf4c3e255bc16374754e69.pts 04379243/expert_verified/points_label/4ce90fe70faf4c3e255bc16374754e69.seg 04379243
+04379243/points/15be511a2433482aa192483aa282f8e5.pts 04379243/expert_verified/points_label/15be511a2433482aa192483aa282f8e5.seg 04379243
+03624134/points/70b6b3ba6a27fd6f782db73f915dfbb8.pts 03624134/expert_verified/points_label/70b6b3ba6a27fd6f782db73f915dfbb8.seg 03624134
+03001627/points/519d19f3adebd20aba49014d9a3afe99.pts 03001627/expert_verified/points_label/519d19f3adebd20aba49014d9a3afe99.seg 03001627
+03467517/points/ca9720d793355dd693f0194265a9746c.pts 03467517/expert_verified/points_label/ca9720d793355dd693f0194265a9746c.seg 03467517
+03001627/points/e19214cabca496a3f7b54e04c7238d7.pts 03001627/expert_verified/points_label/e19214cabca496a3f7b54e04c7238d7.seg 03001627
+03001627/points/ea1bfe81b88395fcaa29e9f0529e8ef7.pts 03001627/expert_verified/points_label/ea1bfe81b88395fcaa29e9f0529e8ef7.seg 03001627
+03001627/points/2b110b833111b38c420adf24e49f74c8.pts 03001627/expert_verified/points_label/2b110b833111b38c420adf24e49f74c8.seg 03001627
+03001627/points/7b405c1d6d2dbea9f91663a74ccd2338.pts 03001627/expert_verified/points_label/7b405c1d6d2dbea9f91663a74ccd2338.seg 03001627
+02691156/points/489d3e4cc3d790a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/489d3e4cc3d790a0ca8607f540cc62ba.seg 02691156
+04379243/points/79eeee790ed5a5aac242632b2a8c3129.pts 04379243/expert_verified/points_label/79eeee790ed5a5aac242632b2a8c3129.seg 04379243
+03624134/points/665bf5d30d342d64adee73efb2c043f8.pts 03624134/expert_verified/points_label/665bf5d30d342d64adee73efb2c043f8.seg 03624134
+03467517/points/7f3f5c9953fb7e0a6cbec6f3d994a573.pts 03467517/expert_verified/points_label/7f3f5c9953fb7e0a6cbec6f3d994a573.seg 03467517
+03001627/points/d2597d18fdc3594e1dc59d2adbe5297d.pts 03001627/expert_verified/points_label/d2597d18fdc3594e1dc59d2adbe5297d.seg 03001627
+03001627/points/a9a1147eae9936f76f1e07a56c129dfc.pts 03001627/expert_verified/points_label/a9a1147eae9936f76f1e07a56c129dfc.seg 03001627
+02691156/points/64cb683afd5e9e559db1d21b460eacef.pts 02691156/expert_verified/points_label/64cb683afd5e9e559db1d21b460eacef.seg 02691156
+03624134/points/e0a78d771cfde145a5cea7e40e4d21ff.pts 03624134/expert_verified/points_label/e0a78d771cfde145a5cea7e40e4d21ff.seg 03624134
+02691156/points/e59c4f290d8585a862b600da24e0965.pts 02691156/expert_verified/points_label/e59c4f290d8585a862b600da24e0965.seg 02691156
+04379243/points/523ac3575244c7f3a130bbab7337a0cf.pts 04379243/expert_verified/points_label/523ac3575244c7f3a130bbab7337a0cf.seg 04379243
+03001627/points/96e83c79e8d76d4519fb4103277a6b93.pts 03001627/expert_verified/points_label/96e83c79e8d76d4519fb4103277a6b93.seg 03001627
+04379243/points/a2781622b5941ff2a886fe6408aa7382.pts 04379243/expert_verified/points_label/a2781622b5941ff2a886fe6408aa7382.seg 04379243
+04379243/points/5d24567426a614ecfd726e98b98fb36f.pts 04379243/expert_verified/points_label/5d24567426a614ecfd726e98b98fb36f.seg 04379243
+03001627/points/a5a2d09e5384237869513d0907f19c8f.pts 03001627/expert_verified/points_label/a5a2d09e5384237869513d0907f19c8f.seg 03001627
+02691156/points/e02485f093835f45c1b64d86df61366a.pts 02691156/expert_verified/points_label/e02485f093835f45c1b64d86df61366a.seg 02691156
+04379243/points/58f8fd169c9578e62f81cb887dc35578.pts 04379243/expert_verified/points_label/58f8fd169c9578e62f81cb887dc35578.seg 04379243
+04379243/points/c755eeaa4a588fcba9126dd5adc92c1e.pts 04379243/expert_verified/points_label/c755eeaa4a588fcba9126dd5adc92c1e.seg 04379243
+03001627/points/704179dd47a2282e676de9b6e111da8b.pts 03001627/expert_verified/points_label/704179dd47a2282e676de9b6e111da8b.seg 03001627
+03001627/points/9253f198c06794cdc7689830acac6e59.pts 03001627/expert_verified/points_label/9253f198c06794cdc7689830acac6e59.seg 03001627
+04379243/points/2ba8eb5ec0a05694593ebeeedbff73b.pts 04379243/expert_verified/points_label/2ba8eb5ec0a05694593ebeeedbff73b.seg 04379243
+03467517/points/133ebdf2ca7bf4b81d4e8021f58beea0.pts 03467517/expert_verified/points_label/133ebdf2ca7bf4b81d4e8021f58beea0.seg 03467517
+03467517/points/ba6d3dcff42ea7bba32c4b8efb0131e.pts 03467517/expert_verified/points_label/ba6d3dcff42ea7bba32c4b8efb0131e.seg 03467517
+03467517/points/222b705a80d75a4343b0b12983b9982.pts 03467517/expert_verified/points_label/222b705a80d75a4343b0b12983b9982.seg 03467517
+04379243/points/47317755c82114d5c3bd24f986301745.pts 04379243/expert_verified/points_label/47317755c82114d5c3bd24f986301745.seg 04379243
+04379243/points/175c0be26d0f2e916cb0bd372b0960ba.pts 04379243/expert_verified/points_label/175c0be26d0f2e916cb0bd372b0960ba.seg 04379243
+03636649/points/19388898dd69dd9fddc8e6d1ec6242c3.pts 03636649/expert_verified/points_label/19388898dd69dd9fddc8e6d1ec6242c3.seg 03636649
+04379243/points/3cec584145ee513d635418e95eea8a17.pts 04379243/expert_verified/points_label/3cec584145ee513d635418e95eea8a17.seg 04379243
+03001627/points/3a5c8d46fdc6793b956abdbfba57903a.pts 03001627/expert_verified/points_label/3a5c8d46fdc6793b956abdbfba57903a.seg 03001627
+03001627/points/3d32d89db2286377e63c6421b71f17c8.pts 03001627/expert_verified/points_label/3d32d89db2286377e63c6421b71f17c8.seg 03001627
+03001627/points/47a45ce9fb219083411e8b42940aba04.pts 03001627/expert_verified/points_label/47a45ce9fb219083411e8b42940aba04.seg 03001627
+03467517/points/214f6a08b78670de2cb522418d5742a0.pts 03467517/expert_verified/points_label/214f6a08b78670de2cb522418d5742a0.seg 03467517
+04379243/points/1b4bc147baf68d4ff008d8a3590fb522.pts 04379243/expert_verified/points_label/1b4bc147baf68d4ff008d8a3590fb522.seg 04379243
+03467517/points/83b2ecf5caced214e313875ff213ee10.pts 03467517/expert_verified/points_label/83b2ecf5caced214e313875ff213ee10.seg 03467517
+02691156/points/57fe8ad460bcb4929a4a28ef635593ce.pts 02691156/expert_verified/points_label/57fe8ad460bcb4929a4a28ef635593ce.seg 02691156
+03624134/points/e8a6915bd0bcf1bebaa284808a1567a8.pts 03624134/expert_verified/points_label/e8a6915bd0bcf1bebaa284808a1567a8.seg 03624134
+03001627/points/1da29597f89c2b004b3c42e318f3affc.pts 03001627/expert_verified/points_label/1da29597f89c2b004b3c42e318f3affc.seg 03001627
+04379243/points/2ef899e67eecef65190a91fd9a6f7d55.pts 04379243/expert_verified/points_label/2ef899e67eecef65190a91fd9a6f7d55.seg 04379243
+04379243/points/811a7be3be14bd2b62103e4bff47b4cd.pts 04379243/expert_verified/points_label/811a7be3be14bd2b62103e4bff47b4cd.seg 04379243
+03948459/points/592017db407391c68e7e947594effe19.pts 03948459/expert_verified/points_label/592017db407391c68e7e947594effe19.seg 03948459
+03636649/points/eb311e6232cb7011bb5bd941c6665c21.pts 03636649/expert_verified/points_label/eb311e6232cb7011bb5bd941c6665c21.seg 03636649
+02691156/points/caa7e70beee4543f42c20743f866e1a6.pts 02691156/expert_verified/points_label/caa7e70beee4543f42c20743f866e1a6.seg 02691156
+03001627/points/3aaa59b19eebcb5f41552c6ecbda964b.pts 03001627/expert_verified/points_label/3aaa59b19eebcb5f41552c6ecbda964b.seg 03001627
+03001627/points/a93aac9ad86008e69fc01fb65ca37d30.pts 03001627/expert_verified/points_label/a93aac9ad86008e69fc01fb65ca37d30.seg 03001627
+03624134/points/ceeb38ab7929361e76ec14627bf6bbcb.pts 03624134/expert_verified/points_label/ceeb38ab7929361e76ec14627bf6bbcb.seg 03624134
+03001627/points/93dc91115a9002e1663fcfd6703c85f3.pts 03001627/expert_verified/points_label/93dc91115a9002e1663fcfd6703c85f3.seg 03001627
+04379243/points/b08310a1d75702eda09ce9c1262c7237.pts 04379243/expert_verified/points_label/b08310a1d75702eda09ce9c1262c7237.seg 04379243
+03797390/points/e9bd4ee553eb35c1d5ccc40b510e4bd.pts 03797390/expert_verified/points_label/e9bd4ee553eb35c1d5ccc40b510e4bd.seg 03797390
+03001627/points/bdd57499bf64fab6bf80985a99195eb8.pts 03001627/expert_verified/points_label/bdd57499bf64fab6bf80985a99195eb8.seg 03001627
+04379243/points/48af84a5600ad5bc19fb4103277a6b93.pts 04379243/expert_verified/points_label/48af84a5600ad5bc19fb4103277a6b93.seg 04379243
+03001627/points/738395f54b301d80b1f5d603f931c1aa.pts 03001627/expert_verified/points_label/738395f54b301d80b1f5d603f931c1aa.seg 03001627
+03790512/points/6819949f5625ca12d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/6819949f5625ca12d0f568c31c1cd62a.seg 03790512
+03467517/points/70d9a5d0330abd9df4b498e11fb60a4b.pts 03467517/expert_verified/points_label/70d9a5d0330abd9df4b498e11fb60a4b.seg 03467517
+02958343/points/174f1a421f652029d577c0ac53e96823.pts 02958343/expert_verified/points_label/174f1a421f652029d577c0ac53e96823.seg 02958343
+03001627/points/d764960666572084b1ea4e06e88051f3.pts 03001627/expert_verified/points_label/d764960666572084b1ea4e06e88051f3.seg 03001627
+02691156/points/ba662ec78231c493252b4f9439ef95a6.pts 02691156/expert_verified/points_label/ba662ec78231c493252b4f9439ef95a6.seg 02691156
+03636649/points/8a9f2e5b726ea37f60ad823977adaa23.pts 03636649/expert_verified/points_label/8a9f2e5b726ea37f60ad823977adaa23.seg 03636649
+04379243/points/80af0f92ecf69f69f5ff054d67d5fe35.pts 04379243/expert_verified/points_label/80af0f92ecf69f69f5ff054d67d5fe35.seg 04379243
+04379243/points/ce4e075487aa05ecdcfcef693e7ec696.pts 04379243/expert_verified/points_label/ce4e075487aa05ecdcfcef693e7ec696.seg 04379243
+03001627/points/564f5f96bc718194166420d06689fcf.pts 03001627/expert_verified/points_label/564f5f96bc718194166420d06689fcf.seg 03001627
+03636649/points/88d29e1350eda810c066b9622c005c53.pts 03636649/expert_verified/points_label/88d29e1350eda810c066b9622c005c53.seg 03636649
+04379243/points/346db24c1279e8d273fdbe4b39ff4036.pts 04379243/expert_verified/points_label/346db24c1279e8d273fdbe4b39ff4036.seg 04379243
+04379243/points/7062f5b229674ab7b0b54dd2cf2a35d4.pts 04379243/expert_verified/points_label/7062f5b229674ab7b0b54dd2cf2a35d4.seg 04379243
+03636649/points/923097cec128ae77469cbaa3d6420fb4.pts 03636649/expert_verified/points_label/923097cec128ae77469cbaa3d6420fb4.seg 03636649
+04379243/points/3fb5033b5ddaaf365f7afad12924b3b5.pts 04379243/expert_verified/points_label/3fb5033b5ddaaf365f7afad12924b3b5.seg 04379243
+03636649/points/32e9d8a4b5a141a2615efc34c3b36ef0.pts 03636649/expert_verified/points_label/32e9d8a4b5a141a2615efc34c3b36ef0.seg 03636649
+02691156/points/997cb29f544d6f2726360e1e29a956c7.pts 02691156/expert_verified/points_label/997cb29f544d6f2726360e1e29a956c7.seg 02691156
+04379243/points/7df9115b511668bdde98d10ab5975b59.pts 04379243/expert_verified/points_label/7df9115b511668bdde98d10ab5975b59.seg 04379243
+03636649/points/5580b95ab8e7806c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/5580b95ab8e7806c6c5b8009db95f66f.seg 03636649
+04379243/points/6862bebc1f59a5caac7bed72580dc30f.pts 04379243/expert_verified/points_label/6862bebc1f59a5caac7bed72580dc30f.seg 04379243
+02691156/points/56ba815f883279b462b600da24e0965.pts 02691156/expert_verified/points_label/56ba815f883279b462b600da24e0965.seg 02691156
+03797390/points/5c48d471200d2bf16e8a121e6886e18d.pts 03797390/expert_verified/points_label/5c48d471200d2bf16e8a121e6886e18d.seg 03797390
+04379243/points/b48d04600e7cf2bebeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/b48d04600e7cf2bebeedb4c8fd29e2d1.seg 04379243
+02958343/points/323c9dc2a8911e146f2f07de403e98d8.pts 02958343/expert_verified/points_label/323c9dc2a8911e146f2f07de403e98d8.seg 02958343
+04225987/points/d3ff56062272f3e6346e65609be6d72f.pts 04225987/expert_verified/points_label/d3ff56062272f3e6346e65609be6d72f.seg 04225987
+03001627/points/af28dbdce6ed8cea19fb4103277a6b93.pts 03001627/expert_verified/points_label/af28dbdce6ed8cea19fb4103277a6b93.seg 03001627
+02958343/points/dfa6c32dec07727ee9d8921ebe6d5b8e.pts 02958343/expert_verified/points_label/dfa6c32dec07727ee9d8921ebe6d5b8e.seg 02958343
+03001627/points/c2b898dd5601454d626d7e3d07da8352.pts 03001627/expert_verified/points_label/c2b898dd5601454d626d7e3d07da8352.seg 03001627
+04379243/points/a7ef45d86ae5b496a97f238e46bc2221.pts 04379243/expert_verified/points_label/a7ef45d86ae5b496a97f238e46bc2221.seg 04379243
+04379243/points/1bd138c3e54a75d32f38c0d2792fb5e.pts 04379243/expert_verified/points_label/1bd138c3e54a75d32f38c0d2792fb5e.seg 04379243
+02958343/points/cd67376cac9f989151008e496c6cfd2e.pts 02958343/expert_verified/points_label/cd67376cac9f989151008e496c6cfd2e.seg 02958343
+03948459/points/af9eaed1d9574387ab2c2809513f396e.pts 03948459/expert_verified/points_label/af9eaed1d9574387ab2c2809513f396e.seg 03948459
+04379243/points/c418195771c7625945821c000807c3b1.pts 04379243/expert_verified/points_label/c418195771c7625945821c000807c3b1.seg 04379243
+04379243/points/88b227c5fb3906ce47c638c0eee4a2b3.pts 04379243/expert_verified/points_label/88b227c5fb3906ce47c638c0eee4a2b3.seg 04379243
+03467517/points/81bd0c7a35a147988cc3ae4061da3bb0.pts 03467517/expert_verified/points_label/81bd0c7a35a147988cc3ae4061da3bb0.seg 03467517
+04379243/points/5292f2930f188e0a7ff6ace05b36a5.pts 04379243/expert_verified/points_label/5292f2930f188e0a7ff6ace05b36a5.seg 04379243
+03636649/points/5f0a23ce527d0be52f38c0d2792fb5e.pts 03636649/expert_verified/points_label/5f0a23ce527d0be52f38c0d2792fb5e.seg 03636649
+03636649/points/98cdb45ca9925feb194eb328dc97c7e2.pts 03636649/expert_verified/points_label/98cdb45ca9925feb194eb328dc97c7e2.seg 03636649
+03790512/points/47054c1839830834a88e8cb97b773125.pts 03790512/expert_verified/points_label/47054c1839830834a88e8cb97b773125.seg 03790512
+03001627/points/b058cc77e628ac01c433ba3e0e025e8c.pts 03001627/expert_verified/points_label/b058cc77e628ac01c433ba3e0e025e8c.seg 03001627
+04225987/points/f74a5dfc0094e2d5561dce3fe08634b7.pts 04225987/expert_verified/points_label/f74a5dfc0094e2d5561dce3fe08634b7.seg 04225987
+02958343/points/e20b8a9c388eeb012c8b6ee41d7d5d62.pts 02958343/expert_verified/points_label/e20b8a9c388eeb012c8b6ee41d7d5d62.seg 02958343
+02958343/points/7203130a35ab20a4b1bb46d2556ba67d.pts 02958343/expert_verified/points_label/7203130a35ab20a4b1bb46d2556ba67d.seg 02958343
+03261776/points/2c6f04001afcce7ded85c3dc02bada79.pts 03261776/expert_verified/points_label/2c6f04001afcce7ded85c3dc02bada79.seg 03261776
+03001627/points/951fb0d7ad8ab2bec5b5bea66ef4576d.pts 03001627/expert_verified/points_label/951fb0d7ad8ab2bec5b5bea66ef4576d.seg 03001627
+02691156/points/54e926e12382808b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/54e926e12382808b66cf1b4a8fc3914e.seg 02691156
+03001627/points/4c513ea0804fc008c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/4c513ea0804fc008c8687ff9b0b4e4ac.seg 03001627
+03001627/points/748957972cae6b03c56be62b05937331.pts 03001627/expert_verified/points_label/748957972cae6b03c56be62b05937331.seg 03001627
+03001627/points/cc2639f8c584001a922dfe32810651d0.pts 03001627/expert_verified/points_label/cc2639f8c584001a922dfe32810651d0.seg 03001627
+04379243/points/d2f811bc37858425a63ceecddc308b25.pts 04379243/expert_verified/points_label/d2f811bc37858425a63ceecddc308b25.seg 04379243
+03001627/points/d48dac046436a29ec3bd24f986301745.pts 03001627/expert_verified/points_label/d48dac046436a29ec3bd24f986301745.seg 03001627
+03001627/points/30fafef5c734f926781ba0fdb47276df.pts 03001627/expert_verified/points_label/30fafef5c734f926781ba0fdb47276df.seg 03001627
+03001627/points/7293291b3fe8233fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/7293291b3fe8233fdef1c01cbd4ae0c.seg 03001627
+03636649/points/3deedc86a83bbf23f647dc544bb0ab61.pts 03636649/expert_verified/points_label/3deedc86a83bbf23f647dc544bb0ab61.seg 03636649
+03467517/points/bb4a5712da8f63330d758421dd01f45.pts 03467517/expert_verified/points_label/bb4a5712da8f63330d758421dd01f45.seg 03467517
+03636649/points/39af776c1435a3374b59758e9336ca87.pts 03636649/expert_verified/points_label/39af776c1435a3374b59758e9336ca87.seg 03636649
+04379243/points/ef9f3af9b8453613febad4f49b26ec52.pts 04379243/expert_verified/points_label/ef9f3af9b8453613febad4f49b26ec52.seg 04379243
+02691156/points/29192f8c96264e3435fc197bbabcd5bd.pts 02691156/expert_verified/points_label/29192f8c96264e3435fc197bbabcd5bd.seg 02691156
+02691156/points/75d162523d703917b87697d3904b168b.pts 02691156/expert_verified/points_label/75d162523d703917b87697d3904b168b.seg 02691156
+04379243/points/3c04f4e0d183976a7e7cb173e141227.pts 04379243/expert_verified/points_label/3c04f4e0d183976a7e7cb173e141227.seg 04379243
+03790512/points/80011e85cd42668ad373c34785838ee4.pts 03790512/expert_verified/points_label/80011e85cd42668ad373c34785838ee4.seg 03790512
+04379243/points/994e524d70043c3496e349c87c588bf2.pts 04379243/expert_verified/points_label/994e524d70043c3496e349c87c588bf2.seg 04379243
+02691156/points/b1f08c51a098c43696d224195a988f09.pts 02691156/expert_verified/points_label/b1f08c51a098c43696d224195a988f09.seg 02691156
+04379243/points/cb31b6293506eb639a3528690d225ee1.pts 04379243/expert_verified/points_label/cb31b6293506eb639a3528690d225ee1.seg 04379243
+02691156/points/d70d648947c65b1eca8607f540cc62ba.pts 02691156/expert_verified/points_label/d70d648947c65b1eca8607f540cc62ba.seg 02691156
+03636649/points/7bebdd742342ba93febad4f49b26ec52.pts 03636649/expert_verified/points_label/7bebdd742342ba93febad4f49b26ec52.seg 03636649
+02691156/points/2a2caad9e540dcc687bf26680c510802.pts 02691156/expert_verified/points_label/2a2caad9e540dcc687bf26680c510802.seg 02691156
+03790512/points/73fd19410ce60b83d5dde04c96fd8146.pts 03790512/expert_verified/points_label/73fd19410ce60b83d5dde04c96fd8146.seg 03790512
+04379243/points/ccb8c52ff9e7a01819fb4103277a6b93.pts 04379243/expert_verified/points_label/ccb8c52ff9e7a01819fb4103277a6b93.seg 04379243
+03467517/points/cc9e9ef3e1326c5363e148e250c0340d.pts 03467517/expert_verified/points_label/cc9e9ef3e1326c5363e148e250c0340d.seg 03467517
+03001627/points/d5360f2b0b0299c29b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/d5360f2b0b0299c29b9f2eb77f5e247e.seg 03001627
+02691156/points/6b69e4c1cceb6e0681fa1ee3c368532e.pts 02691156/expert_verified/points_label/6b69e4c1cceb6e0681fa1ee3c368532e.seg 02691156
+02691156/points/3ae96a1e1bb488942296d88107d065f6.pts 02691156/expert_verified/points_label/3ae96a1e1bb488942296d88107d065f6.seg 02691156
+04379243/points/5e4351c4525fae6d6fa63795f94c4d8c.pts 04379243/expert_verified/points_label/5e4351c4525fae6d6fa63795f94c4d8c.seg 04379243
+04225987/points/5c55e6b6708f730d758f6def7204bd6b.pts 04225987/expert_verified/points_label/5c55e6b6708f730d758f6def7204bd6b.seg 04225987
+03001627/points/a48e359faed3da88d3519c62a8100783.pts 03001627/expert_verified/points_label/a48e359faed3da88d3519c62a8100783.seg 03001627
+03467517/points/a4170135b1055cb8982c503992eaf09.pts 03467517/expert_verified/points_label/a4170135b1055cb8982c503992eaf09.seg 03467517
+02958343/points/b3f1ad55fa401c35e8c505ac322336cc.pts 02958343/expert_verified/points_label/b3f1ad55fa401c35e8c505ac322336cc.seg 02958343
+02691156/points/c7c5bb658cafcc7c67711f7c205c5b63.pts 02691156/expert_verified/points_label/c7c5bb658cafcc7c67711f7c205c5b63.seg 02691156
+02691156/points/914c308ac4a9156842c20743f866e1a6.pts 02691156/expert_verified/points_label/914c308ac4a9156842c20743f866e1a6.seg 02691156
+04379243/points/23acbe1f91d445f91ca1c7e576bee6b9.pts 04379243/expert_verified/points_label/23acbe1f91d445f91ca1c7e576bee6b9.seg 04379243
+04379243/points/8eb366f4f602219b490ad276cd2af3a4.pts 04379243/expert_verified/points_label/8eb366f4f602219b490ad276cd2af3a4.seg 04379243
+03624134/points/508ca8fa00e0cbb3e168961dc7b88f65.pts 03624134/expert_verified/points_label/508ca8fa00e0cbb3e168961dc7b88f65.seg 03624134
+04379243/points/be045fca16562f6764c85287e21825c4.pts 04379243/expert_verified/points_label/be045fca16562f6764c85287e21825c4.seg 04379243
+03001627/points/70f57047512c2eb84104b1c5cb7f9280.pts 03001627/expert_verified/points_label/70f57047512c2eb84104b1c5cb7f9280.seg 03001627
+03001627/points/975ea4be01c7488611bc8e8361bc5303.pts 03001627/expert_verified/points_label/975ea4be01c7488611bc8e8361bc5303.seg 03001627
+04379243/points/3c7cf00cd78adaef4b3c42e318f3affc.pts 04379243/expert_verified/points_label/3c7cf00cd78adaef4b3c42e318f3affc.seg 04379243
+02773838/points/220f08ff0c1d2a4542282fc88db7886b.pts 02773838/expert_verified/points_label/220f08ff0c1d2a4542282fc88db7886b.seg 02773838
+03636649/points/e35c4fadbf8d0426c26e81144f3196d5.pts 03636649/expert_verified/points_label/e35c4fadbf8d0426c26e81144f3196d5.seg 03636649
+03642806/points/93958423b98be8b538ff1b6d120c56aa.pts 03642806/expert_verified/points_label/93958423b98be8b538ff1b6d120c56aa.seg 03642806
+04379243/points/cf24f0128755080569080f7eaa8f3e1d.pts 04379243/expert_verified/points_label/cf24f0128755080569080f7eaa8f3e1d.seg 04379243
+04379243/points/f5cbbe04afdc4697562b835b63cfd09c.pts 04379243/expert_verified/points_label/f5cbbe04afdc4697562b835b63cfd09c.seg 04379243
+04379243/points/7a7590d19cf8274dab610b0c94236463.pts 04379243/expert_verified/points_label/7a7590d19cf8274dab610b0c94236463.seg 04379243
+03001627/points/bdfc3a43eccaac7e908cb3a44391b80.pts 03001627/expert_verified/points_label/bdfc3a43eccaac7e908cb3a44391b80.seg 03001627
+03636649/points/90d70f0a6b1cf72d79f0be73913de469.pts 03636649/expert_verified/points_label/90d70f0a6b1cf72d79f0be73913de469.seg 03636649
+03642806/points/17069b6604fc28bfa2f5beb253216d5b.pts 03642806/expert_verified/points_label/17069b6604fc28bfa2f5beb253216d5b.seg 03642806
+04379243/points/3b0625a3d623a7decfbec6fc6446a041.pts 04379243/expert_verified/points_label/3b0625a3d623a7decfbec6fc6446a041.seg 04379243
+04379243/points/9482c5f0a38a73c0fa16d3c3138134ae.pts 04379243/expert_verified/points_label/9482c5f0a38a73c0fa16d3c3138134ae.seg 04379243
+04379243/points/ed73c41dcfe9170119cc3eaf35cd388f.pts 04379243/expert_verified/points_label/ed73c41dcfe9170119cc3eaf35cd388f.seg 04379243
+04379243/points/1abed35643d34f60afed86cbd9fd5335.pts 04379243/expert_verified/points_label/1abed35643d34f60afed86cbd9fd5335.seg 04379243
+03001627/points/98e1936d3f25389bc3c6a889ee0bd115.pts 03001627/expert_verified/points_label/98e1936d3f25389bc3c6a889ee0bd115.seg 03001627
+03797390/points/ef24c302911bcde6ea6ff2182dd34668.pts 03797390/expert_verified/points_label/ef24c302911bcde6ea6ff2182dd34668.seg 03797390
+02773838/points/22b7d6fa819d62aefc69b7db9c6d5ad9.pts 02773838/expert_verified/points_label/22b7d6fa819d62aefc69b7db9c6d5ad9.seg 02773838
+03001627/points/19666f52289092a3394a3bbfc81460.pts 03001627/expert_verified/points_label/19666f52289092a3394a3bbfc81460.seg 03001627
+03001627/points/49b38e22f104005ecbde89e0c48a01bf.pts 03001627/expert_verified/points_label/49b38e22f104005ecbde89e0c48a01bf.seg 03001627
+04379243/points/de077e0bd6932baef12d7184a2ad3430.pts 04379243/expert_verified/points_label/de077e0bd6932baef12d7184a2ad3430.seg 04379243
+03001627/points/fe99f16c2532cdd07ba99ad16fdc05cd.pts 03001627/expert_verified/points_label/fe99f16c2532cdd07ba99ad16fdc05cd.seg 03001627
+03642806/points/a17cf326705a6443a09a37cf78d1b866.pts 03642806/expert_verified/points_label/a17cf326705a6443a09a37cf78d1b866.seg 03642806
+04379243/points/890940359fdfa036569c11df1aea8ca4.pts 04379243/expert_verified/points_label/890940359fdfa036569c11df1aea8ca4.seg 04379243
+03642806/points/7f75b94bd59d649958dd315c54df0c15.pts 03642806/expert_verified/points_label/7f75b94bd59d649958dd315c54df0c15.seg 03642806
+04379243/points/d0ef9d431a16e70de6c5cd45aa112726.pts 04379243/expert_verified/points_label/d0ef9d431a16e70de6c5cd45aa112726.seg 04379243
+03001627/points/2dc5055b8d900ec7db4b0ee93cf61ed1.pts 03001627/expert_verified/points_label/2dc5055b8d900ec7db4b0ee93cf61ed1.seg 03001627
+03001627/points/9e6b834449ed2db86199d6fe090be061.pts 03001627/expert_verified/points_label/9e6b834449ed2db86199d6fe090be061.seg 03001627
+04379243/points/9e3f1901ea14aca753315facdf531a34.pts 04379243/expert_verified/points_label/9e3f1901ea14aca753315facdf531a34.seg 04379243
+03001627/points/c4ebef05a72fc4f39d62eb3fdc2d3f8a.pts 03001627/expert_verified/points_label/c4ebef05a72fc4f39d62eb3fdc2d3f8a.seg 03001627
+03001627/points/428b77d0ffe6ab456e06155d245f15d6.pts 03001627/expert_verified/points_label/428b77d0ffe6ab456e06155d245f15d6.seg 03001627
+04225987/points/591971ce679ca4b93ad38b993d9e745f.pts 04225987/expert_verified/points_label/591971ce679ca4b93ad38b993d9e745f.seg 04225987
+03790512/points/bcabe20e46e5126ed5dde04c96fd8146.pts 03790512/expert_verified/points_label/bcabe20e46e5126ed5dde04c96fd8146.seg 03790512
+04379243/points/3ed500a12dfa511ba6040757a0125a99.pts 04379243/expert_verified/points_label/3ed500a12dfa511ba6040757a0125a99.seg 04379243
+04379243/points/1581d2682187764730bbd4cddd04c77b.pts 04379243/expert_verified/points_label/1581d2682187764730bbd4cddd04c77b.seg 04379243
+02691156/points/bb7d526405e9347b8f6810e1a2b6aa04.pts 02691156/expert_verified/points_label/bb7d526405e9347b8f6810e1a2b6aa04.seg 02691156
+02691156/points/fb9deec3a422b06b609e2d916fa0da27.pts 02691156/expert_verified/points_label/fb9deec3a422b06b609e2d916fa0da27.seg 02691156
+03636649/points/5e6abfc7d93fa5f1dc0efee4b442070.pts 03636649/expert_verified/points_label/5e6abfc7d93fa5f1dc0efee4b442070.seg 03636649
+03467517/points/2dbc73ad4ce7950163e148e250c0340d.pts 03467517/expert_verified/points_label/2dbc73ad4ce7950163e148e250c0340d.seg 03467517
+02958343/points/eea7f5d02088d49dfdb3c05088c091ae.pts 02958343/expert_verified/points_label/eea7f5d02088d49dfdb3c05088c091ae.seg 02958343
+04379243/points/83c24aad3914e61a73376642dd664bfd.pts 04379243/expert_verified/points_label/83c24aad3914e61a73376642dd664bfd.seg 04379243
+04379243/points/51874066ba946c58aaf15b62af6b513f.pts 04379243/expert_verified/points_label/51874066ba946c58aaf15b62af6b513f.seg 04379243
+03636649/points/5be8cdad3b218e373d39d8012919dd25.pts 03636649/expert_verified/points_label/5be8cdad3b218e373d39d8012919dd25.seg 03636649
+03636649/points/49cd0dd4d1c008edbbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/49cd0dd4d1c008edbbc7a6acbd8f058b.seg 03636649
+03642806/points/d7e7e6651a23afc68ba4e518219eb66a.pts 03642806/expert_verified/points_label/d7e7e6651a23afc68ba4e518219eb66a.seg 03642806
+02958343/points/6026684ab31d567328044fe9244db50a.pts 02958343/expert_verified/points_label/6026684ab31d567328044fe9244db50a.seg 02958343
+04379243/points/c177762c0445d57ab20aa91e9e90c311.pts 04379243/expert_verified/points_label/c177762c0445d57ab20aa91e9e90c311.seg 04379243
+02691156/points/7bad9d15c0f0d3c03554ccf8c30febe7.pts 02691156/expert_verified/points_label/7bad9d15c0f0d3c03554ccf8c30febe7.seg 02691156
+03636649/points/dd818b0269b1aa15fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/dd818b0269b1aa15fcb8d8c6d4df8143.seg 03636649
+03624134/points/c4851aee1af7d874cc34b900bb2492e.pts 03624134/expert_verified/points_label/c4851aee1af7d874cc34b900bb2492e.seg 03624134
+03001627/points/e2ced471afce616454bfa32aa0766acb.pts 03001627/expert_verified/points_label/e2ced471afce616454bfa32aa0766acb.seg 03001627
+03797390/points/896f1d494bac0ebcdec712af445786fe.pts 03797390/expert_verified/points_label/896f1d494bac0ebcdec712af445786fe.seg 03797390
+04379243/points/481e00e4559705c616a2b5862518c93.pts 04379243/expert_verified/points_label/481e00e4559705c616a2b5862518c93.seg 04379243
+04379243/points/2ca883ba6a9dc6f68985be89a0ee21a.pts 04379243/expert_verified/points_label/2ca883ba6a9dc6f68985be89a0ee21a.seg 04379243
+04379243/points/ebc82e7df36f6e9a33963916b86d221f.pts 04379243/expert_verified/points_label/ebc82e7df36f6e9a33963916b86d221f.seg 04379243
+03001627/points/cdea84a63ad8c44febad4f49b26ec52.pts 03001627/expert_verified/points_label/cdea84a63ad8c44febad4f49b26ec52.seg 03001627
+03624134/points/c71280ea272fbfed4b7644126b1d71e0.pts 03624134/expert_verified/points_label/c71280ea272fbfed4b7644126b1d71e0.seg 03624134
+02958343/points/974c3d82f8726f086b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/974c3d82f8726f086b418c7d9fedcaa9.seg 02958343
+02958343/points/4dbf4e0654d0c234e811106a82796d20.pts 02958343/expert_verified/points_label/4dbf4e0654d0c234e811106a82796d20.seg 02958343
+03467517/points/de9ca0c3e32f907dcb61cf5d9c47c2c7.pts 03467517/expert_verified/points_label/de9ca0c3e32f907dcb61cf5d9c47c2c7.seg 03467517
+02958343/points/9f4bbcf9f51fe1e42957c02bdefc95c8.pts 02958343/expert_verified/points_label/9f4bbcf9f51fe1e42957c02bdefc95c8.seg 02958343
+03467517/points/173e4f1824f7b9fa93f0194265a9746c.pts 03467517/expert_verified/points_label/173e4f1824f7b9fa93f0194265a9746c.seg 03467517
+03636649/points/b4f166440439171741657e31b569b105.pts 03636649/expert_verified/points_label/b4f166440439171741657e31b569b105.seg 03636649
+03948459/points/d1ba405fef56efa0fa29682ba98e856d.pts 03948459/expert_verified/points_label/d1ba405fef56efa0fa29682ba98e856d.seg 03948459
+03467517/points/a39dcefa599a76dd93f0194265a9746c.pts 03467517/expert_verified/points_label/a39dcefa599a76dd93f0194265a9746c.seg 03467517
+02958343/points/e213d976734431773a3afd30f2e86bd7.pts 02958343/expert_verified/points_label/e213d976734431773a3afd30f2e86bd7.seg 02958343
+04379243/points/b1335d826d7d60726e066e11deddab75.pts 04379243/expert_verified/points_label/b1335d826d7d60726e066e11deddab75.seg 04379243
+04379243/points/e37262abd76852ac00ee852f6d8aa3c.pts 04379243/expert_verified/points_label/e37262abd76852ac00ee852f6d8aa3c.seg 04379243
+03001627/points/5d346bdb7db27accf3588493d5c284.pts 03001627/expert_verified/points_label/5d346bdb7db27accf3588493d5c284.seg 03001627
+04379243/points/198ff59a42a147eb8ac5948d70801389.pts 04379243/expert_verified/points_label/198ff59a42a147eb8ac5948d70801389.seg 04379243
+03001627/points/b3fd987b330d0d2acda56795a6fbde1f.pts 03001627/expert_verified/points_label/b3fd987b330d0d2acda56795a6fbde1f.seg 03001627
+02691156/points/1cb757280b862ae52c7575c9089791ff.pts 02691156/expert_verified/points_label/1cb757280b862ae52c7575c9089791ff.seg 02691156
+03636649/points/4631e756666a8a208ca4aeb5e3b33af7.pts 03636649/expert_verified/points_label/4631e756666a8a208ca4aeb5e3b33af7.seg 03636649
+04379243/points/b82c6769c98e877d24d29f1dedd03a57.pts 04379243/expert_verified/points_label/b82c6769c98e877d24d29f1dedd03a57.seg 04379243
+03636649/points/2b194d6bed8daa82c0b2dda5ff15ea28.pts 03636649/expert_verified/points_label/2b194d6bed8daa82c0b2dda5ff15ea28.seg 03636649
+03001627/points/7e6b4a7b4dd60c40cc8bd7a04c9659f1.pts 03001627/expert_verified/points_label/7e6b4a7b4dd60c40cc8bd7a04c9659f1.seg 03001627
+03948459/points/d1cc54762432fd058a2c998c0df41abe.pts 03948459/expert_verified/points_label/d1cc54762432fd058a2c998c0df41abe.seg 03948459
+04225987/points/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.pts 04225987/expert_verified/points_label/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.seg 04225987
+04379243/points/6ce30b0327db26f340b4c5428883e585.pts 04379243/expert_verified/points_label/6ce30b0327db26f340b4c5428883e585.seg 04379243
+04379243/points/c5230678204a1bb8dcfcef693e7ec696.pts 04379243/expert_verified/points_label/c5230678204a1bb8dcfcef693e7ec696.seg 04379243
+02691156/points/563cef4df464ddb1e153dd90dac45a6d.pts 02691156/expert_verified/points_label/563cef4df464ddb1e153dd90dac45a6d.seg 02691156
+02958343/points/42e6ce03b361102ab86e0633bb69faea.pts 02958343/expert_verified/points_label/42e6ce03b361102ab86e0633bb69faea.seg 02958343
+03001627/points/26e8033e59a3adf6bb53a6a5f5051240.pts 03001627/expert_verified/points_label/26e8033e59a3adf6bb53a6a5f5051240.seg 03001627
+04379243/points/731b983cb313634fd018082a1777a5f8.pts 04379243/expert_verified/points_label/731b983cb313634fd018082a1777a5f8.seg 04379243
+02691156/points/10aa040f470500c6a66ef8df4909ded9.pts 02691156/expert_verified/points_label/10aa040f470500c6a66ef8df4909ded9.seg 02691156
+03467517/points/bb895a87931f51c893f0194265a9746c.pts 03467517/expert_verified/points_label/bb895a87931f51c893f0194265a9746c.seg 03467517
+03624134/points/a105080ce4564145aeb54153795ede63.pts 03624134/expert_verified/points_label/a105080ce4564145aeb54153795ede63.seg 03624134
+04379243/points/c12147db9b29ef9ee0480c954dcd56d1.pts 04379243/expert_verified/points_label/c12147db9b29ef9ee0480c954dcd56d1.seg 04379243
+04379243/points/21cdc417e398378e40f3ac0af6b7e700.pts 04379243/expert_verified/points_label/21cdc417e398378e40f3ac0af6b7e700.seg 04379243
+04379243/points/b11e0feb428f61edf008d8a3590fb522.pts 04379243/expert_verified/points_label/b11e0feb428f61edf008d8a3590fb522.seg 04379243
+04379243/points/2700f6693447c32d66c64744a4252d3.pts 04379243/expert_verified/points_label/2700f6693447c32d66c64744a4252d3.seg 04379243
+03467517/points/b6d0cf333c7e013993f0194265a9746c.pts 03467517/expert_verified/points_label/b6d0cf333c7e013993f0194265a9746c.seg 03467517
+03001627/points/ece627bd883d9bbfb0eb7e753c06942.pts 03001627/expert_verified/points_label/ece627bd883d9bbfb0eb7e753c06942.seg 03001627
+03636649/points/26f0f37f0f2623c4a3fa46ae73c48b4.pts 03636649/expert_verified/points_label/26f0f37f0f2623c4a3fa46ae73c48b4.seg 03636649
+04379243/points/8b07d458499d63f36d96c6cb347d6a90.pts 04379243/expert_verified/points_label/8b07d458499d63f36d96c6cb347d6a90.seg 04379243
+04379243/points/eb363770ee36b0309a79b01b89f55c86.pts 04379243/expert_verified/points_label/eb363770ee36b0309a79b01b89f55c86.seg 04379243
+04379243/points/ccf36a20b7ef3bd128071d61462a212d.pts 04379243/expert_verified/points_label/ccf36a20b7ef3bd128071d61462a212d.seg 04379243
+03001627/points/cf24fc2d10f8da31283b00891f680579.pts 03001627/expert_verified/points_label/cf24fc2d10f8da31283b00891f680579.seg 03001627
+02958343/points/8b4879617bd256391738f25e3015f92e.pts 02958343/expert_verified/points_label/8b4879617bd256391738f25e3015f92e.seg 02958343
+03001627/points/55e1cde05a99f6c7d1d34366ca81fb3b.pts 03001627/expert_verified/points_label/55e1cde05a99f6c7d1d34366ca81fb3b.seg 03001627
+03001627/points/6c25ec1178e9bab6e545858398955dd1.pts 03001627/expert_verified/points_label/6c25ec1178e9bab6e545858398955dd1.seg 03001627
+03001627/points/862f70e73fa70c9b1a719e2a845bdada.pts 03001627/expert_verified/points_label/862f70e73fa70c9b1a719e2a845bdada.seg 03001627
+04379243/points/fa5dce1043f44c06ab88e3acae6e8bc5.pts 04379243/expert_verified/points_label/fa5dce1043f44c06ab88e3acae6e8bc5.seg 04379243
+03467517/points/6f9d1467eb39f8abfae47f572c17b9cb.pts 03467517/expert_verified/points_label/6f9d1467eb39f8abfae47f572c17b9cb.seg 03467517
+04379243/points/60ef2830979fd08ec72d4ae978770752.pts 04379243/expert_verified/points_label/60ef2830979fd08ec72d4ae978770752.seg 04379243
+03624134/points/d69e028056c9291069654277b747a908.pts 03624134/expert_verified/points_label/d69e028056c9291069654277b747a908.seg 03624134
+04379243/points/8e7c894039ae2cfe99e8bf807e902261.pts 04379243/expert_verified/points_label/8e7c894039ae2cfe99e8bf807e902261.seg 04379243
+02958343/points/4e2ca20091449636599389919f6522e6.pts 02958343/expert_verified/points_label/4e2ca20091449636599389919f6522e6.seg 02958343
+04379243/points/b10d84b3a04085b17618b16b281bdf56.pts 04379243/expert_verified/points_label/b10d84b3a04085b17618b16b281bdf56.seg 04379243
+03948459/points/d13986cc2403a2034b4b3d2a28039009.pts 03948459/expert_verified/points_label/d13986cc2403a2034b4b3d2a28039009.seg 03948459
+03636649/points/d97a86cea650ae0baf5b49ad7809302.pts 03636649/expert_verified/points_label/d97a86cea650ae0baf5b49ad7809302.seg 03636649
+03797390/points/ca198dc3f7dc0cacec6338171298c66b.pts 03797390/expert_verified/points_label/ca198dc3f7dc0cacec6338171298c66b.seg 03797390
+03636649/points/3f968096c74ee3a3b04a2e6a78ff6c49.pts 03636649/expert_verified/points_label/3f968096c74ee3a3b04a2e6a78ff6c49.seg 03636649
+02691156/points/4d6ec762d1583ded46555ee25941a22e.pts 02691156/expert_verified/points_label/4d6ec762d1583ded46555ee25941a22e.seg 02691156
+03467517/points/401ff6021157dee293f0194265a9746c.pts 03467517/expert_verified/points_label/401ff6021157dee293f0194265a9746c.seg 03467517
+04379243/points/c1d808c75cc5e7ab4da5bb83ec125010.pts 04379243/expert_verified/points_label/c1d808c75cc5e7ab4da5bb83ec125010.seg 04379243
+03790512/points/3d37db1d974499287395d58407f193ba.pts 03790512/expert_verified/points_label/3d37db1d974499287395d58407f193ba.seg 03790512
+03624134/points/65892e0f7f93129d14cb807a24b99e1e.pts 03624134/expert_verified/points_label/65892e0f7f93129d14cb807a24b99e1e.seg 03624134
+03624134/points/854e7bb73afaff7591ea3afb2749822f.pts 03624134/expert_verified/points_label/854e7bb73afaff7591ea3afb2749822f.seg 03624134
+03624134/points/7b492f2baa1dc710cc34b900bb2492e.pts 03624134/expert_verified/points_label/7b492f2baa1dc710cc34b900bb2492e.seg 03624134
+03636649/points/b4b15a84b9067f94a75d03186a0409e2.pts 03636649/expert_verified/points_label/b4b15a84b9067f94a75d03186a0409e2.seg 03636649
+03636649/points/9db87bf898efd448cbde89e0c48a01bf.pts 03636649/expert_verified/points_label/9db87bf898efd448cbde89e0c48a01bf.seg 03636649
+02954340/points/9bd54e0123d3cd70a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/9bd54e0123d3cd70a52821bf1aa3b19a.seg 02954340
--
Gitee
From 5d4cb67710466348b0eeac9a354af2589dcd2f5c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:09:44 +0000
Subject: [PATCH 29/54] =?UTF-8?q?AttRec=5FID2630=5Ffor=5FTensorFlow2.X?=
=?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../AttRec_ID2630_for_TensorFlow2.X/LICENSE | 21 ++
.../AttRec_ID2630_for_TensorFlow2.X/README.md | 208 ++++++++++++++++++
.../evaluate.py | 99 +++++++++
.../AttRec_ID2630_for_TensorFlow2.X/model.py | 162 ++++++++++++++
.../modelzoo_level.txt | 3 +
.../modules.py | 92 ++++++++
.../requirements.txt | 0
.../AttRec_ID2630_for_TensorFlow2.X/run_1p.sh | 3 +
.../test/train_full_1p.sh | 189 ++++++++++++++++
.../test/train_performance_1p.sh | 189 ++++++++++++++++
.../test/train_performance_1p_static.sh | 193 ++++++++++++++++
.../AttRec_ID2630_for_TensorFlow2.X/train.py | 208 ++++++++++++++++++
.../AttRec_ID2630_for_TensorFlow2.X/utils.py | 125 +++++++++++
13 files changed, 1492 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py
create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..51d555a15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..f052dd27c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md
@@ -0,0 +1,208 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Recommendation**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2022.4.11**
+
+**大小(Size):44KB**
+
+**框架(Framework):TensorFlow_2.6.2**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow框架的长短期用户兴趣表示训练代码**
+
+概述
+
+## 简述
+
+模型利用了self-attention机制,通过用户历史行为来预测item-item关系。self-attention能够预测用户行为轨迹上每个item的相关权重,从而学习更好的用户短期兴趣表示。模型最终在度量学习框架上训练,且同时考虑了长短期意图。
+
+- 参考论文:
+
+ https://arxiv.org/abs/1808.06414
+
+- 参考实现:
+
+ https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/AttRec
+
+- 适配昇腾 AI 处理器的实现:
+
+ skip
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+
+
+## 默认配置
+
+- 网络结构:
+ - 29-layers
+ - 1404700 total params
+
+- 训练超参(单卡):
+ - Batch size: 512
+ - Train epochs: 20
+ - Learning rate: 0.001
+ - Trans score: 1
+ - Max len: 5
+ - Embed dim: 100
+ - Embed reg: 1e-6
+ - Gamma: 0.5
+ - Mode: 'inner'
+ - w: 0.5
+ - K: 10
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+
+拉起脚本中,传入--precision_mode='allow_mix_precision'
+
+```
+ ./train_performance_1p_16bs.sh --help
+
+parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+```
+
+相关代码示例:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+ help='allow_fp32_to_fp16/force_fp16/ '
+ 'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+快速上手
+
+## 数据集准备
+
+1、用户自行准备好数据集,本网络使用的数据集是ml-1m数据集
+
+数据集目录参考如下:
+
+```
+├──ml_1m
+│ ├──movies.dat
+│ ├──ratings.dat
+│ ├──README
+│ ├──users.dat
+```
+
+
+
+## 模型训练
+
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+ 2. 单卡训练
+
+        2.1 单卡训练指令(脚本位于AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh),需要先使用cd命令进入test目录下,再使用下面的命令启动训练。请确保下面例子中的“--data_path”修改为用户的数据路径,这里选择将数据文件夹放在home目录下。
+
+ bash train_full_1p.sh --data_path=/home
+
+
+
+
+高级参考
+
+## 脚本和示例代码
+
+```
+|--LICENSE
+|--README.md #说明文档
+|--evaluate.py
+|--model.py
+|--modules.py
+|--train.py #训练代码
+|--requirements.txt #所需依赖
+|--run_1p.sh
+|--utils.py
+|--test #训练脚本目录
+| |--train_full_1p.sh #全量训练脚本
+| |--train_performance_1p.sh #performance训练脚本
+```
+
+## 脚本参数
+
+```
+--data_path # the path to train data
+--epochs # epochs of training
+--ckpt_save_path # directory to ckpt
+--batch_size # batch size for 1p
+--log_steps # log frequency
+--precision_mode # the path to save over dump data
+--over_dump # if or not over detection, default is False
+--data_dump_flag # data dump flag, default is False
+--data_dump_step # data dump step, default is 10
+--profiling # if or not profiling for performance debug, default is False
+--profiling_dump_path # the path to save profiling data
+--over_dump_path # the path to save over dump data
+--data_dump_path # the path to save dump data
+--use_mixlist # use_mixlist flag, default is False
+--fusion_off_flag # fusion_off flag, default is False
+--mixlist_file # mixlist file name, default is ops_info.json
+--fusion_off_file # fusion_off file name, default is fusion_switch.cfg
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡或者多卡训练。单卡和多卡通过运行不同脚本,支持单卡,8卡网络训练。模型存储路径为${cur_path}/output/$ASCEND_DEVICE_ID,包括训练的log以及checkpoints文件。以8卡训练为例,loss信息在文件${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log中。
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py
new file mode 100644
index 000000000..b8c843e86
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py
@@ -0,0 +1,99 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 13, 2020
+
+evaluate model
+
+@author: Ziyao Geng
+"""
+import pandas as pd
+import numpy as np
+
+
+def getHit(df):
+ """
+ calculate hit rate
+ :return:
+ """
+ if sum(df['pred']) < _K:
+ return 1
+ else:
+ return 0
+
+
+def getNDCG(df):
+ """
+ calculate NDCG
+ :return:
+ """
+ if sum(df['pred']) < _K:
+ return 1 / np.log(sum(df['pred']) + 2)
+ else:
+ return 0.
+
+
+def getMRR(df):
+ """
+ calculate MRR
+ :return:
+ """
+ return 1 / (sum(df['pred']) + 1)
+
+
+def evaluate_model(model, test, K):
+ """
+ evaluate model
+ :param model: model
+ :param test: test set
+ :param K: top K
+ :return: hit rate, ndcg
+ """
+ global _K
+ _K = K
+ test_X = test
+ # predict
+ pos_score, neg_score = model.predict(test_X)
+ # create dataframe
+ test_df = pd.DataFrame(test_X[0], columns=['user_id'])
+ # if mode == 'inner', pos score < neg score, pred = 1
+ if model.mode == 'inner':
+ test_df['pred'] = (pos_score <= neg_score).astype(np.int32)
+ else:
+ test_df['pred'] = (pos_score >= neg_score).astype(np.int32)
+ # groupby
+ tg = test_df.groupby('user_id')
+ # calculate hit
+ hit_rate = tg.apply(getHit).mean()
+ # calculate ndcg
+ ndcg = tg.apply(getNDCG).mean()
+ # calculate mrr
+ mrr = tg.apply(getMRR).mean()
+ return hit_rate, ndcg, mrr
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py
new file mode 100644
index 000000000..e56471cf7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py
@@ -0,0 +1,162 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 10, 2020
+
+model: Next Item Recommendation with Self-Attentive Metric Learning
+
+@author: Ziyao Geng
+"""
+
+import tensorflow as tf
+
+from tensorflow.keras import Model
+from tensorflow.keras.layers import Embedding, Input
+from tensorflow.keras.regularizers import l2
+
+from modules import *
+
+
+class AttRec(Model):
+ def __init__(self, feature_columns, maxlen=40, mode='inner', gamma=0.5, w=0.5, embed_reg=1e-6, **kwargs):
+ """
+ AttRec
+ :param feature_columns: A feature columns list. user + seq
+ :param maxlen: A scalar. In the paper, maxlen is L, the number of latest items.
+ :param gamma: A scalar. if mode == 'dist', gamma is the margin.
+ :param mode: A string. inner or dist.
+ :param w: A scalar. The weight of short interest.
+ :param embed_reg: A scalar. The regularizer of embedding.
+ """
+ super(AttRec, self).__init__(**kwargs)
+ # maxlen
+ self.maxlen = maxlen
+ # w
+ self.w = w
+ self.gamma = gamma
+ self.mode = mode
+ # feature columns
+ self.user_fea_col, self.item_fea_col = feature_columns
+ # embed_dim
+ self.embed_dim = self.item_fea_col['embed_dim']
+ # user embedding
+ self.user_embedding = Embedding(input_dim=self.user_fea_col['feat_num'],
+ input_length=1,
+ output_dim=self.user_fea_col['embed_dim'],
+ mask_zero=False,
+ embeddings_initializer='random_normal',
+ embeddings_regularizer=l2(embed_reg))
+ # item embedding
+ self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
+ input_length=1,
+ output_dim=self.item_fea_col['embed_dim'],
+ mask_zero=True,
+ embeddings_initializer='random_normal',
+ embeddings_regularizer=l2(embed_reg))
+ # item2 embedding, not share embedding
+ self.item2_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
+ input_length=1,
+ output_dim=self.item_fea_col['embed_dim'],
+ mask_zero=True,
+ embeddings_initializer='random_normal',
+ embeddings_regularizer=l2(embed_reg))
+ # self-attention
+ self.self_attention = SelfAttention_Layer()
+
+ def call(self, inputs, **kwargs):
+ # input
+ user_inputs, seq_inputs, pos_inputs, neg_inputs = inputs
+ # mask
+ # mask = self.item_embedding.compute_mask(seq_inputs)
+ mask = tf.cast(tf.not_equal(seq_inputs, 0), dtype=tf.float32) # (None, maxlen)
+ # user info
+ user_embed = self.user_embedding(tf.squeeze(user_inputs, axis=-1)) # (None, dim)
+ # seq info
+ seq_embed = self.item_embedding(seq_inputs) # (None, maxlen, dim)
+ # item
+ pos_embed = self.item_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim)
+ neg_embed = self.item_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim)
+ # item2 embed
+ pos_embed2 = self.item2_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim)
+ neg_embed2 = self.item2_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim)
+
+ # short-term interest
+ short_interest = self.self_attention([seq_embed, seq_embed, seq_embed, mask]) # (None, dim)
+
+ # mode
+ if self.mode == 'inner':
+ # long-term interest, pos and neg
+ pos_long_interest = tf.multiply(user_embed, pos_embed2)
+ neg_long_interest = tf.multiply(user_embed, neg_embed2)
+ # combine
+ pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) \
+ + (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, pos_embed), axis=-1, keepdims=True)
+ neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) \
+ + (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, neg_embed), axis=-1, keepdims=True)
+ self.add_loss(tf.reduce_mean(-tf.math.log(tf.nn.sigmoid(pos_scores - neg_scores))))
+ else:
+ # clip by norm
+ user_embed = tf.clip_by_norm(user_embed, 1, -1)
+ pos_embed = tf.clip_by_norm(pos_embed, 1, -1)
+ neg_embed = tf.clip_by_norm(neg_embed, 1, -1)
+ pos_embed2 = tf.clip_by_norm(pos_embed2, 1, -1)
+ neg_embed2 = tf.clip_by_norm(neg_embed2, 1, -1)
+ # distance
+ # long-term interest, pos and neg
+ pos_long_interest = tf.square(user_embed - pos_embed2) # (None, dim)
+ neg_long_interest = tf.square(user_embed - neg_embed2) # (None, dim)
+ # combine. Here is a difference from the original paper.
+ pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) + \
+ (1 - self.w) * tf.reduce_sum(tf.square(short_interest - pos_embed), axis=-1, keepdims=True)
+ neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) + \
+ (1 - self.w) * tf.reduce_sum(tf.square(short_interest - neg_embed), axis=-1, keepdims=True)
+ # minimize loss
+ # self.add_loss(tf.reduce_sum(tf.maximum(pos_scores - neg_scores + self.gamma, 0)))
+ self.add_loss(tf.reduce_sum(tf.nn.relu(pos_scores - neg_scores + self.gamma)))
+ return pos_scores, neg_scores
+
+ def summary(self):
+ seq_inputs = Input(shape=(self.maxlen,), dtype=tf.int32)
+ user_inputs = Input(shape=(1, ), dtype=tf.int32)
+ pos_inputs = Input(shape=(1, ), dtype=tf.int32)
+ neg_inputs = Input(shape=(1, ), dtype=tf.int32)
+ Model(inputs=[user_inputs, seq_inputs, pos_inputs, neg_inputs],
+ outputs=self.call([user_inputs, seq_inputs, pos_inputs, neg_inputs])).summary()
+
+
+def test_model():
+ user_features = {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8}
+ seq_features = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}
+ features = [user_features, seq_features]
+ model = AttRec(features, mode='dist')
+ model.summary()
+
+
+# test_model()
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..a829ab59b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:NOK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py
new file mode 100644
index 000000000..c325b2f25
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py
@@ -0,0 +1,92 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 10, 2020
+
+modules of AttRec: self-attention mechanism
+
+@author: Ziyao Geng
+"""
+
+import tensorflow as tf
+import numpy as np
+import math
+
+from tensorflow.keras.layers import Layer, Dense
+from tensorflow.keras.losses import Loss
+
+
+class SelfAttention_Layer(Layer):
+ def __init__(self):
+ super(SelfAttention_Layer, self).__init__()
+
+ def build(self, input_shape):
+ self.dim = input_shape[0][-1]
+ self.W = self.add_weight(shape=[self.dim, self.dim], name='weight',
+ initializer='random_uniform')
+
+ def call(self, inputs, **kwargs):
+ q, k, v, mask = inputs
+ # pos encoding
+ k += self.positional_encoding(k)
+ q += self.positional_encoding(q)
+ # Nonlinear transformation
+ q = tf.nn.relu(tf.matmul(q, self.W)) # (None, seq_len, dim)
+ k = tf.nn.relu(tf.matmul(k, self.W)) # (None, seq_len, dim)
+ mat_qk = tf.matmul(q, k, transpose_b=True) # (None, seq_len, seq_len)
+ dk = tf.cast(self.dim, dtype=tf.float32)
+ # Scaled
+ scaled_att_logits = mat_qk / tf.sqrt(dk)
+ # Mask
+ mask = tf.tile(tf.expand_dims(mask, 1), [1, q.shape[1], 1]) # (None, seq_len, seq_len)
+ paddings = tf.ones_like(scaled_att_logits) * (-2 ** 32 + 1)
+ outputs = tf.where(tf.equal(mask, 0), paddings, scaled_att_logits) # (None, seq_len, seq_len)
+ # softmax
+ outputs = tf.nn.softmax(logits=outputs, axis=-1) # (None, seq_len, seq_len)
+ # output
+ outputs = tf.matmul(outputs, v) # (None, seq_len, dim)
+ outputs = tf.reduce_mean(outputs, axis=1) # (None, dim)
+ return outputs
+
+ @staticmethod
+ def get_angles(pos, i, d_model):
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
+ return pos * angle_rates
+
+ def positional_encoding(self, QK_input):
+ angle_rads = self.get_angles(np.arange(QK_input.shape[1])[:, np.newaxis],
+ np.arange(self.dim)[np.newaxis, :], self.dim)
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
+ pos_encoding = angle_rads[np.newaxis, ...]
+
+ return tf.cast(pos_encoding, dtype=tf.float32)
+
+
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..837e2c7d2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1,3 @@
+cur_path='pwd'
+python3 ${cur_path}/train.py --epochs=40 --data_path=. --batch_size=1024 --ckpt_save_path="" --precision_mode="" > loss+perf_gpu.txt 2>&1
+
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..3146a4489
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+
+# Current path; do not modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+# Collective communication parameters; do not modify
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=$ASCEND_DEVICE_ID
+
+# Dataset path; keep empty here, do not modify (must be passed via --data_path)
+data_path=""
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="AttRec_ID2630_for_TensorFlow2.X"
+# Number of training epochs
+train_epochs=20
+# Training batch size
+batch_size=512
+
+
+############ Maintenance/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; the following do not need modification
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/../configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/../configs/fusion_switch.cfg"
+############ Maintenance/debug parameters ##############
+
+############ Maintenance/debug parameters ##############
+# Parse command-line overrides of the form --key=value
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --log_steps* ]];then
+        log_steps=`echo ${para#*=}`
+    fi
+done
+############ Maintenance/debug parameters ##############
+
+# 帮助信息,不需要修改
+if [[ $1 == --help || $1 == -h ]];then
+ echo"usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+#校验是否传入data_path,不需要修改
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be confing"
+ exit 1
+fi
+
+# Training start time; do not modify
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; do not modify
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; do not modify
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Run the training script; the pass-through arguments below do not need
+    # modification, the others should be reviewed per model
+    nohup python3 train.py --epochs=$train_epochs \
+        --batch_size=$batch_size \
+        --data_path=$data_path \
+        --log_steps=1919\
+        --ckpt_save_path="" \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+#conda deactivate
+# Training end time; do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1`
+wait
+FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}' | tail -n 1`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+# NOTE(review): assumes the NDCG value is field 11 of the last matching log
+# line -- verify against the actual log format emitted by train.py
+train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring result summary
+# Training case information; do not modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per step
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the final step; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; do not modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..2df0ed262
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+
+# Current path; do not modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+# Collective communication parameters; do not modify
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=$ASCEND_DEVICE_ID
+
+# Dataset path; keep empty here, do not modify (must be passed via --data_path)
+data_path=""
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="AttRec_ID2630_for_TensorFlow2.X"
+# Number of training epochs (shorter run: performance case only)
+train_epochs=10
+# Training batch size
+batch_size=512
+
+
+############ Maintenance/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; the following do not need modification
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/../configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/../configs/fusion_switch.cfg"
+############ Maintenance/debug parameters ##############
+
+############ Maintenance/debug parameters ##############
+# Parse command-line overrides of the form --key=value
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --log_steps* ]];then
+        log_steps=`echo ${para#*=}`
+    fi
+done
+############ Maintenance/debug parameters ##############
+
+# 帮助信息,不需要修改
+if [[ $1 == --help || $1 == -h ]];then
+ echo"usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+#校验是否传入data_path,不需要修改
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be confing"
+ exit 1
+fi
+
+# Training start time; do not modify
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; do not modify
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; do not modify
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Run the training script; the pass-through arguments below do not need
+    # modification, the others should be reviewed per model
+    nohup python3 train.py --epochs=$train_epochs \
+        --batch_size=$batch_size \
+        --data_path=$data_path \
+        --log_steps=960 \
+        --ckpt_save_path="" \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+#conda deactivate
+# Training end time; do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1`
+wait
+FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}' | tail -n 1`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+# NOTE(review): assumes the NDCG value is field 11 of the last matching log
+# line -- verify against the actual log format emitted by train.py
+train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring result summary
+# Training case information; do not modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per step
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the final step; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; do not modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh
new file mode 100644
index 000000000..de4d8a089
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh
@@ -0,0 +1,193 @@
+#!/bin/bash
+
+# Current path; do not modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+# Collective communication parameters; do not modify
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=$ASCEND_DEVICE_ID
+
+# Dataset path; keep empty here, do not modify (must be passed via --data_path)
+data_path=""
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="AttRec_ID2630_for_TensorFlow2.X"
+# Number of training epochs (shorter run: performance case only)
+train_epochs=10
+# Training batch size
+batch_size=512
+
+
+############ Maintenance/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; the following do not need modification
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/../configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/../configs/fusion_switch.cfg"
+############ Maintenance/debug parameters ##############
+
+############ Maintenance/debug parameters ##############
+# Parse command-line overrides of the form --key=value
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --log_steps* ]];then
+        log_steps=`echo ${para#*=}`
+    fi
+done
+############ Maintenance/debug parameters ##############
+
+# 帮助信息,不需要修改
+if [[ $1 == --help || $1 == -h ]];then
+ echo"usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+#校验是否传入data_path,不需要修改
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be confing"
+ exit 1
+fi
+
+# Training start time; do not modify
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; do not modify
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; do not modify
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Run the training script; the pass-through arguments below do not need
+    # modification, the others should be reviewed per model
+    nohup python3 train.py --epochs=$train_epochs \
+        --batch_size=$batch_size \
+        --data_path=$data_path \
+        --log_steps=960 \
+        --ckpt_save_path="" \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} \
+        --static=1> ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+        # NOTE(review): "--static=1>" works because the word before ">" is not
+        # a bare digit, so bash passes "--static=1" and redirects stdout; a
+        # space before ">" would be clearer. Kept as-is.
+done
+wait
+
+#conda deactivate
+# Training end time; do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+# Average steps-per-epoch and seconds-per-epoch from the Keras verbose=2 lines
+Step=`grep val_loss $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'`
+Time=`grep val_loss $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +3 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'`
+TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'`
+
+wait
+# Throughput = batch size / seconds per step
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+# NOTE(review): assumes the NDCG value is field 11 of the last matching log
+# line -- verify against the actual log format emitted by train.py
+train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring result summary
+# Training case information; do not modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per step
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the final step; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; do not modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..1ba29affb
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py
@@ -0,0 +1,208 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 11, 2020
+
+train AttRec model
+
+@author: Ziyao Geng
+"""
+
+import npu_device
+
+import os
+import ast
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from time import time
+from tensorflow.keras.optimizers import Adam
+
+from model import AttRec
+from modules import *
+from evaluate import *
+from utils import *
+
+import argparse
+
+def parse_args():
+    """Parse command-line arguments for AttRec training on Ascend NPU.
+
+    Boolean-ish flags (over_dump, data_dump_flag, profiling, use_mixlist,
+    fusion_off_flag) are parsed with ast.literal_eval so the strings
+    "True"/"False" passed by the launch scripts become real booleans.
+    Raises ValueError when any unknown argument is supplied.
+    """
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--data_path', default='', help="""directory to data""")
+    parser.add_argument('--ckpt_save_path', default='', help="""directory to ckpt""")
+    parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=3, type=int, help="""epochs""")
+    parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""")
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False')
+    parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json')
+    parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg')
+    parser.add_argument('--static', default=0, type=int, help="""static""")
+    args, unknown_args = parser.parse_known_args()
+    # Fail fast on any unrecognized argument instead of silently ignoring it
+    if len(unknown_args) > 0:
+        for bad_arg in unknown_args:
+            print("ERROR: Unknown command line arg: %s" % bad_arg)
+        raise ValueError("Invalid command line arg(s)")
+    return args
+
+
+args = parse_args()
+def npu_config():
+    """Configure the Ascend NPU runtime (npu_device) from command-line args.
+
+    Enables data dump, overflow-debug dump and profiling when requested, sets
+    the precision mode, optionally applies a mixed-precision op list and a
+    fusion switch file, then opens the NPU device as the default TF device.
+    Reads the module-level `args` parsed above.
+    """
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        # The backslash-continued pieces below are concatenated into a single
+        # JSON string (the embedded spaces are harmless to the JSON parser).
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode=args.precision_mode
+    # The mixed-precision op list only applies under allow_mix_precision
+    if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+        npu_device.global_options().modify_mixlist=args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file=args.fusion_off_file
+    npu_device.open().as_default()
+
+npu_config()
+
+class LossHistory(tf.keras.callbacks.Callback):
+ def __init__(self, bs):
+ super().__init__()
+ self.batch_size = bs
+ def on_batch_begin(self, batch, logs={}):
+ self.start = time()
+ def on_batch_end(self, batch, logs={}):
+ if batch % args.log_steps == 0:
+ loss = logs.get('loss')
+ dura = time() - self.start
+ if dura < 10:
+ self.epoch_perf.append(dura)
+ print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True)
+ def on_epoch_begin(self, epoch, logs={}):
+ self.epoch_perf = []
+ self.epochstart = time()
+ def on_epoch_end(self, epoch, logs={}):
+ duration = time() - self.epochstart
+ print('epoch_duration: ', duration)
+ self.perf.append(np.mean(self.epoch_perf))
+ def on_train_begin(self, logs={}):
+ print('params: ', self.params)
+ self.perf = []
+ def on_train_end(self, logs={}):
+ print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf)))
+
+
+if __name__ == '__main__':
+
+    # ========================= Hyper Parameters =======================
+    file = 'ratings.dat'
+    file = os.path.join(args.data_path, file)
+    print(file)
+    trans_score = 1
+    maxlen = 5
+
+    embed_dim = 100
+    embed_reg = 1e-6  # 1e-6
+    gamma = 0.5
+    mode = 'inner'  # 'inner' or 'dist'
+    w = 0.5
+    K = 10
+
+    learning_rate = 0.001
+    epochs = args.epochs
+    batch_size = args.batch_size
+    # ========================== Create dataset =======================
+    feature_columns, train, val, test = create_implicit_ml_1m_dataset(file, trans_score, embed_dim, maxlen)
+    if args.static==1:
+        # Static-shape mode: truncate train/val to fixed lengths so every step
+        # sees the same tensor shapes (avoids NPU graph recompilation).
+        # NOTE(review): 982016 and 5632 look like fixed multiples of the batch
+        # size chosen for this dataset -- confirm against the dataset size.
+        print('=====================[DEBUG]======================',flush=True)
+        train_X = [np.array(train[0][:982016],dtype='int32'),np.array(train[1][:982016],dtype='int32'),np.array(train[2][:982016],dtype='int32'),np.array(train[3][:982016],dtype='int32')]
+        val_X = [np.array(val[0][:5632],dtype='int32'),np.array(val[1][:5632],dtype='int32'),np.array(val[2][:5632],dtype='int32'),np.array(val[3][:5632],dtype='int32')]
+        print(train_X[0].shape,train_X[1].shape,train_X[2].shape,train_X[3].shape,flush=True)
+        print(val_X[0].shape,val_X[1].shape,val_X[2].shape,val_X[3].shape,flush=True)
+
+        #train_X = train[:491520]
+        #val_X = val[:491520]
+    else:
+        train_X = train
+        val_X = val
+    # ============================Build Model==========================
+    model = AttRec(feature_columns, maxlen, mode, gamma, w, embed_reg)
+    model.summary()
+    logger = LossHistory(batch_size)
+    # =========================Compile============================
+    # No loss argument: the model computes its own loss internally
+    model.compile(optimizer=Adam(learning_rate=learning_rate))
+
+    results = []
+    for epoch in range(1, epochs + 1):
+        # ===========================Fit==============================
+        t1 = time()
+        model.fit(
+            train_X,
+            None,
+            validation_data=(val_X, None),
+            epochs=1,
+            # callbacks=[tensorboard, checkpoint],
+            callbacks=logger,
+            batch_size=batch_size,
+            verbose=2
+        )
+        # Checkpoint saving is currently disabled (see commented line below)
+        save_ckpt = os.path.join(args.ckpt_save_path, "checkpoint/tf_model")
+        #model.save_weights(filepath=save_ckpt, save_format="tf")
+        # ===========================Test==============================
+        t2 = time()
+        # Evaluate ranking metrics every 5 epochs only (evaluation is slow)
+        if epoch % 5 == 0:
+            hit_rate, ndcg, mrr = evaluate_model(model, test, K)
+            print('Iteration %d Fit [%.1f s], Evaluate [%.1f s]: HR = %.4f, NDCG = %.4f, MRR = %.4f'
+                  % (epoch, t2 - t1, time() - t2, hit_rate, ndcg, mrr))
+            results.append([epoch, t2 - t1, time() - t2, hit_rate, ndcg, mrr])
+    # ========================== Write Log ===========================
+    # NOTE(review): this write fails if the relative 'log/' directory does not
+    # exist in the working directory -- verify the launch scripts create it.
+    pd.DataFrame(results, columns=['Iteration', 'fit_time', 'evaluate_time',
+                                   'hit_rate', 'ndcg', 'mrr']).to_csv(
+        'log/AttRec_log_maxlen_{}_dim_{}_K_{}_w_{}.csv'.format(maxlen, embed_dim, K, w), index=False)
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py
new file mode 100644
index 000000000..1f6f445ef
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py
@@ -0,0 +1,125 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 10, 2020
+
+create implicit ml-1m dataset(update, delete dense_inputs, sparse_inputs)
+
+This dataset is for AttRec model use.
+
+@author: Ziyao Geng
+"""
+import pandas as pd
+import numpy as np
+import random
+from tqdm import tqdm
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+
+
def sparseFeature(feat, feat_num, embed_dim=4):
    """Describe one sparse (categorical) feature column.

    :param feat: feature name.
    :param feat_num: total number of distinct, non-repeating values of the feature.
    :param embed_dim: embedding dimension for the feature.
    :return: dict with keys 'feat', 'feat_num', 'embed_dim'.
    """
    return dict(feat=feat, feat_num=feat_num, embed_dim=embed_dim)
+
+
def create_implicit_ml_1m_dataset(file, trans_score=2, embed_dim=8, maxlen=40):
    """Build the implicit-feedback MovieLens-1M dataset for the AttRec model.

    Each user's chronologically sorted interactions are split leave-one-out
    style: the last item becomes the test target (paired with 100 sampled
    negatives), the second-to-last the validation target, and every earlier
    item a training target, each with the preceding items as history.

    :param file: A string. dataset path (ratings.dat, '::'-separated).
    :param trans_score: A scalar. ratings >= trans_score are kept as positive
        (implicit) interactions; lower ratings are dropped.
    :param embed_dim: A scalar. latent factor (embedding) dimension.
    :param maxlen: A scalar. history sequences are padded/truncated to this length.
    :return: (feature_columns, train_X, val_X, test_X) where each *_X is
        [user_ids, padded_histories, pos_items, neg_items].
    """
    print('==========Data Preprocess Start============')
    data_df = pd.read_csv(file, sep="::", engine='python',
                          names=['user_id', 'item_id', 'label', 'Timestamp'])
    # implicit dataset: keep only sufficiently high ratings as positives
    data_df = data_df[data_df.label >= trans_score]

    # sort chronologically within each user
    data_df = data_df.sort_values(by=['user_id', 'Timestamp'])

    train_data, val_data, test_data = [], [], []

    item_id_max = data_df['item_id'].max()
    for user_id, df in tqdm(data_df[['user_id', 'item_id']].groupby('user_id')):
        pos_list = df['item_id'].tolist()
        pos_set = set(pos_list)  # O(1) membership tests during negative sampling

        def gen_neg():
            # rejection-sample an item id the user has never interacted with;
            # seeded with pos_list[0] so the loop body runs at least once
            neg = pos_list[0]
            while neg in pos_set:
                neg = random.randint(1, item_id_max)
            return neg

        neg_list = [gen_neg() for i in range(len(pos_list) + 100)]
        for i in range(1, len(pos_list)):
            hist_i = pos_list[:i]
            if i == len(pos_list) - 1:
                # last item: test sample with the 100 extra negatives
                for neg in neg_list[i:]:
                    test_data.append([user_id, hist_i, pos_list[i], neg])
            elif i == len(pos_list) - 2:
                val_data.append([user_id, hist_i, pos_list[i], neg_list[i]])
            else:
                train_data.append([user_id, hist_i, pos_list[i], neg_list[i]])

    # feature columns
    user_num, item_num = data_df['user_id'].max() + 1, data_df['item_id'].max() + 1
    feature_columns = [sparseFeature('user_id', user_num, embed_dim),
                       sparseFeature('item_id', item_num, embed_dim)]

    # shuffle
    random.shuffle(train_data)
    random.shuffle(val_data)
    random.shuffle(test_data)

    # create dataframe
    train = pd.DataFrame(train_data, columns=['user_id', 'hist', 'pos_item', 'neg_item'])
    val = pd.DataFrame(val_data, columns=['user_id', 'hist', 'pos_item', 'neg_item'])
    test = pd.DataFrame(test_data, columns=['user_id', 'hist', 'pos_item', 'neg_item'])
    print('==================Padding===================')

    # create dataset
    def df_to_list(data):
        # model inputs: [user_ids, padded histories, positives, negatives]
        return [data['user_id'].values, pad_sequences(data['hist'], maxlen=maxlen),
                data['pos_item'].values, data['neg_item'].values]

    train_X = df_to_list(train)
    val_X = df_to_list(val)
    test_X = df_to_list(test)
    print('============Data Preprocess End=============')
    return feature_columns, train_X, val_X, test_X
+
+
+# create_implicit_ml_1m_dataset('../dataset/ml-1m/ratings.dat', maxlen=5)
\ No newline at end of file
--
Gitee
From 529953db6590c3b60085efc9eb96821d61a98a69 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?=
<10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:10:36 +0000
Subject: [PATCH 30/54] =?UTF-8?q?AUTOAUGMENT=5FID2891=5Ffor=5FTensorFlow2.?=
=?UTF-8?q?X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../LICENSE | 21 ++
.../README.md | 180 +++++++++++
.../auto_augment.py | 271 ++++++++++++++++
.../cosine_annealing.py | 57 ++++
.../dataset.py | 128 ++++++++
.../example.png | Bin 0 -> 119083 bytes
.../modelzoo_level.txt | 3 +
.../requirements.txt | 0
.../test/train_full_1p.sh | 114 +++++++
.../test/train_performance_1p.sh | 181 +++++++++++
.../test/train_performance_1p_static.sh | 183 +++++++++++
.../train.py | 302 ++++++++++++++++++
.../utils.py | 42 +++
.../wide_resnet.py | 101 ++++++
14 files changed, 1583 insertions(+)
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/modelzoo_level.txt
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/requirements.txt
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_full_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p_static.sh
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/train.py
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/utils.py
create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/wide_resnet.py
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..8697e3ca0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Takato Kimura
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..1b89b26bd
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md
@@ -0,0 +1,180 @@
+- [基本信息](#基本信息.md)
+- [概述](#概述.md)
+- [训练环境准备](#训练环境准备.md)
+- [快速上手](#快速上手.md)
+- [迁移学习指导](#迁移学习指导.md)
+- [高级参考](#高级参考.md)
+
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Image Classification**
+
+**版本(Version):1.1**
+
+**修改时间(Modified) :2021.10.01**
+
+**大小(Size)**_**:324KB**
+
+**框架(Framework):TensorFlow 2.4.1**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Benchmark**
+
+**描述(Description):基于TensorFlow框架的cv和模式识别网络训练代码**
+
+概述
+
+- AutoAugment通过搜索数据增强策略组合来提升图像分类模型的精度。本项目基于Keras实现AutoAugment,并在CIFAR-10数据集上训练Wide-ResNet网络。
+
+- 参考论文:
+
+ [https://arxiv.org/abs/1805.09501](https://arxiv.org/abs/1805.09501)
+
+- 参考实现:
+
+ [https://github.com/4uiiurz1/keras-auto-augment](https://github.com/4uiiurz1/keras-auto-augment)
+
+- 适配昇腾 AI 处理器的实现:
+
+ skip
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
+## 默认配置
+- 网络结构
+
+- 训练超参(单卡):
+ - Batch size: 128
+ - Train epochs:200
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+config_proto = tf.ConfigProto(allow_soft_placement=True)
+ custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = 'NpuOptimizer'
+ custom_op.parameter_map["use_off_line"].b = True
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+ config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+ session_config = npu_config_proto(config_proto=config_proto)
+```
+
+训练环境准备
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+
+快速上手
+
+## 数据集准备
+
+1. 数据集请用户自行获取。
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+ 1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+ 环境变量配置信息参见:
+
+ [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+ 2. 单卡训练
+
+ 2.1 设置单卡训练参数(脚本位于AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+
+ ```
+ batch_size=128
+ #训练step
+ train_epochs=200
+ ```
+
+ 2.2 单卡训练指令(脚本位于AUTOAUGMENT_ID2891_for_TensorFlow2.X/test)
+
+ ```
+ 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
+ bash train_full_1p.sh --data_path=xx
+ 数据集应有如下结构(数据切分可能不同)
+ |
+ ├─cifar-10-batches-py.tar.gz
+
+
+ ```
+
+迁移学习指导
+
+- 数据集准备。
+
+ 1. 获取数据。
+ 请参见“快速上手”中的数据集准备
+
+- 模型训练
+
+ 请参考“快速上手”章节
+
+高级参考
+
+## 脚本和示例代码
+
+ ├── README.md //说明文档
+ ├── requirements.txt //依赖
+ ├── train.py //主脚本
+ ├── utils.py
+    ├── auto_augment.py
+ ├── cosine_annealing.py
+ ├── dataset.py
+ ├── wide_resnet.py
+ ├── test
+ | |—— train_full_1p.sh //单卡训练脚本
+ | |—— train_performance_1p.sh //单卡训练脚本
+
+## 脚本参数
+
+```
+batch_size 训练batch_size
+train_epochs 总训练epoch数
+其余参数请在utils.py中配置flag默认值
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py
new file mode 100644
index 000000000..a45f34461
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py
@@ -0,0 +1,271 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import random
+import numpy as np
+import scipy
+from scipy import ndimage
+from PIL import Image, ImageEnhance, ImageOps
+
+
# Registry mapping AutoAugment operation names (as they appear in policy
# entries) to the image-transform callables defined later in this module.
# Lambdas are used deliberately: they late-bind the function names, so this
# dict can be created before the functions below are defined.
operations = {
    'ShearX': lambda img, magnitude: shear_x(img, magnitude),
    'ShearY': lambda img, magnitude: shear_y(img, magnitude),
    'TranslateX': lambda img, magnitude: translate_x(img, magnitude),
    'TranslateY': lambda img, magnitude: translate_y(img, magnitude),
    'Rotate': lambda img, magnitude: rotate(img, magnitude),
    'AutoContrast': lambda img, magnitude: auto_contrast(img, magnitude),
    'Invert': lambda img, magnitude: invert(img, magnitude),
    'Equalize': lambda img, magnitude: equalize(img, magnitude),
    'Solarize': lambda img, magnitude: solarize(img, magnitude),
    'Posterize': lambda img, magnitude: posterize(img, magnitude),
    'Contrast': lambda img, magnitude: contrast(img, magnitude),
    'Color': lambda img, magnitude: color(img, magnitude),
    'Brightness': lambda img, magnitude: brightness(img, magnitude),
    'Sharpness': lambda img, magnitude: sharpness(img, magnitude),
    'Cutout': lambda img, magnitude: cutout(img, magnitude),
}
+
+
def apply_policy(img, policy):
    """Apply a two-operation AutoAugment sub-policy to an image.

    :param img: ndarray image.
    :param policy: sequence [op1, prob1, mag1, op2, prob2, mag2]; each op is
        applied independently with its own probability and magnitude bucket.
    :return: the (possibly) transformed image.
    """
    op1, prob1, mag1, op2, prob2, mag2 = policy
    if random.random() < prob1:
        img = operations[op1](img, mag1)
    if random.random() < prob2:
        img = operations[op2](img, mag2)

    return img
+
+
def transform_matrix_offset_center(matrix, x, y):
    """Conjugate an affine transform so it acts about the image center.

    :param matrix: 3x3 homogeneous transform defined about the origin.
    :param x: size along the first image axis (callers pass img.shape[0]).
    :param y: size along the second image axis (callers pass img.shape[1]).
    :return: 3x3 homogeneous transform equivalent to `matrix` applied
        about the image center.
    """
    center_x = x / 2.0 + 0.5
    center_y = y / 2.0 + 0.5
    shift_to_center = np.array([[1, 0, center_x], [0, 1, center_y], [0, 0, 1]])
    shift_back = np.array([[1, 0, -center_x], [0, 1, -center_y], [0, 0, 1]])
    return shift_to_center @ matrix @ shift_back
+
+
def shear_x(img, magnitude):
    """Shear the image horizontally by a random factor.

    The factor is drawn uniformly from bucket [magnitudes[magnitude],
    magnitudes[magnitude + 1]] of 11 points spanning [-0.3, 0.3].

    :param img: HWC ndarray image.
    :param magnitude: int in [0, 9] indexing the magnitude bucket.
    :return: sheared ndarray image with the same shape.
    """
    magnitudes = np.linspace(-0.3, 0.3, 11)

    transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 0],
                                 [0, 1, 0],
                                 [0, 0, 1]])
    transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
    affine_matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    # ndimage.interpolation.affine_transform is a deprecated alias that was
    # removed from recent SciPy releases; call ndimage.affine_transform.
    img = np.stack([ndimage.affine_transform(
        img[:, :, c],
        affine_matrix,
        offset) for c in range(img.shape[2])], axis=2)
    return img
+
+
def shear_y(img, magnitude):
    """Shear the image vertically by a random factor.

    The factor is drawn uniformly from bucket [magnitudes[magnitude],
    magnitudes[magnitude + 1]] of 11 points spanning [-0.3, 0.3].

    :param img: HWC ndarray image.
    :param magnitude: int in [0, 9] indexing the magnitude bucket.
    :return: sheared ndarray image with the same shape.
    """
    magnitudes = np.linspace(-0.3, 0.3, 11)

    transform_matrix = np.array([[1, 0, 0],
                                 [random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 1, 0],
                                 [0, 0, 1]])
    transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
    affine_matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    # ndimage.interpolation.affine_transform is a deprecated alias that was
    # removed from recent SciPy releases; call ndimage.affine_transform.
    img = np.stack([ndimage.affine_transform(
        img[:, :, c],
        affine_matrix,
        offset) for c in range(img.shape[2])], axis=2)
    return img
+
+
def translate_x(img, magnitude):
    """Translate the image along its second (x) axis by a random fraction.

    The fraction is drawn uniformly from bucket [magnitudes[magnitude],
    magnitudes[magnitude + 1]] of 11 points spanning [-150/331, 150/331],
    then scaled by the image width.

    :param img: HWC ndarray image.
    :param magnitude: int in [0, 9] indexing the magnitude bucket.
    :return: translated ndarray image with the same shape.
    """
    magnitudes = np.linspace(-150/331, 150/331, 11)

    transform_matrix = np.array([[1, 0, 0],
                                 [0, 1, img.shape[1]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
                                 [0, 0, 1]])
    transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
    affine_matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    # ndimage.interpolation.affine_transform is a deprecated alias that was
    # removed from recent SciPy releases; call ndimage.affine_transform.
    img = np.stack([ndimage.affine_transform(
        img[:, :, c],
        affine_matrix,
        offset) for c in range(img.shape[2])], axis=2)
    return img
+
+
def translate_y(img, magnitude):
    """Translate the image along its first (y) axis by a random fraction.

    The fraction is drawn uniformly from bucket [magnitudes[magnitude],
    magnitudes[magnitude + 1]] of 11 points spanning [-150/331, 150/331],
    then scaled by the image height.

    :param img: HWC ndarray image.
    :param magnitude: int in [0, 9] indexing the magnitude bucket.
    :return: translated ndarray image with the same shape.
    """
    magnitudes = np.linspace(-150/331, 150/331, 11)

    transform_matrix = np.array([[1, 0, img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
                                 [0, 1, 0],
                                 [0, 0, 1]])
    transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
    affine_matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    # ndimage.interpolation.affine_transform is a deprecated alias that was
    # removed from recent SciPy releases; call ndimage.affine_transform.
    img = np.stack([ndimage.affine_transform(
        img[:, :, c],
        affine_matrix,
        offset) for c in range(img.shape[2])], axis=2)
    return img
+
+
def rotate(img, magnitude):
    """Rotate the image about its center by a random angle.

    The angle (degrees) is drawn uniformly from bucket
    [magnitudes[magnitude], magnitudes[magnitude + 1]] of 11 points
    spanning [-30, 30].

    :param img: HWC ndarray image.
    :param magnitude: int in [0, 9] indexing the magnitude bucket.
    :return: rotated ndarray image with the same shape.
    """
    magnitudes = np.linspace(-30, 30, 11)

    theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
    transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                 [np.sin(theta), np.cos(theta), 0],
                                 [0, 0, 1]])
    transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
    affine_matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    # ndimage.interpolation.affine_transform is a deprecated alias that was
    # removed from recent SciPy releases; call ndimage.affine_transform.
    img = np.stack([ndimage.affine_transform(
        img[:, :, c],
        affine_matrix,
        offset) for c in range(img.shape[2])], axis=2)
    return img
+
+
def auto_contrast(img, magnitude):
    """Maximize image contrast via PIL autocontrast; `magnitude` is unused."""
    return np.array(ImageOps.autocontrast(Image.fromarray(img)))
+
+
def invert(img, magnitude):
    """Invert all pixel values via PIL; `magnitude` is unused."""
    return np.array(ImageOps.invert(Image.fromarray(img)))
+
+
def equalize(img, magnitude):
    """Equalize the image histogram via PIL; `magnitude` is unused."""
    return np.array(ImageOps.equalize(Image.fromarray(img)))
+
+
def solarize(img, magnitude):
    """Invert pixels above a random threshold from bucket `magnitude` of [0, 256]."""
    thresholds = np.linspace(0, 256, 11)
    threshold = random.uniform(thresholds[magnitude], thresholds[magnitude + 1])
    return np.array(ImageOps.solarize(Image.fromarray(img), threshold))
+
+
def posterize(img, magnitude):
    """Reduce to a random number of bits per channel from bucket `magnitude` of [4, 8]."""
    bit_levels = np.linspace(4, 8, 11)
    bits = int(round(random.uniform(bit_levels[magnitude], bit_levels[magnitude + 1])))
    return np.array(ImageOps.posterize(Image.fromarray(img), bits))
+
+
def contrast(img, magnitude):
    """Scale contrast by a random factor from bucket `magnitude` of [0.1, 1.9]."""
    factors = np.linspace(0.1, 1.9, 11)
    factor = random.uniform(factors[magnitude], factors[magnitude + 1])
    return np.array(ImageEnhance.Contrast(Image.fromarray(img)).enhance(factor))
+
+
def color(img, magnitude):
    """Scale color saturation by a random factor from bucket `magnitude` of [0.1, 1.9]."""
    factors = np.linspace(0.1, 1.9, 11)
    factor = random.uniform(factors[magnitude], factors[magnitude + 1])
    return np.array(ImageEnhance.Color(Image.fromarray(img)).enhance(factor))
+
+
def brightness(img, magnitude):
    """Scale brightness by a random factor from bucket `magnitude` of [0.1, 1.9]."""
    factors = np.linspace(0.1, 1.9, 11)
    factor = random.uniform(factors[magnitude], factors[magnitude + 1])
    return np.array(ImageEnhance.Brightness(Image.fromarray(img)).enhance(factor))
+
+
def sharpness(img, magnitude):
    """Scale sharpness by a random factor from bucket `magnitude` of [0.1, 1.9]."""
    factors = np.linspace(0.1, 1.9, 11)
    factor = random.uniform(factors[magnitude], factors[magnitude + 1])
    return np.array(ImageEnhance.Sharpness(Image.fromarray(img)).enhance(factor))
+
+
def cutout(org_img, magnitude=None):
    """Fill a random square patch of the image with its mean value (Cutout).

    :param org_img: HWC ndarray image; left unmodified (a copy is edited).
    :param magnitude: None for a fixed 16-pixel patch, or an int in [0, 9]
        selecting a bucket of 11 points spanning [0, 60/331] from which the
        patch-size fraction (of the image height) is drawn.
    :return: a new ndarray with one square region filled with the image mean.
    """
    magnitudes = np.linspace(0, 60/331, 11)

    img = org_img.copy()
    fill_value = img.mean()

    if magnitude is None:
        side = 16
    else:
        scale = random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])
        side = int(round(img.shape[0] * scale))
    # the patch center may land near an edge: the top-left corner is sampled
    # from [-side//2, dim - side) and later clipped, so edge patches shrink
    top = np.random.randint(-(side // 2), img.shape[0] - side)
    left = np.random.randint(-(side // 2), img.shape[1] - side)
    bottom, right = top + side, left + side

    top, left = max(top, 0), max(left, 0)

    img[top:bottom, left:right, :].fill(fill_value)

    return img
+
+
def main():
    """Preview each augmentation op on one CIFAR-10 image (needs a display)."""
    import matplotlib.pyplot as plt
    from keras.datasets import cifar10

    (x_train, _), _ = cifar10.load_data()

    sample = x_train[0]
    for name, op in operations.items():
        print(name)
        augmented = op(sample, random.randint(0, 9))
        plt.imshow(augmented)
        plt.axis('off')
        plt.show()
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py
new file mode 100644
index 000000000..dec897f1b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py
@@ -0,0 +1,57 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import math
+from keras.callbacks import Callback
+from keras import backend as K
+
+
class CosineAnnealingScheduler(Callback):
    """Anneal the optimizer learning rate along a cosine curve each epoch.

    lr(epoch) = eta_min + (eta_max - eta_min) * (1 + cos(pi * epoch / T_max)) / 2
    """

    def __init__(self, T_max, eta_max, eta_min=0, verbose=0):
        super(CosineAnnealingScheduler, self).__init__()
        self.T_max = T_max      # period of the cosine schedule, in epochs
        self.eta_max = eta_max  # maximum (initial) learning rate
        self.eta_min = eta_min  # minimum (final) learning rate
        self.verbose = verbose  # >0 prints the lr set at each epoch

    def on_epoch_begin(self, epoch, logs=None):
        optimizer = self.model.optimizer
        if not hasattr(optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        cosine = (1 + math.cos(math.pi * epoch / self.T_max)) / 2
        lr = self.eta_min + (self.eta_max - self.eta_min) * cosine
        K.set_value(optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: CosineAnnealingScheduler setting learning '
                  'rate to %s.' % (epoch + 1, lr))

    def on_epoch_end(self, epoch, logs=None):
        # record the lr actually in effect so it shows up in training history
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py
new file mode 100644
index 000000000..de44921ce
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py
@@ -0,0 +1,128 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import random
+from keras.preprocessing.image import ImageDataGenerator
+
+from auto_augment import cutout, apply_policy
+from utils import *
+
+
class Cifar10ImageDataGenerator:
    """CIFAR-10 batch generator with optional Cutout and AutoAugment.

    Wraps a keras ImageDataGenerator (random shifts + horizontal flips) and
    post-processes every yielded batch: optional Cutout, optional AutoAugment
    sub-policy application, then per-channel standardization using CIFAR-10
    channel statistics.
    """

    def __init__(self, args):
        """:param args: namespace with boolean flags `cutout` and `auto_augment`."""
        # base augmentation: 10% random shifts (zero fill) + horizontal flips
        self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, fill_mode='constant', cval=0, horizontal_flip=True)

        # per-channel CIFAR-10 mean / std of [0, 1]-scaled RGB pixels
        self.means = np.array([0.4914009 , 0.48215896, 0.4465308])
        self.stds = np.array([0.24703279, 0.24348423, 0.26158753])

        self.args = args
        if args.auto_augment:
            # The 25 AutoAugment CIFAR-10 sub-policies; each entry is
            # [op1, prob1, magnitude1, op2, prob2, magnitude2] with magnitudes
            # being integer bucket indices in [0, 9].
            self.policies = [
                ['Invert', 0.1, 7, 'Contrast', 0.2, 6],
                ['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],
                ['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],
                ['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],
                ['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],
                ['ShearY', 0.2, 7, 'Posterize', 0.3, 7],
                ['Color', 0.4, 3, 'Brightness', 0.6, 7],
                ['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],
                ['Equalize', 0.6, 5, 'Equalize', 0.5, 1],
                ['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],
                ['Color', 0.7, 7, 'TranslateX', 0.5, 8],
                ['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],
                ['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],
                ['Brightness', 0.9, 6, 'Color', 0.2, 8],
                # was ['Solarize', 0.5, 2, 'Invert', 0, 0.3]: the second op's
                # probability and magnitude were transposed (a 0.3 magnitude
                # would crash `magnitudes[0.3]` were the probability nonzero);
                # the AutoAugment paper gives (Invert, 0.0, 3).
                ['Solarize', 0.5, 2, 'Invert', 0.0, 3],
                ['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],
                ['Equalize', 0.2, 8, 'Equalize', 0.6, 4],
                ['Color', 0.9, 9, 'Equalize', 0.6, 6],
                ['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],
                ['Brightness', 0.1, 3, 'Color', 0.7, 0],
                ['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],
                ['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],
                ['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],
                ['Equalize', 0.8, 8, 'Invert', 0.1, 3],
                ['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],
            ]

    def standardize(self, x):
        """Scale a uint8 NHWC batch to [0, 1] and normalize per channel.

        :param x: NHWC batch with 3 channels.
        :return: float32 batch, channel-wise zero-mean / unit-variance.
        """
        x = x.astype('float32') / 255

        means = self.means.reshape(1, 1, 1, 3)
        stds = self.stds.reshape(1, 1, 1, 3)

        x -= means
        x /= (stds + 1e-6)  # epsilon guards against division by ~0 std

        return x

    def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,
             seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
        """Yield augmented, standardized (x_batch, y_batch) pairs forever.

        Signature mirrors keras ImageDataGenerator.flow; extra augmentation
        (Cutout / AutoAugment) is applied according to self.args.
        """
        batches = self.datagen.flow(x, y, batch_size, shuffle, sample_weight,
                                    seed, save_to_dir, save_prefix, save_format, subset)

        while True:
            x_batch, y_batch = next(batches)

            if self.args.cutout:
                for i in range(x_batch.shape[0]):
                    x_batch[i] = cutout(x_batch[i])

            if self.args.auto_augment:
                # policy ops operate on PIL images, which require uint8 input
                x_batch = x_batch.astype('uint8')
                for i in range(x_batch.shape[0]):
                    x_batch[i] = apply_policy(x_batch[i], self.policies[random.randrange(len(self.policies))])

            x_batch = self.standardize(x_batch)

            yield x_batch, y_batch
+
+
def main():
    """Visual smoke test: stream augmented CIFAR-10 batches, show first image of each."""
    import argparse
    import matplotlib.pyplot as plt
    from keras.datasets import cifar10

    parser = argparse.ArgumentParser()
    parser.add_argument('--cutout', default=True, type=str2bool)
    parser.add_argument('--auto-augment', default=True, type=str2bool)
    args = parser.parse_args()

    generator = Cifar10ImageDataGenerator(args)

    (x_train, y_train), _ = cifar10.load_data()

    for batch, _ in generator.flow(x_train, y_train):
        plt.imshow(batch[0].astype('uint8'))
        plt.axis('off')
        plt.show()
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png
new file mode 100644
index 0000000000000000000000000000000000000000..cac1a25100ae5b9a1864ff67b2515eb025ac2bdf
GIT binary patch
literal 119083
zcmbT7cTf{=zxJ_#iWE^nS^}aVU3w29LO>}ZN-shL3=n#Ugr8ESh2ERel@?0qy+ms0
zB>_V3p-2zy<^1uU^UO2v%rocB>`r!f?%m96=AQ3$UEk0B{hbDw>Mqk=GBPqMRh8H8
z$;fWP$;hrN+$O)ef=6`eU47kjex>^1_SMJd_UGWM^E(bI`p#rzlyv{KUmKc6oLya%
za#7NA(YCjAaW`|aAcL8?IM~{|*h05;YsG
zF4!fUP!Z2yVK`Ri>j>44r{_-2{la)tN<6aBh!p$yB80HF;nQG0U5@QvIYba(PgQ^3RG|Iu&A%bwV-OqD3H?i
z%7D{v$#ODQFavJ?^Iov0{@O_}-Pxx7WtZ**u*v;w>%m>7ws~h$W}_0LV>+KPnqAL4
zO$6P%UzMD6H9#)-sOwW)fkb}^b#Y14zEr&?)-_qgFppa#FMj^1RLsNQ_uAyc32&V(h&<&oU+d&AK&XamJJu5kqCaCo-TVGdU}X7
z11wn7YbF#M^4sDyWl!28DpM|RR-{Qr5Z0aLAJ3NN-}4J8A#|WoH6Mejb$-8MIoX<(
zPU%rdUiu{e=HA)mS3PE4hu8@@wc#lg+Ie&5d8~^LGnV(`CbnK(tP0Jp@r3m2MXS_P
za}-nyn)_aFnUUhHm-WrP`yHY{etX+j$ua!1?m2sY8#)h8z1Wc*(xS0J6C=w3@|S-%
z)+xfDs=B%K*6cxiDRBMYqt;puMgweiCQJQeG&qs2Oo1b4Yuj*_@ku;eW3>asp`m|_
zRx8-~{PTUh-;W~WVu3=%0K{j2)3
zaDtzQ_ilKLtNARvV)L@eb8}#ilRkcTpB7X{Xmu>hiSvADroFa*&T1p`_7Y?N@>Ugd
zg|Zr|(LS|R%A@Fm|8a|!0fous;W!(S;4{a3TqTFxf|Q>=KCWJ4iJx9X{A!zfazTXW
z9Vz-;YQn!=ANLcj{0hlsVhY;H+52$Rbb4yqjLqF?thpDFk_9zLv0yPOuj2zY=AFa7
zL7e+LrOJ|KnGltVqgE))X}J99d_;tl=^5#g3bsymp_`GrUDjla$
z6yj@WX*20mBU^2P*9)i!$YG|{1(%)$f(!a`-$!QGZZ@d-B2j8aDX+Eun6upb^~fvG
zg=u^(u(b(>-f1*Uc~}R(pgtP2QEfb2KPoM%kg2BEtl0JPuDb@kJT$%Bil|JBLO}-X
zdSCOd$m(=|9R{;oD*DxmdwV*tf+d&Z^<;QH=2OuRAB(h)JdA+hVSGK|MR6?$JAdwF
zdm2{9LrVsXKJv~PiF)olMToc8+S~HIGoajP*E#lS3?k+a>MHU~bn&?jN8NgqRqy8Av;+lbp^*IWL++dYulU|FYo$zUJ>JaeLe{f_5Rx^{Io!LOZWqN
zj1n?+C!bS!2RMHlg3UU$Ez0n#Sb8*Nvhi6@xk7p>Z^xUGb;_;*?Fx5D!%q@SM0>8`2VJDqXOrGoa9K?`60uxK>U1Iom15k@plcbM$AH)LD5)WrJKV-Y3N)zauF3odCp8
z(wD-bcH7|}x*!uEezo_-ve1}IRvM`IV>BPS&PB~hzNdNDrz{Yb)AZGF56>Gbh%VHs
zxgWb9#_CIEUIM0D>~l4GTgz%osHH1zUc5n$@3c>bdd+Jx*0D`YWso^OWz%_d^U$
zqAn6$QE`3u0|2izlIQlJS7`0$E9w`Fdl2^-k?KY%U&bLEEVjjNZODic51(lPx$f^g
zZ#M}ft(UDy`-u+qjMiCp|G
zX>#lCX&S@3sWr1SgQ`NFd>}ddGP{My293CBju=#&nLZAWxtPYaj)}U57v4=3jcLXo
zQ&&{H&gfg&El;C9OnK3~qF#YuApE_kb$-eRjN;^4SJLb637r6AlzC~b#Gv^}tw-NK
zgb|0Qe^!2h-NX0Yi#exvs;1u%7dyRePdU_|Ds^pl5wfrGq1Q}ClK~Y`|9DBO`0aL*
zI_J2vTJJFKdOvu)SX;(v!pOz~HT{}=T0+-S*vE4GefOuN4W|-!yxrlvm+}8p@0)Y>
za{pdwKkF_TM|7aOt=)kZPQ|FZb{#zBFij|oSElMd%>pE^+V+rS`>
zgVSq!zP`+Ln$qSqk%e^O@
zExk6vg*?3D8Yu_Qln7$tAsIy1(o6Eqhr#B7%YRTpvRWm7)PvdX;oRk|THB)NhftY|b@FtQWL5Re-8sI)fB>pDRTFCx0xAClAv5bYrWBG(Bj(wTHT)K5n6DTf+-0G}c_}lZRVr>c?*wPCx!uw@9&YuDYvb@R?E1?O+@#
zaNPYUwpm^TDI8u6KCmeh62s=mgNEB}=-RHwye+ImA)3yQugX1#cH(6^_VSYPQb*S#
zdwA-Stc_x;k#asJ4|}f|GodOyUi|YOuM>1mJFQvUzg4t5uv;p&8JL#4OHxl!U2{(y
z6_a(e_X^Y)eVem0U7*wP;E2xc&19cv^e>~P5Sx$!ZX=O8P-Tx95Fau(d$zb(3JXYn
zq?GlQSQpt*rjfH+z9d}!|hZXo_jcz@$p)4X#<}q5q@(f`#b25H)sy2U2m@_17
z%;3v=Bl7*JwG2ZVsr5JJ$z>XNbBc^(13YEDxh{xNZ5WF2^d60mVnWC%;?t#f@XlWH
z&Xv7GeHPySnx|{xdY9#Y^&);OK%t2gF{(sX@&1j4gF1LsRNBAk){{Z-zf4FonkWA*
zp54ZWF)xW<#NReasZ;;hDr$GBa|=tZMWj@8oaw1&Ksze6yFUsblO++GCJC&nQ0V0I
zvaGkKn~$en?xpWe4({C9YyXp7BTu_Vscw1uGirgbkmxD!#YFX*$X679p}F?(Ai2Qs
zcMg
zCEIK6LVrlec(|0f^CUk}utiEq^Uz3i?2Qy>sADrOKZZi(_jhOQ_GP;_*>%p9JbVp$F-D
z&@jEna;%VD7XVr7?B1MB_Rb3r%M|pFQZS8G+0(YRpO?hj&ewaCy?S7fhum{cDEjEt
zyO4*^YLfGsy_&sA39&0f?tLg#F6f-qw&yH+utdc|3&YrM`F$I#&Ob3`cE=O5q=^xI
zm6XPn2efs}5)|Wx3F!e9HI#x1_0sYbs2@dl_{HmJA5EbJN~$dM&SiPLK0UBGp0uM`VT9YQ?(Z-X>isu1x?dEnyzdRdSf#&kcJ8_R
z1ASGI;9R|DLRsOVqddj(GBDe$^72sHgphqZeZJY_^-@m|N~n0eKz+QkRO6&B8-(-{
z5SY^5Tr&4%vXf15)a$(|JZp&-2WuH)sf5&WvGH=zD+gu`}CQp4{!ZYGGl7gM(b<<%?`}^@X$K-|y3p-}UpsJ_h#5e?J7VxFmLQ8yeO@(I7Wy
z_!p(kenys+@`c3?2FoOD+E?OpAxE`vvCA6CIqF7Vk+R|cFa`hJzy2xqA}wm?d%v}*
zUGyP$wa;O(*Qfvbd;5{mU$o@VQ#`Fj02WQFIi!UDRO#@~aljZInR35Q}OH;^Y=
zpIJu8c$Q;6S)aVPH$qI}=3^#w$ma476&UnB>^u?zHs)Sadx${y_>f5hE{cAyRh@r1
za>+y3Jev1GiB}hE%-n&}$hyh5i$G|91#)`V*9SsVdgt@YHGn3jS5nhP!o<(R_OaM{
zH>dH5)x{E4@%^&y#qIeZ`?|D?u!BE?Or3X$Pe#z3CqBz{C5i?-eg>1)u5{+6Hcf_x
zl*d(SwZ8k!gDsT|=JJV#jn`2*FNV4Mj?Aq6Jv(mOzilOzcXb9*^~ry>qjntz?9p?V
z38BZKRKxn1@B&C{gumaInEBR@LMX=i6H%nL}H8UV`0Zer006o08OW=h)_M*
zqTZx^@w+C`M62%Ar_>dSg%L%vCn1vLV?Q7e3@<~zN#L(zIn-Cci^d>VueJH5E3@)K
zBbm_>F)&)M*1v_3TKa(>1Ph>c?Y(i)KW942CcbDI+r^OpZHeYf;}#tYlRCUt?o9p79`s0+uADz8;*u34_3MR2EeF
z+ugK<58?H)S+Pp_^pCSCMN>8I0mzC
zc~odjSJ}HaP$t9=Z1)U=so`zUs3DdQp`YXnsiTsXP!KRX49>c}U`<@=lPFOzNd4qn
zJTkz+J?LX_Qk!d_gv|IRPVI(Eyd;r(HMg@P60*V-OoA6Wy_okNFAc1g7kXMOC0c1^
zeLXVPX9pHFV}zGL9;|4~+low&%k}r~;#8>#5gYX`890xLR!7JjXJb%sS(_gb%<~I0Y>g2mrKkP`~e=*vT;eb!^%H>hZ|
z_MXr<<3zU{itRtLDUR;?jrG~`x_i3tM^)IhGO#qJ`xX@P)f6aYH
ztV;~C1G!bwu*jDrnke
zHOA2NCgxcNiKMafoqOw(3?*=wgR#RqLPLWaGj~`hPCP0rRBG3m
zCA1(74hPchZ5`L!hucO3bnSj}o*e86zVyMK4v28%c}nffxeLygqZ&8f`&K2q|x#cJy&9SeU<(GqPvUE$5oT}Ivo{kfQpj2$LzJaqIDBU5-u5vxYP4w?I%rpBSdl%X6^Q5Tz4?i
z-VK1M-IMDsPXOD;57Lo%0Ww~9=e;Wyb<^9VT)4X8QJ`%W=@-?Ym`@oW82Q{Q9q+Ub
zJw6yzCLc#?F|Cl0@ZfqVHDH1UDwyGWe_ldx7S}Pfe9;cx4*3>f6RZi3-V#CTzc(fM
z5rU3?h~%+T?IvB@KukjQgBsEvD1
zyAxgyv)g@?-(V>2E%Dg|k^01=hB)(W{ek>mIjwtfA60S2agV&8XphayDVX_X#H;nU
zQfRxPMbze9>TrcN`bNzU&YRD;Iyyla%L2~V@hjxx#i0o0Q1yM~TF154Ssb9#3JZLVtuF_jvb7h;
zhVNZ7G;DnIAIq!T$C;133-l$a>UeY+6dk~8Cd`XAR+I0K2L@S{5*<5OV1qrSr~6*E
zluz)rGvg7mri6BEE&GL~rAp0w7I8R$8CqlPK#hu8Z)rsmuH;;B??B*BWQ}Zt5+bAxjs`7XVWEv@oOAb
zNJX5`6s+i1ewh|G4D39j$>h2M9Fm$hK!KROehwG#o+iqj^Qluz3ko-X9LNEOyGgWWv%;T>2J#!*ok1@
z)%(Gr+y)e%6JuM;90lCvYz^CRLVW<4{sv-sjY-VQ$AxJD^?o92+a&Vv%cgU~rti?-
zk`hyw%~HXHqbMM&k*0dqW_IghJ_IMd+Q)flv1DedXWU#l5yhSCLc
zsvOb^WIoMo5U1RQ17ELYIH`s>rV>elu?}J9C?SInYJHo26~ONf<{Rax%o>f2Oxl!i
zCz2%LvtYe+H3$_5vv!gn%R5RdQMAp;w@vq%=T`APyc?r)Q&PiZj5NUoy5LC68+<@}
znc{nS(sEF8QPM;XwPS9$-uPFSl9o`tJ2Xq05-cNCFO?o2#m5FXm*zD*P}i7m(A6pB
zcRiiBaJ_RfDKc?^ZP+#mE@x!Y^`XIc$Lae}Sc_kU>Uq!$VLsa!X8iG_bwEThfR!nz
z!ijO^T_~$D@zw}oB{A&0a!*pn9XDZlq^U)i(CZM2MJm)>uheR5UP0-dFaCWVuqXPP
zDy6HyJsQ6)b!WHG(r|F%^$G5FOjjVN(YCm1mDT)L=hKRhXZYBk(yENd^_6$y)un?3
z<)VJN#dXE96}@W~(}KkP-Ct$^Q9ws6WM`&WfFJmK_tYxT~%x+CI;;b;W6{h?MWM#D@u3y0dHN$p%f1^r%i
z{yQQzGHNmuL5G}(+M}0gY$PbCXXde%r1kxb&=Dd8#VW3$+q$Qko0c&B$a^ux;&^4v
zf6_h&cl{7F%`K}t`@0aSqmjkXr@X=0-oP&n&=m5Gb)z|`|N3@KnW=|IS0;G(xs1Vf
zuOM|XlA(0V^t`jle?s^<7WAM~kbfS&Rm&h&7*ts{8N_w#Hw}C~feo=Mwa@yOG=izs
zx9vr}g89b2YZT~jya}DYH@t$Kb^OiDyg!{rZC&XO1WDIVa}hd+gM8Inkvf_cNOPIG
zj^5^FKTEH1Nr&^k;A@c0t4zc~tJ
z*$9g~DS>v%)vH0o(Mj8lULL51ybpmp)h04v+?ol497o8beN7>Vo6+xOU(3Iq0^xhe
zE$HV`j>)%5Bj_sQc1~>&V${7(Zfu2vZUjUg?2==DMm%h%<0dbmY
z78y=p-IvFj6yuj$A{hY{f|sKSY@l4$x^#04c9~LA{3bg(ZSJn8
zz`gN-->ssk3s&N7>9Q)*X4PE5zS=pb{Tmqdz@9WQO@u&QUy?(D-J&cf7O$ne?cPq0av)a
z0B16+-l9^pB`^l13odiMkWs1ZmK`7KDiN`mXUhL%cIjqrOUc8&EgF<02H=}>w-EWj
z)hht50ik2Y#L+M=Fp;NkUE2N=00V}mJJ_aDcCgo|QJ!yi$lay*H_l^h8WUs(L7_RPOGVk3gon>~cD$9O>m+@YathhN2T9riL+Fmtev)g2^TebKBae+RlpX+xWl!C@b$
z2;TZypet~+E$iBFf8@HsWLXFS6h!0
zJm03xsM#K-#j9P}XmA8Ur1lH>D{Jrfsa%%_PHi{R>WbntVlk>wHo<~j2
z%=Qa{F)wI0d8|KW3g;)-y*aPP6Hl_ebjIzELSM}46
z21EwU)5!RCOmnfL$%Y`+nKBw_UP|^)YXDB)z$5BoW~v=ohc;PAU#OflRgm4B8=Et9
zCqI4&U$p%cU)HOpmvQ)e6LCoH#lSO76NVL>N(AryoZFk=PwW+WZ_n&=trhOcXP}tR
zu2bB5^y;8mA>dHf7Ir@O#B`yoa!sp
z#oO+HLhYWPUh6J0k>cW=vUQ2$gulY@8PVM&!@~6U)I}@Fu+*M6&up=0)4$*69JQ#C
zucSt?r~F!H|GgOIqpAqVqtN%6pR9D
z{Qi#6Y@m)ed}u~^{^k6+S03mX5bZj^(RmwMG=pmu`_Ux}e@|}c>p!yGN+y?dMz&EE
znYW^;FPU3mFcFc;H>Opf;^{Wjt6l8ZSjhA9+^~
z;ESOLX8=s1!%O2I8F(B**s~Ie-+G0nA}#Ywuhfj2232HO)%nwen-`A1%1Bf5niITy
zc=;ogxEf)4e9&?*jRf6>%DFSnb7lCNI~W(w?xG;nc1<7dQbHgx=%=+ab@*@9kX5j1
z<8Tz=?6F%>g?-esXA{_Bqa4`cIY_+X0hi{hR1Ju{2w|D;uVf3h6GwY7(3VeC?|B1*
z#-1Hoy}-hM<)tu7n&c$y?|q@72<_2K1sWksIj)F}FpS1g1;>W=rlfO_+=dQKm8CZ*
z-1UA;3V;P)vk=Sd8q1EZT<`=dJ8YCe&3!d=-vMIANZ^4ExB1cq{qvWhlUfx3ndNFM
znmFFQ5X+@gOr(#up&QnFCtz+|4%HZ)j4I#;9pCw<>#2g78eF_D8`*JpOdH!K}_m2uqr}8Z%_+_<5i4w9SMa$s$&^JSOL(O%%yG
z((;fRlgP3ympk3rn_d1PU5WXbY@K}`e(P8uOFtQdCj$`#5awI%{aPkw#I
z!sa429P*_g7!sM&5XM+n^T@BETGC)pY7<`CId*>XSZ2*AW-QXqAENdfJ0{0%pgmzr
zeJ9BWu}oH+z}vQ%gVHkfuV8C#qN<#e`9xrrdA_&gN^fm52#66A-S|pYV4x
z6_;>zDBQAkM$pdoPML!1l78DaUfcbXg&`k5tc!ip^S@8Sc>Ap|a!g}}1klo~)$rYe
z*4#tyNAl8Xz|#~b6_E*?vT?@vtEO<&6YFZ%uB^6*N2yHU@T>yQnb({8-tdtHLr&|P
z<>GL6)QfA-2QeN#b2xH~mLJ)f8CCljp@e6EEv%bQEApTk*e
zNOLp@7YE}%SvtuXH*2?P*OfLlvh(UM{<37B++Vjn;J8B+UH^H*E~NSMVq@-+`t`z?
z_qSZz!s(&?MVpe@4Oy`P5VntOh#3S8QqEp
zuEB)M2@1BchSqsQ5*wZJzTmCg2bafOD2)=6^nl|O(-ZO~p=PUw3V)2N4%LXfl@U*v
zRTIfor99}}1}q|q=4DmOnOP#mv1_wiE?Okyz$pEzr>6++exze;m|Kpj{KM5NLp^3{}6Iav*E)6&f*U@$8
zoddV!!lvtJqwRJDokg)gt^lIM8wY=DVf)Vaabv>6+S-xy;NvO^8rFY27WPJfjfV$K
zfubdT8`WNEB&`^DNm^T@V@wzVM5`O+yl)P5KI-4y
z7MrpvDjDUFH>sU|d6_XNw7=E`#bmkH_WBLByYB3~I45?Jk2OVoO8-XIQ!<|8rKZ1n
z4f32t7ma?7UAu>{&-!i?BCuEc7MkwW!>ebt@IW%ivaDBRFi*TU9>`P8c?7!pjnw+RKkRp6c|g9#OjC0rYHyPcuG!rYeP+H(t}`CeP;)_&HQ7vz`u`$
z_ofc$4`cZI-sWRT!5fftdIoy7!%FF3?K9%6y|UZ?7NVT|s0k#18
z`ib;um`%=tpoqHKfhoA#=jgchMDdVl4Y3wAGgykgY_;}F`IR%J@UWqt$p5GcYJ$-L}Af{DRczX_xdO@!{dhN;5XVdB}XB34`
z&L7)Mniwc%`1L+TpqVq%r-wthF!kKRLZ?&}MJInVeRL1xbjWVmW}p=$LTpD`$2bRX
zSUr2f>iMf%_NE?<#o>+af$nemh6NuUZ5tN;)DhqR6J+T<2UTI_nWEXv^IbFjuN?DG
zm%V6M0cJn0H0_h8B)y&_RG2Y+m1?QIt#1ei-ROgjtTE9)+F2*J4$6?alx-I@@o;sR
z##z5iX2`rYvQ}=z24H0b7#a4u{~qnUnEo>04ovKHG;{KLIB$BE(?qe`I#k(m;9Sxe
z=(+im{IvTCsb{g18@?AV!B%v1>-Oke&^D8iz5Bb3X$V3x{j0?iMY81yD^>A~i~&F2
z%ByhE4DIWUgp3d@6Bv0`O(|Q{eZbuI%0N4!Q`K*=s@?cHy76u1~hm
zbms$g+avzvg-8qMB!)&A;xR$z?Dna9Ar+2w%y(MA`Y>TkobBR(a3)VNUe4fP1oK`U
zc5zLrzdkWi>;oLMYP
ztprUwn1wWjHTQPECo(8)F6>z(WvB=!Ww@W(U;1o6``sJ5SSxPt<;ucv#G3rI+H0K*
zk+dbDZx!gLJK(DU+c;=WuL`swccpK*i2V_F`gknQ3$Su)bi7kF)RW?T+WB$G5BC7C
zL&n;}q?N9Zhm*JQQPMuK#lBk@p`t(Zdf2Ad6zJ=kMc9cYrx(e{{TG(WtPy`P8H{hFASwX7}m7O#C3HI|I#%@Te;sarw
zHwANMJ`_C5gO|@OwDse9644$vFasJv>+BJO8HIf~t)|cg-NHwG^m}4cc4k)$h>S#~
z8u1VJ4=Wv+&fV(!?>EaBWRVRqhmhr%M>cz@NKX&m{|G$(zZ~^EG2((|fx;1O
zvUm7K&S8}*6jkW&IN4BD1gtNm%|6r4zU-Tl!zttbu>Rx|nh@Kfd(a#@Yl{^1A>)<3
zL{pBI)9l0yl`N&kyYYUc7E6W(CGo0vd(g?+t%p5>Rx={sm*1KvCI%(0eWUWTIT3>|
z3V4xNK)a8Z+c=9{v*anE7c$*MOHsDUJL1-5
ziU!9XR0qd!2C=k2w+!oR_=h%}RmTEK$u|k}7xHQh4Wlnjr
z9^QEWmynodTv$*#-A2k8Nb{x7EEyCz+>5b*-gh1Ss_8nhZI&(#3riXellI*H(Q?46
zYZ@fl6ix+;3Qv*h3$?5bySOFM-*gidm#b{6VSX;5xU7)-I5{aEv%eyviafzR4>HHM
zYCw}#&zl=m==#X`@0JdpTmw0ZD-y)|WpBO`UnO0IW?|18hAlwu0?U)*IQ0@_hO1u6
z+ljrLO=Dy6vt}*UR&l#8MYZFdH4PbEWmxl5J!6jTQZ?$8Os(aQ53E7rs8;m-08r@Q
zZ;snvjKX#`9eY2l?nOQAy%AW%Ug&?4D+Vn(Y&~IMBci~L=^1HCz+jc0vaQLi*!T26
zDYfakNQ)@X_mlVIJJyB!bjeeW+@rOx%!qBn8-RvVs>z1|t6X*RL;cZg&-t#<1hzFj
zigBr|H>ZC=%n-pWWGsoN|N9&0i6nw0W=rOtnMCG+^M%0j90cXs@@PL%o7td=u@jUf
zFzcl_-R!B!nwYiWNAUti2j3nMM8p;oJ~wKeI#jL*R5`K$t*FS!$FT~8Nv
zg}rZ$pKYY9^2d~}IC@tEs+8l^B4)kt5a%BSp1%_BL`;(#rpn-?t+E3fMHMUW1Ycpv
zU_u@u!D4}=VP5VyJXr5^4k(5s^ZU>cpGoS}Z(n(`>c(?%TT0|*zx~U4)Jv@VM}?BS
zH+L}C32jc}azYEY_hxH)$CbSme%T9@i=s6`QKl}q{VGFzV$h#vX<^tVR}Zu3g~rF-
zlMetnIzypwjGh0%eWvQMvOPWV+EJ8QeO-dne>Tyznlc#}{&!J1tlHyNm^Wp_x(1M4
zl_?>UKUIK`zBNm~RwPEkhxj37Mg0v+{wjKeRTbH1S@_EM>^yFda9b>Y@MDqh3a%~=
z_0G3};z+-MZMm`ULtxi{mUKzB=@I@LBC6n+F5!KmxkB@yV|fOdh^>s}sy1BzkpEQ}
zD~F|Ff^ounxt8g-3UfeJEEVjgBUNWb1*
zM-4rixqLBG6=F~oU~{=Q>t&Y?V<4Cb82LOhH;f@)A9|h>i?jP=2z4o?AsGEY>j0G2
z)QiI4E-EtVE(^H3*~UIMay}4_mnGN)5?1RxtM{83H-35#T9aM+#@%4?{x^n*twp?1ym=%gg@3M^&K{{Ua;GU&h4U+RQg@
zYe7v7e3s%)VdbzdgDg(&mYX#%TVE4G)jxrO+{+|2K9@ZQmg%RplKi`(pP-e6#f-gI
zA*MA-GaVN?2@bnU;6D)_b~euW0&m~FDPWrw7CsX`-}Bw=5PXGG;(SJ$dGP#Mh#2>)
zW_sRh!*XDQQw<*-J2m<)9fj_Z&s1w?bu#2f4YLC%KU(|#A+5zFN3^7n!_+(aTrB^9
z(Sc^;iJXe}r@NR;0^b+(Eg${yW{%p?h385pNk62PmH;x>llmAjTUiHxCmlI|*9O{z
z7-oNr60d@leeW~JbeCr(j`j!+A8d-LF`F+P|AQ8h76s}n8|2I1FfMVJ$i_qfubhR97z0G-FhFXpqCG?E5pZJv%hMu^8
zbe4atoYomP(1w-fd{gnB(T%{jw4qFiNwnS)ZRdDoG=VW--YP!;2UQ-al)qI;R7M-M
zGWbijtk_hp%JuOq;2>OGPrv93OSxfZNj0}m7vo!3(i&MYzcHY*)mhTMeykVu%5Kud
z!U{;F+Rjd7Qjg+vbXnSf>G;~HV()TKe9%JX+;FuP#HljOTo^3w4ufLGPi+Bjg(gdw
zXJxA=$H#PSFV;l3Z8QfTZ}9qTuLtUh(;M0>Q^q2Ly?yI%{l~8L{|gG+pG0-)sCYp`
zS!w!xBgzs5MmJ(WC6P+uRv^ENTZo`7?SdbNuB&8#I!RM7|MJk?wF`~0cbIYFtQGG<
zwxj5T)}YxxUVmg|qD%8Uwrn7Z_#0&u1DNkJS&S(9|S!!pq>GD1X*i$^J)mr=RN0`A%W+?1^mG&Iw
zMV(rcVM-=?dX4|3K~n^#4!c!JUHYqJAgZyTAfmhC%
zo%+GPxaY2`Gzo3gUPi>M=7~tXnfcVhM0zDU={VcKt7cWqdY&W*0Z?upW7N`lP(})H
z=OpOS+h;7`vRl>(+oxn>`sEP^mz~V{+ha&T)8r?USG|f((?!b0D>-uG!@ZrjJ=8O&3eFr8+_REmEZBN>v{e{iA_dNS+jD$}Z
z_bS7)+ctbX@1tJe2hm;y?l(|zaMLEWOwD!H_4+R&(Z`w+2@bThl?*hl%h17)GE8>?m4T{sf
zj!*`60KgJr9ML(|7z&}rI_Im0-S5$+YpP_3e!u#)RW9BwI-C|0{y}X6DQd%T{?IV>
zq#A8vr|m_%Ko}dA6z8)>L_>N*v+d_5BA@meiu`ol$dTG_XGZUUcyR_q+-8X)?n;yb
zoqgyob(Sh_joS1wM4ssJENf-yELV#1Rb~DIsX=LgkcU+nm~Atd+L9yuowvWN`UoHC
zumGQQXVF`&xAO}jr!u{(ljC9%uo1)^gsX3h2ouOg)xS?BSdO(E1V+tH0Z#Nx7>RY!
z!TctZGW`=f*J=SuC2x=?A(hFi5b0A+*4=cJfU5;|P_qk|+iSPQ
zA@@3gW$rWu)8t^W9Q5JLm~AGqNAU>s(=EgCI
zBqQb1jGwuD-2P*%5O5jpO9frAGWDNua6J1a^(>s2-S3kSSm}QFff|@U-ptTQ&wCW1
z6MzozbWk)JmLMHR;D${`W~!xVpw1h|Rs2Lx^W~m}GT^K3tW_zak+X?)9~K|V|1UKz
zRAW2L0W)kpqrB&40=J#r);8v^A%IzpTKCt*)V;m;#jilwWNGSM5yBaH`{=zjvOd~_
zz7(6^w7Q*Q>X7eHj|U^Wo@%}c5nT}WS*B4?=Ly~L!AI^x}Qli>6le>O~lb9wyg}jYrQo`3|?pb>g
z8U|*$$|npBjLNw*_lkWttq@v7&heFj(A*jg+*%d|@|A&j=!H~I&ju})P%Tw0u!w4H
zAggRw4(&!0nh_lotsFr#3O*kGF0~yz>x<*#gEn~vS&ok8z~!dtR#~Y+ehc$o3(;!_j)~_a=1K!e8r#a!)U3oI5$cVYUQA2b
zswa+hB+Dw%R0WGUG*sDMFPo#Fj!;_%4WW6UA;C9X?GL&3+>kXct>%JQy<
z>9Exdy%2vgwdS08kJ*^?foF1P%p(?x625sakF2nT@wN12t@36$_(ZsmZ7DosYxJ`T
z%l80h%K}B19fW>d=-DCl&<={a)9WD~_!=qedKxsUF=R7uWS>`{Ce2r5^pZmj-zhz2
z+Hn0)gH~8GdoXFO#kIS&@(rAICGNxBCf0oVJXpah;&c()?ra*Ts~Kx`
zBj|;jc=czYFv#Wp9e%$FH$13u2X^3iHhq|1w8I@asq-toZ4CF(K)#>wnr6fTEdZ2L
zK@^1UIY-iyZ#MKcyA4l0F?}N8;cgQ8AIr6yYle8jaI?4}`ePDtR{J{!&0mM`UastDcj=9=RN_bVELqfz
zzZe*P3R`2b8;7BH%lB}OX>Yhc4x;aaji(T{r5w__I%^)pI14_(ws!2rzNXLJ`D#Tl
z7;)OQ{VCnV@S8`zn&DU7lG(@7V&C!hDJ@LxJW6y$I9j{!An&t#?H90w-9lsOiYE-}dUg;T#<=zCCKx7h8gba;QO$Tqxj+uiCE!=M`8Ct!eM$NiOzdZ*-3|PT
zlZvx_f&X7zA4s5|NzPWcr~C>CK1!D=bc^Aja11Z(IZz*K46>;n;4k$WVu;>g{=zW0
z{)sWWT$JndS_F(pU7RPZ&u0TK+(2ca>x^S(R&P1p3j4n-07R+%sa405W%&$n*UY+f
zk+b;?{Iul*3l*D9PiG}kG05932~qx2_!O2Smv>!1T{u&RAbPc0x9?$KQfSHdkc
zcF-H}T6G>(hkVKM%%k0tcJT3%k{W%_*lUfbF&2vL*W8~^kyk5Lm_Nzed^w0jU%|Fg
zaIW^Ch^eA;*5#_YkiW-E0)}liZjaI7u>LYfb<@=Lt7XZ+ODHK_{`5&UKk2#VdBNp+
z0{J+Bw7$Yl?6Pkp-@P8=dm>OYOa*dl`v244?Y-LV!xdg`|jAO2%ESg-?B
z@3*rscrf4P(A98&W|w_Zu3O!a2d*-khfdK1pRTRae>{7BAS^RuoKP58#k5B@rG8_R
zLC<`(6k>8halNSM#h{~fyP7mRRHxi{nO_JSVF;&k&DVC%U^YtN8(G*E1#QsC6`h+l
zdwCKdE3RF9@{`Fy~|Q5G?`bYEKR+!e3yf!x2q3f&0~9YlMDOEqpK
zwP#=bHMVPrF}=U3K(fWPKVF+CF^zIpR1d^RFAT}J=&x!?(SYZ
z$MX2jhk0iSjLatuFPT++>D-^UgG`Ay0-8Pcy*0LxOEUJ35$lgpY9TomdF
zcEMWg0jCTBRJn5D07Xgbp7T5hc&pJ3a!wm#Ri;&&Httb5Y9k!h7P(z=L>*`^Svq8T
z`}BfQbS43Ce(f|v;ZXBnGibRxodbI^(6l}iCQ#q=x{VwVRP9A>exBH;oM0!e)?n#KHaDvCAa^}l7SxBWFD)JzfUl*
z&$6c$S?dcAcb--z{~g*ken>~*G=qWoiMbbw)S_zfpqF?gGmDDew{UH9w#-t~2A#2u
zIf-$Ys2N`WRo$u{T$M2RFR^2OSaCZ)V8%4(!Rey4A09K<)mVOCpukZ%DY#;sP
zkbr5dxR{ipeQx5WOw&xHDL#v2(z
z=#jHQ8F$?ESEb9x^PXh_^D|B=bCb6Gn0yt;liWYJvS~0~X2x^($O4hcj-{F~p#u)>
z`lq#-7kWKCwqdTdr`pF4A8!P0S6{3UWoiG=KP1ZpMCMq3J1M~~nLP8c0e0DsZ3qjm
z>W3)$-@w?vQLWCcS~r+-E~r+Oh>!iQ`)5pmg}V;I<~cX8;SZ7))9d18Q*q-j(6c3!
zadQaXbAT@luCUdZ@+-}k7f@IXB{=_@V-Ud>=7E}EN=-xbY&kcn)!vXoPcYUp~U*dkfe~3MI~kZ
z&3V|ZW$&Sj6|L^8bPb5i-0b|~bJ`1WD%of<0Yq^8=+t;8&3zpoW$WCd9~k!X&Tesx
z>oC4=rBo)2e8e5Ro6d3Gz7|3S_BFSr5$Q0$JK+63r;QuM5pe2vscH#(xeDaT)X=;y
zT#*t5gfK39DjtBN<5}b{CT?5c5VsT7>e74h7(?i(u!gMnDNh$to+#EOSv{Hq-a8gP
za{Kb%_Zv@+bH6A{#vHJWQ5|Ev
zM~l~*js|U}Bx0~2C49T=GxqyRK8MXKg^
zc06Cska(|5PO{Mey0}wlXWE#)IXzmaAuPm^?T>peZcR0f2(t;_T;^QgP98tRQsd7>
zL@kh&w#l1+S}lX}nDD8kbdK=r!lFJXB{#=T-hfC8Itqq3`S9w&{RY@iSd4-PK9=uz
zmhr1I^4C7k&HX@~@XTChF{Z~Z=Z6{BO-qyjm*+kCHTM(>5@t)%-@pb7d=80Bdb=06?5D*nDGNjjEKzGxlIIyxVK>@Ds=$6|g;I^C=)8Fw@+$?~2
z$b$G`wd3Or`Q-_Zvs>ebjrIue5PY05lUuhm^BLD5E%F(dE@4(gF}Fy@C(y)7vbwMl
zno@M)6!3v@tZG;U_#75_%_0j_Qe14B_smd>wq9b}r-n=jFu6FhG;l;hAGqZ94BDg{
zrBRrHGTctK60q{&FS!=MhIeR$Tq<9a8X>cw6jdbtuHOcKN_Qu624~YwTi0L~=Q>*Jj`7ad*DPx(lLu7n(01YPl~ddShp?Nr$!mw7cm
zJT*Jb?7|=Hp;9Pu)7L{51c`v%YjGLKx>xx45opw?E+-r{+)tG{=TcKyZJ=RZCr^
zTD@#!RkSXQoWsmI_C0;MWK?VP;zUEX$x>+Ov3K8K7bfVANs(6)+e;D_uZgYr*^
zuUTdnxay^(Z&p_t>tK?z8HLyySHI)gE51vk73HjtF)l7EkQ-D!Y`mxzpo2Nck{+=h
z9D49D#Wj4`0h#__#^_N*h_j=5Xf{W}%2^Zs=++M3+|N=Md*Z6)yJ%$Ufc`-IY|iaE
z?5YwFLuJ{#b5uiTB;j#(a4@kh!j;g$>+|9V@U6d?xFcJ-I%4O-S0bx^p=G?YvX6_E
z_84zF)@n@uaj!A5P|E%M{pR7{)Azd~-^q)0O~Gdi(?97$Hg2+`129>DGNSL06w_5$11-yGvUQ835xK`GA@V`eEQ;e%^tpTUf*^16Le(az0u94qCCw>s3>?X@~??VxqHAjqrN5iWQI+Y!?yJFOJ
z|6CI{zmu&g+w4@e{Z9VzUT_>RLD|K#?B=zUX+-$Vt)pF^v?)JlZ+Al^G^Pie5
z3ONBP>q+lSOU(!0j|3vd625EKtvoHamAf%ME%r+Suj(La;L8$m9Ap}G=*J;T7jPm2
zz_&I;BW_T)G&10MKhJ{uGdYGZPdXEXvZD*P(U@o+xAJ
zqg)&_q4-Vi)5WzhBLlUHb-@+u`!64BTlU;VOg5Cjuh-0~bflIWZ!!;)$&@$%atR<}
zCQ(ceDh3Ob?Uqxr@%dh-w8?}InRR5jH9-oLcUtiWlJLD!Q{(GrGF`@)UM*+NU-ng#
zBYeLGzrZ_KMK1)`yYCS6aXSHX;IzR37tpL_SbvX$
zQdhkhzny^B2{oUIN15l+x!=H@efh}O98st1{IN$M107G|d7_Q$y=|8O;Mbz4F`RjV
z^_v)WZx&}WXCH;rug)7tqv6p|nESP(Eebyyk9n8t;)r;tetpW`ZgvVkv7(NmIxli_b9oEx2PAm-RrD2^RcF4tgP8v$~wjL}B4u?V3$zNWG(Ytwr
zcM}3P6f*?>qI;4g2%>t>N2~*qAQsQ9lKtM5GIiibsqQlsz=dxJ#WXzmTP`o>*GlR=
zJkNmap)IRmnrdOVZLkTRL#Bm3(QMf4cxTg=A1^AOC8`^b-A%$)MXj`#3}LFG)^4jx
zg;7z6BPZtspV*TJ!qvIjX>DYfBzV}uT=in&*&ly3;t-!`^%H_j>BrD1#&*DZ-_fYK
z8j<1kOI~groDJwMWr959IsIKVU86hhIMk}Y)WWq_Gp{C&OM6L$ius*z2p5s1+23Pq
zXmFvXtewa3Kl5FO=%?Q;=3EL4!L`gJ1PHb(nsSN8*o0&^mclvhp4w&|k5OQtw|^A@pkYOBlvKh4e#^`WD{{tB<|luLnnoFQ19ql?<2-4xnBa~SQA*NH
z(&u1XPMVr|IzT7x<)bW4mz9Oe#Q%{s=yN9B@RS@2TkL14%*5w5-|oL)OJ1y+R2C>h
zbN;UL{rVycH?qzYEHu5(oFyUre(@I9xPaZOW4RWwxwu6*zfG~~
zgn)Q}b_c5nG;fDt(?Oo5;V1d3cw<r4=BxAp0L;MIVIah?Bs;xtKwhU
z6`Y#3`M%4FXHtt`@)g7!QZ#ivZ*O}p-8t1DAEg_3cFaDkPFN?r(cqh+du{v^<`B@U{P2Lfh(JNFf6%oWP`Nv*ED_i-4jowBJCLQ*LcTUhI{|oR|*_%
zNo9Pi<@tGWM>6nuN5Dv;P-1OBE*^x}y^9#U`z9MII>_0xU5D9lQQC2Wm#p1Lo2Cig
zATzj2^l=}ktx$j0*;TPF{B+Xtf@4iMW|XGdJ11#LisxBb&1O6
z!fq?xEcJf^-4J3U+e*Z84aTig5y!(eO;K##{iNcmjBdow8Q%!Yv59W3;3?4g)vq_k
z3!H*k4QV1DV#l_tmmy-E46%-?lO2Gp=@B$?3hz@o(E@Y(51)CHi+jS1pr##bJ`Z7lSy1W7U4KiqBL1rD(4shHJ;
zA%i{EdAd4Ivg^XPJC4iIAOCHQqlUT-<21&9Z#g|+`3_n7s`yE&gj-vr=^>g22l;riLFYO*Hltc43ABIEEH)BN>T`bgmzoN=
zMlUdwG&h=;h3ZiHIVrc&GBN|xkJX+!rgvz<=?3poophb^xY>k_^Y**-q>|S8CU1-B
zQo$<6m2Er&bVN>EL?WR>#r3S}ic2M|MGe5x%yD=lMsDys`kVI%HGcDLn!?jvo6{uq
z`VaE)i;raEk!^~<$|HxUuH%h#>U;&b)t!sy9>DR3>!T%`Fj0!(L_I;LM(*$6lCWYa
zChy<=>h}0^;gib&%ZH?iJS|)8R(zEx1KOeomNLfARZ=3HILUH)z!9{odG@e|Ii^=f8*+{$lyn
zcUGJFFID}MQLnuW%uH9MkyW&L8-F%TuV}^~9oC%?r#s#=WXGz3t5x~2PRu*r$LfBc
zzq~&Q$Lja~{^_FW^~rWD?wOA1C4q?mqll2G9cL9&CP}uLVrKRKU?w3
za3E6CXi9A6P3RpAag)4l2M@x*NxSX1NqJ1jx?6OscZ!qy5w&3Wj^RrkbSIwoYejwX
z;pqJF`XB7*dumA3hAwA*F#f7XKPQqv$+@6-APtP8zH0()J#
z4P=h;qR`^==lz~dt0H^6Tqn`)nqGJMIWvR`CjRtgJ;@@XBeXOWPh&g&0W;r|8MfKn
zUm_SOv|KHy_B#KZH>Dd_dy@~;V36k9opa8fmU)nDCdUPRY?07#CX--l)UB22
zC|&*rFpfLDl&AML_X>_1icsK5=a9~RLR_}p2-JKD>J3}5YYtAUvd(=Uc0>m&cb<+v
zd${wCb-?~-($HXrg5+I8Ylf90b8~f5l_IAtw}2bzYsAKl;`*LtT_EnZ)zr|YEvvXi
zg21OHDvt{nJ)|ixxf}kDunS^PtZ{r#x1hx%XT_chYvu{`C9p0F
zN7Prl4GVHhkNtzl=E=7hgFhMIZVj)G$1lB#CVP%alJ~ns`e#V0IY30(wz3-wal91N
zVa4<1Nkp!UTr6q9`47W3U;TSorC%SM*~0nmrEZFu6+NH-}wgz&E@7<_7%Vo
z@7&{vn<~9gbU$-~zATdgjM~}dt^z8vz_Z`(&n+yEt0Pe*!++%>hc18182`ztd4KwD
zi3@Vo1HRGCWn~7ZwuQX3hxTO$uzvD;z5fph7QI7XWvw7Nzc}-YlC@iTcw6}z$0vq=
zdt^L698>@I4-7V!N40?e?>7v}(bwNw%R`FGj}v&ESlmOElsCyEr@u5UC#2^T9&|qn
zKsJ4EXFH6O5YpN#W`EfpwB$We0g!WQV~~*4r`7Qh)*U{m+Urf>(aKY82RL1(Nz;{1
zFuRsH~1pVl(7h1LGyJS%Xe=-t{F3Z|C})`o1%_HAT#Aw7Z$rjS^MteviK#SNhpoSXH(dq`K`C
zqd&EoPosg-6@_wmJ_ITbWUZo)eJ=u<$Ro0fH*ATEKESRw^wmVKVFwFT?zTPwd*7hi
zf|vOMNU)uK1WmSC`=@JRq`ps&yn8`s+p&kg2(L?badA^-qZ_P=Yo%dFfJMIJU=58v
z{hA0c8?q!71A|`jXZ7fx;OTMRGKIVjlk2r0Kz>$Hk;u<-1Q)#L*%daWa_M01gtO~A
zhexZEJqU*j!AAC|#IP8=zZM9dn5VF2%4T@>O&T5i3G&1G&vy3EioAt;wvV{@n~$|9
zNE^41rDV%agYDstbekXJvRCMeMWJoa_#Z~VMSkLRwd0ZTyiWF;GCQC(}#1#koR>aQ^3Fe75s%48(PQ?2SWC5=dBXIg*;tH|jBG(``tEXpNr)2dr
zkOwxBptf4M0Og1~R)uM>FsPgfKjt6rfS5Mn_{wgEs-4yaJ$
zG}Yekm@r4pH;e8z?#^Q8SiTmN=oQ%vOBP`k)MP7m|6L2~
z#_;R=)#n2tBJ<~aDI5L)$j*&%8P97#!1%;q^<^NW+Tz32nA4eHZh3XuAJQCzfyiA
zc}=$|y<=F3iLuCbTX19YLgvZ!zT#qdxv
zIOyPPzmAI_V$x_TV~665BFb(-!yiu}QF-FVd9&fC_pN#5|c|6Ad3Y=K>_r#maOv&A(h(JJ6B21T!Uo`2%v
zb%od(Mlv&bhCMl_aYKNfE_u(){Hk#WjkAH8{
zzt|<#=t!9RDQNlrm1c}7wydi7h8VL-27AEQOdD0;p+{LVGl;#MM>S{g57HF~7beVD
z_qR-I2^MpGrtS@p8P5{@5n`}WC%Y8!V!aHnq!A)?RdQX%yb!Os)P&vyA>
za{9BNes2WHtEws^uwt}}^tTJU(J<$brGV+_zsEy66Xgv633+~KT5x#>{CU@Glk@-S
zW0Ynl9JW!E_3Q!7B4
zkUopOb6JHJulN<9qI31|AfrRsHivefR+VS6
zIZl&mGo|_kmT=%G@7&0xGea)Eb1J1B5?76N!d6%
z0P!WXC*q?xB7^0`TAY9|rAG|%ZcE*HSveoOm(G_a<^qn~s*m~ru#Y%!p$aHgz0S
z1+ZfR8nUFPl>;BaRq6J!qS(Fa!iuw3ezP&MYhK49}3!EEGvmdBf3OU!(C#;)SD+O(c
zz}0(=<8~kc>(^3mQTyplZqEd>8lfQ=a-#C=;Gko4xo7b=XOy4&DZuJeitoa8aHH$o
z%j`U&k1`+RKT3*&i=q8r42!eIdO)FAvu^F*XF*RA=ggpT*FW-ah0D5bL*B%phinbB
zSj?@MG^1I1v=h!WgDUPIMNj{H@%n6_4dC^elNeJdNzKZrJ(GbSwMrjo%sMRz)Aw2z
znuwUafO<^|=IWtaR#CSmp1=Z14R+lM8#`l#Sjnq|b6XeRxty5cvD}L>p>r*sm#5!9
ze`ov-!@YXh0#*!%OGNw_<+HX_p)Z_cu?ml;bMms}Tf+}npWI1>;8D;}Kk$Cd5<#yG
zAFe;;UXalC9FbuL2cTRhylSW4>=bZojQ7tjB>v3XRSf6d+TP}>#LTlf6?dHdx&JWu
zL^X~zO;>!bEA>+Xer4E7L_Ff?>kNeIr2Mc9>FBiwr{%9~&}-RF@>({^DDc`a_+#}p
z^>mD|&oSDZU2A|G-mJTOfYj_Y=V@3x;ht!`)iR?5?k9BZ{^fZi1Wdwqr@V4Z3i_vZ
zm3PUM%p9nWeNu6RGqqxU$_
zWIUchFzrAe=|M9RMv+lI7q4k+sstQ!*KbRL-
zreW6-dmaDR-7?ooDK9Q_Vo%W%?N)e@E%S06aq$+o$R-%G_b$hC?iS+s`Ab73Fy{@GR8GZ7xHteg
zG$cb(Esw{z2i~N6)F%1#o{M_XBa)CA{`SE`{FOG1B=KoVi3abA?Vk!-tDy73X%grg
z?d|E5O#?$c>r&_!B<2@)7O8EevpOaFiX{#!0!uZ>YA+U>{ev(bdl`<~jZh`M=}SjS
z1dBSLO-GF-LZQUscVJ_NB$nUs&&iUm!+&Z*-_jNS18XtdjkY@d53HrE{gAY^IleuH
zJW&fzL*^@n=>Zf@Zl2ss)%LFrXK8k$IjU@jkG88`nJ{A#DIFjDLZj>AFLST?(pNvRQDsHdIJ%ZGBu2XvjgU+FGq6WAnzA
zcL`?SmwkF-;dPSOTA&7H+Dj6hdQ!ors+XZeu^;;fRA>4Qfw^tl)NE62Wom3UYnvlj
zRt6^?b*b+nRyK?T=^Hx^=8eIK-ExNYnDwl7uX9r#S?|l@<#n`;l*^V_hwQ841J-uC9)!BwHt7TPG9sxp$Lk60}IQu`m5PJFhydbMwMxkXifOTo;99a)fX<=Iy8H!T_t=CogPY5zd|+$%uw8T302W
z1_D&(1P%l4D$ig3Acn4ORoj*9YS^O=h}qqrhmzP9kB$MQ_voU{x!%CXc<}>r8Y4sMlKTR+`0z_${;h
zGciGa;Q*}AFzw=A`JC;&cIBSbtNdnCUmZ8cE>Yk@64=4&Bkbmyed1G?00*O3O%#O`
zF79+$5+^)lReC7Kd6HFhB2W^K%`^=FWT+yW`^BapZ~e`ys`El?8@vxWK3vJc&oU|U
z>UbSc9qQLWQ9te-jK|er>m6>!S4t}@bc7&{8nEjLw%|DYl5W(x`1L8vYG--95w=Ix
zznoKtGEW_SehU7%Mv5#uk&cEQ_o_#sxOH>GM1*3e-JKQ(s{s%qsh}dYGq}b~Rj4Nq
zw8gcz7i4j8c|`#MU!x$G)gnxh^6&cTGG4i93p!L1v~%Ub;UGDEXvN<5zt?!6@!oNT
zu+frx#l7JVqrge8M$!k=45zmXt(96Mz~#&)s3xEghdbRE2RBSNvtMm7!!(Z>t~ZCd
zdXQYj|}EcxUV%F0bJ0n-wT|L){M=C9}VARRY;J93*S_x0cfFV>TqgUZWegT6uEk
z4Q0%szZARH&pZKJNz+w%;m5Nj&@jKJin8;9{D9Efa0D*&cyDRE6U%Rj>^?D#R{233
z9;m+6=q?+4t$!E3aj%Fya5psd)EC0NXzGg3Ox!9
zb-FYSf2$^i*71}qP~3|v^Xb_A6l4mf8<6}Cd2=AW7y0*RCi1VeII0waqgNZ`h-Cb5
zywFQP7C7e)E!)ekV$;-$cWqj%b~4{i%UCLuEGk>qOE~U^zqKy8r5qIUrAPrmxS{3n
zC*z$SFzX>{ihcK;A{cjDE03E%M~`b%R#>_V7NW-9&vEKW
z6Oi@So2=W6R11~eCvP(q+o*r?X=D4{$DCCj};_iJb8?=!`ibC-YHKZ_h
z%$NAB%E-g<56mTlGM(A1}y;mKa8XWR=U3=%^?#RhZ
z;+>PriwNc{bC`qR`bnDPt1Bq-zw9l&@TzfLp7?i+4^Fgu9o<%_yJjC%`SZOQx>F`n
z51egPAJ}gFezEDxtJ?mRuJ-S8
za%)$&trz%(*37|rlgK5(LO!adioJYL+GMWO=YfTUoabc|VJbXYGJ#R1W3c9kR~hY(
zx0VldkiwV@ENJf6jmjhPS$YWVvK`XQM}5E-*oFs3hpcm-k1r9XsEcJo2(*6MD1lUU
zd2({nSAm75YqmM?QLD`zTj7Q!EhObKz@JfC{3{Na?Oy{gnq3EnGDu(_>hvI}d^oSD
z9{QXz@OJyBI@pRf$%$U$S^MtXQ0Il2KnE&4h-AzTOnahJfn`goF&XIhrxF~1x|F?n
z$vdZVYI&YJ^Rf{;CgKN8s|q9tk5XYKHhDF>YtwQXRW_{t*>tb2Nz3Jy@==
z55C?B+R)^5?#Oky2Zvl=6HJHg`=-$pdpdASC3tUqp{bIlgYoP+-W=3-@l}#e&Zuc~
z&U(+H>YQhMxYvo7Q-rNrze&cWYNRPW<^3X!Y0G`m9;g(yZc!xdnUwDSci!9k-gs;4
zCGS0>d*V~z`RF-6t2}w2W(}NO%LP2_HX8M9hU&HmWOKhoT?tCYbJ_cG(Z5*k<(gl1l0zpOe3
z_J!tYahk&>o@6ry^z_;P96I@3-w@xC(sCRHpX5FQ?RaKg34*<3ckyg?^t{D+A7$SW
z3ylx7L$@=%ETq0Pyf1R8Av<2H6Wc89aKG1)Cw8;hv(T-PZ@yX`
zGCL-oY;3l2+2l%`(*HSd0nZYZ{?{OOhDbE}Pjgwozt}4L%Yp9y??1e=gGnUgk=D9c
zDP0?9*)XKtMyn}3TCX|Sr%7JJg2$Pzg@Hsc4rJ%ae!Bil5W|u1$ho6{-Zn3`gH>id
zD4B7mB@o`w0PgI@Xye%PG9*rbQf)ZINpTx>x9Bn_f{AWfD*bk+99
zRlH{(4j4WOweM|tkiMo{bt41(Zj=n?0NgqT9G}lZJI+|TZcE%k3}i7`@EGU2ytAWT
zPQgLh-Q3-BB5E5Zy`v>1ryjbwWgnQi()OF7bfRU4n7XiOyomuO>*L*xddMO!3FXkE
z4N5|&CZo!%kJL;nYbQ9zH+Uo0S!FP}u0PyRUJKQbQTnrgsg<*wDu
zWFJw&$Z1V?JZnwI->tozQN@TH&squ9!e~sU5Wyow1WUoML&5lYTm%C>bra5_>e>9_
z{7hu+8>hP9x2Go4h?YG_8~Be023P**PPVbvp1nddixU>V)p
zCve%_kA5`)Z=1+PI&269Vwqx8Q)8|0{4f&uvG#tNTlh@iMihse!jVuUiry9)ReN#P
zL8|6he3}iqks_0N&*88=!1cFn2_#{=1*A5-AGGtPjfpTb3=+JE
zc8nv9<206VZ?1Q;Le`sYU&Ms_yC3f$l
z$h&T&0$y@c*+#E$B8oV~hI89v`=yW|)#E>`R?=7v8Ad1&WIM4z0FtM|Zf|feb66hm
zj+_uFC$JrmDq!6~z28iWdAR75ebR?f#GCT0oRWgMbcnl4`*uA)I!Llw#|ugJ7InD+
zXJ0~VpcxqAr`^8ZA_+D7#8yDaaqFQSs542ym)|{vto>45{9sUlz_6h-uHbOG*Fl~Q
z53y|>efhsLWiN#O_pQkn$XDN@`g+Bhbgt3u5;tHv8^Ok+EVZHj0l)b*IXMBz8e)WE
zmq>+#fWe_<-OZt+y<`8(3VSGh<;(})lvfK^+m;6uZa}yGczTpsJjjxoS1lIxF<26?
z;A&-co)%JRt9PlwHPbfjgkWh07%*dsky5vYj=U|~f}bScmkT_JUm^^Qd+Ph(C<}SH
z#-?lgf0XW8pU+L_@BC?==70w+%0!+u=JRvSzTpUW4XmnvHm@L?C{jxrW(#XN+I`x!
zJx~&fV&zLOZ~?JDkwLMlnboy_7RjEHcq#Br5a22HEIHR>=hd>gUPve+8LvZrQqMM4
za?V>6Sh67eDovY<)|M*yGeN-dtJa6c4-GfLX{UVL%qF&eDYa+yD3R$tt$jaQDmgCr
z0!`ABS>>ZNLw=@*KJr`Muj(2<6kStgWqlmjP}QtQ77`k=_fKzUl)pxBZwlChBr507
z4@z5v+!0ZPU69r{RcM3g`Pmz^&K=Gxdw%|@Nawym-#XJ&xfcpO
zxeVzKnMCZYt)3&0>aGGItd_gtcC!TQ9T$74y5GGbvWC0OD{FTp>%9EulN;oR@|5I=
zJDv=RhrN(aY^UeA{!~T5Y60qxZ9(@p(^qrbM+JiehOpvqNsJa9K2-e*q;*yXXbWC9
znh(!~*LDvbxUV-)7K|oAIHk=Wl})TD9xFvdFBYsLq+0B1DIpQCl6&it=|-^E`k70IDjmyclRIA1_j#{(+XEBjC!m(MSNbFUw~~BS=JQG448>dzul8Lw=djBT+?ur?a(~s+
znaQ?j7|ykL--q^)_wEAm4%F}Nwh+f>lvRVj?|cUdu60ng5Hb~HGd)YA0d{k%x{PnZ
zCGp#bv3kf}9}UNkMDB?6_SFm*AexV}m(lO+T0o
zvvP6X_z->(oYFw(9WB(Cc=NlNR?&GDKN@{Xy5s^|IXenyEd0>
z>!uvL*|J&!3P~U1JP3A$xX!4fL;2U94pVR33l(J6*!YP^Bxi%*y})2|>ltPg@zJU$
z2C4kAb)N+RZdoP29;Il;G3#shtpRRO=|HA_I=1KJ10zZj(L%z0#)hgNmQ{YjEKRFe
z!0%F?u=LYk{hNOdrP?MgL33v
zepQ%TtWhBUda0RNOQeP+ew_57eoeVz>fBBF6w_b7pL-+d(6zbdQ=Ue1j%W;;JhI>`?(MmK>
zvo1VOYCvGzeK2)z9cdsN(?kyoT4^yC=aKFB=s1h^|8u-OuMkd>H~$?FjOJfNk1zHu
z`N+jW)>*lCKOVmvpKzY&c1B}uT2Hd|;PlHF#rD0^dL_OJUsyjtT$&ce
zioUImNy66z{dI6TvH8u5M&^#UBZq-qNVRpjyEGUdbvN0Y`wPB{H2Vctq9ReA
ztFscjvmot;So3Ig>_@{q8s3E8yT9a|@7)R7{m~^wjWlUoV0JRBWC`|h#LEGuBOOz6
z1y9LWJrNwk6i=8NN&3(we)EW?(OV9^aMQ!`hOaJhH~!Fe%HB7S{@L=V^ZmZ@|D_08
zCNcdN3=s5-7r3fO38mw?n=~bGGk!V(S2z3sZQGqTl_7rEeRH?*^{<1{ab`E7{Nb&Q
z6gYE2n9USzt;DI3@N#y&)`{=%dGqBqynLccTeC|LhuM{hk3s-tJ?IB!@1E%@3lXKb
zAw^6QcfzDZvM@lB@?wQ6UEANv+TT4+0-gx-lsAkRZ)Lilop`i9pqOLxc1J3=DZ*I$
zI9+eodyt5yrgG4ip#T=D0ln^_x1AWHJLat~HvunaWs9RIsWX%7PDA83Z|yJ3Rd>k{
z#u(GQTV3MzQ4F~6DjQ0hiZ
zu^>KbRB!3rv`wwS7~;)1G0wuVH8Q*g2uK=zOndC?vu
zOSBi_7uYxR{&&f>U;DX8;hx#19fBy#+FaSnKJbQ~u7DcEzvpC1Dgk3g7WY4H@=82S
zODwYf9enZk3NEs#J{l0CWo{Se;`UZ?sq!1}oxZ52kgtAJtw)#EH3Ze+*^;L{y`KF|
z$jXHf@Hhsw1}UP-KS6MKtJqiE<@lGZzf;|yazzF37+3bz<`^}TZ8i+
zfm=fZ`aWk8Z0bx*>kkP6ibbhs?^Y-93-(Vpp66hf=DRofl``t&AnWBb7{(7$)qHD1
zIesnCi=T#A7L89thCLJ3hA^F!jG;NElOl;hVLJ>(0%0a~y(YcLo@yXHR=!&+-3g+O
z+2~`5t_GdM_L)VIIjmaYCEH3S03zSL?baP9DZmNq8b6eSazq79`CWh08}%N^p&0&Q
zb|VabDa}^oLDY4pEYUD&q>_dL$NR@`Ny@9V8$)*pd#zZ(0J(-hJL~Qb%*5??=qcD)
z!41PVJlpk+4}D7YYO7U4LKp8{i7_3|8OjRSNg*1*Ls_toYvd+_DpoR1tphtKCEuJ=
zgywSp*#7>cpOt~e$QoMcBjQk?7=dIjcOUM^=?si;f#nPpV+UR=YWP=R?mjt;*RV7+
z*1S&2$PZNj;5u-F9)ZDhsCdIJh5A79qb|L(k*FbG`XOagjg>#-M)psBYq!VMza|^%
z!$K!-kO#T^i*@YxJK5b>3E;TnO$hGG&-=7w*5SrsPG)hRS%1X*Rl_`MgAgj~eDWCF
zh5qYk^Nm2NBqH;UeVb7HK*#p{)hS>3mUCg?-IF8#G=%a*=Slv`&yqQTPrK|@^gJk6
z>%28b)!aB}rvl8x^nn|Ce+e2^yQKpB0RnJaxF5uc6P5;IprVF$@VU(uxyX`TSR-^L
zzr)%+R3VIpetBju4}q4PS9q2PfygZj#dNGZUT2|K<|nmZ5XTuV215=EJhkNs7q4yN?)
zx#pbD^Lc_UgnROyd-CWmcKXmD{)E!Y-#+q!7f871o9%jQ2yaWqfJkj@Om4o;&0lNb
zZ*~UysETZ_LUn?inzMTR8Z@3AmAupaWt1hfW2&$l=McWL$z+=3oFZ*8k=lYz!Bo3DE}M%A)PC=4dM@4PKx_L63mC5Shy?);Wg7F6Ov
z?D*bFE~uRTBfSNPFla_tu9mdhV*E}iQ`6__cKZpQJGC0FiT;A*|a#jj>v6#|BRI|%KD`iZe0B>1sC(nE2_XbMxlh{cMY6eHEKHvCxi#0uLC*vT9#QG`|nSH
zAByW0bzu`Nx^VV04<&aZ>LDnb4^m`wXS?}HaNm)^(|=W-*>TqLqzjr|fZCwweMU7!
zn^`T@ugZmIjw;pM++b+ZJwPTVwId8b~
z2mLd2OsR>L*2s1`D^a;+jT@4Es!2+C3Cp}h||3S7>e0+zhgr=
z?`b6+!yl*qXbYOpw14%@W|@eWijbicfn}$43E8{SqFN^Z*(*Z`P2oa9Z6m{DB-h%8
zHK`D4Df(q?qEx=ETafXO)v-6#vMsBpDzP@)!|pA?jTKh&PJfD#S&eH!5&ei=^7wJZ
zk7CVjc!{x)GIgaxH#=OClXMct*dEzPDb^ay=ig6|7meQkie(4f4LZrafLCZ54(7=tCE5zdMuF
zjV?2us6!R%K(4J-hr&Ctqu?pW{Mae0?sWKN=aNh>OXJnO!k8$Lr1TYa3AT+Ix$<(G
zmxvf|e)YHeHpfgeP?Z@dmT3>FQH}H%WG;^rLO*{&|4X>=2fG&!KUr>Yx=~8j_*quN
zV;U^^RxvoDAB%xO-+IyVpMaMelhjYOs%^gCN2u&?oeU#+`tqi*nH4L~@N6
zB~d-;ug_R`NpxEFzDJsG-+X9T6eynydG?f^1@ZEw65CC{nX@DlRz~jlbUOiV>dl5w
zoV&L=9{1$xmRX$C=u@_JA!oiwSr-E<#Ztct>~|`YVv1;aiZj-?$xXUR)&2dxvGUL;
zu#Nnh*Xoi_xzxR766YB89E;OB@gX;9jr|hPgT-#Ec@X#0R(&8;q_U}NMLdEJ>_;YR
zE}j$)Z#jOU0fd~}+VfRcEw|H^dy29h_2rRLAaFcei|55PFyXF!)gw<(ZNES|W7Hfk
zyih7UsSjTrX~mjRwskolL|KKTW^1Qk%Qt9JSh}qh46#`!*K&(LXHfPuFK;Dc2?N
zsQV86O2EyvwQu9Mo9ad*b+^88M&D5)1%N7T*$i@r9Od8jTL2E~0!LLKclHENM&>X3
z?h!dHTRoZ9{ybLl{o3$>cti^7!xkk#>#_Qc
z^G`zIo1-yD#;(xs*g20&?w;9v`zK>bQNatUz}MbFjqD3G8&`rl=)bOc|EWU#A1}UG
zb47VdnP9o$Ng^(s;%9%Ztx|`5yy$0Zm-E}|tTINq&xX~t#q6m_|L@54QC!KXExxpt+ep|
z`0p>8-HiZbP)7VqEXu$vuCb=S+N8IIifbQH?6-P218)*DjhU_*?N#=T94d
zS$d2~CE!N-mc|BJ+wcLAJh;%}d{RRd8yid~+aMHrcCba1W|g-B;;sC*C&R-L^2Y}9
zbhg0D5q72X_?+|atN7KoGv;kxgs`j^J$NBIHLiB}gZYd;C-(i`Kc0C}guwS_Zr^UM
zI5HjgtMgxg>cZgisz-IE$96ngX~?5N<1z#}cDmO;KOdt9W9%jOLx;5;zPF@sOt%K^
zhjskIuEq!C12rpH#$y~Q3K=^bczd>Gdo_6d;IOfvk}38MM0}f!unrtv^1XRN@}-i*YqJWYoxp6PFsL#%
zyKn@3r)}D)?quEAa=T4f!5LLkLS|fH8
zC%fP2gB}Apk>E1dRgh->H=Bz{I8+Eir7W*z4OR)o2drRnfd8FWF+?-!{OS0g%}Q7u5HQj#M;bEgB*e_
zvj0nyg@pZ|dhYtzVFv@}G|6Iq4duC)N3Abp`Zj@g3u#9YrK>#PJ$2TUF0OYFf5bpF
zG3gh$EqeIj9xHa4j$hAtLSHg=rD?(Yu3yl>sm!@0V{-MRv4MqM%CoJ6;!O9S`rnIr
z_{DV4dz<^rdlDWz%SYiO`pDabb(ngOv-U(Tp6KU1+>F&OZQorv>Zby^lI#MVS*z4$
z`b0}WG%udJ#U^w4v^L)+cGkxS(mLGS_lk!!g=yHy@s9poMZAjs`=Br!vSF<$ftVNO
z*$Li*2h$EW9tBx}mkVr0^?qYh^$Um;>dn{o9)F{hg#pigUd=9O{CJyG?@Ac?w8_0z
znER{{>z~%VaNyNyBcIV!@LoFtF3%QMFA};*{hNGZQ{4Veot!x)QH_nH;s&DyR;scD
z=DZ)pFY!88atg-9+*fZ=^K5b+lkYE;Q&UdvDT=0ghEZ3o-3-hOK=5(t_zX`6(VXGj
zFJJV8V_I#RFJicmG`1?dewVY3E;L|`sl=})MjS164uiJBU8gvBl)YN+7Z&viqw$U#
zWb+lLXBv#Tu*Kb~jq0fCT#!Aq86hO0DAGYWR4c;u1iHS__GHYIpqnyfE&LlKe>fB4H
z7ikIl+AAnwUl=UaQbJE+mq
zcYJ^6H=+liTDLGukvUD?bTX<
zj?F8;UFbmGH(QQ;XfuPx^ov^74iT`c>!c`SL8$nWl~r%eZkPlu5uo#
z=^M(S^%w&`Ez9QO8i=G=ki*V5l)ZiK2>oHIun=A$G;EUEchLU##(P)|q`N*AP3Ir1
z_ZY94ecrK;*Mi6|19mU#*cumVe~v-lDWW0yA;c}0z8S6JXi^1yGL-k%`hZEPd_F}i
zdtiU>PYI4OZ6=+Yc$wDP?>o}&ob<&ph4xk2)|NLmZ13Qs;OY{6;+rR)7TZ(N*bZ
z9seC^qM0bUl4*`V9?uB}YjO~oVdsKR09T6=95IyZtVULyc)zuHas)=DMOZU#7`dK|aZ^0{V-E#D86NpBV)|jEA$w0^McZ
z$#3jU@;tHm2b+#qs|qRE{qMUQi0*GFtFL}gm!P2RRLH%dPj7c^BpX`A2Kq#=A_fBo
z?aAM?zjbA-|EPH@$O>}gtc$NGCNu7AAMf=Y*b4xa1pHL@F;I>{Q;h^gD)XN|0Rtu5m6srJgzDV~j!&u9}Hgb{8SQzn<^N`OH9~hHHMEJ23WB
z#+Y4oRTD~>;!K)KvHPGgm
zv@lH#jfAaFwUtYXd5$TK0(oDv0z=Oqso$u>K8fs)8-9N>+omjHG89Lx%pf1w61I35
z<2}N3e7K7BJTuI)lqff~%gZqYv@%ceajiG}B)t_;98c6hMPCN$DkBrBLr)gbt5Oy2
z>0sJ><>h>5$IZ!tZ)Oz0dry?*oYm3X2Cp{L1e{dbc;?RG*};y@7o~zh4~JfEzU+~j
z8Y)V*pT3$VQgUkyaE4SBv(KYGqi2s=_)Va)6l~q9`8E|Y+b*y+Oq%Oe8YKMUiGR!8X2U>={_q)`u)IadIPU8Vrd(4-)IV(H%+ibscAS2Fn87YVxZ(x&L!J%%
z6=H*3RIcKU0ip1X-EYQEveTq)dEzH*j8<)Q=UT{sZ{#Uqn>QJD90W(W^IbYR1t8}3sBhnHxNWP*26$h#=FT;sO~Qu`{IyeZ)slmGNbl|-@`sNI(X
zvyE7?yCIGK9O3+_QK}Ve0s3eB>--P&+0zSVV$a2@t0p0V#CNEgtV-mo6H|9wLZ
z=+{n!L1X&sxUS2gyi1;%Mbm`CZtA`agru*j8IZ{h3AskPJY#AvI-v#85?Y*KRjO+n
zRZBkPU;peZPQZ5zf0{RLcMY#226US~ubJ+>^eZqlT~|!69%s-RsSqp%?@Qb}hBNB|
zF=pfYqc4bwC$*;wXO1{@J$pN7TAmY}wf9QICEZXTjYM9i7g%VAtS=Q8a>@%=-2LSvny4gq7{Zgk%p2$4?}{x+
zly7msc^Aq;=86q2jx%`g1n+#2s;r=Jtj-dGD95b7iRVE%t&cCVtC}XMJ+l#@!1xIA
zjOFTd_BO4sxUsj;UWl0)>%!Vw!=$kR(|$@{IsE0~QuJNUlzZ+%8g<|wz3+F>9{cBi
zvlA>cqHZWWshq6$A(hy(e%+?H7dIUazG>-{1F$Se6>V*Fjm5;kkiWKN2)^p&;;0rG
zBsNe%Gg~Oc@GaoX%^$D}S2`ZxG07QER7uf+Fqdx9&bj1Yy;)ngFO$NV?|5l3S6H@%
z^yj>n8DSg|V#NBLPh)BBRJr|1dF#})dZ$VdiKLh{ES_#0It;qykGt()8nFF&(IKXO
zGL)h*783j(%+N3ysFvJr{F2K)9I^Siy>mOD?_!$$#@8Nf{Hm2iIqtdEN9myw9Pyan1!yaP|)3VKO>#MTmUhPdNf
z`HSIlZJ!Lz*we|s(&6d@)PFcr&VphdBwcJ!XMdLbQH_jJ0ztvM284{9D|d)>b4Sg0
zxQ7$|Q4cgco5>WB@r{Tv&hBotG8cxQ$FGOymBYTH%e5}4vNyBDp2$C`UHwEwD^dg{
zy2^&>-^66h&B8r_$_24dFa4*$ekG
z?MTP(vsR*XV*4S6fg@QelfJ{)k&d5|emboiDm#-)f;9)R6elW?xO(TgW!0iXgP90FnZKur{fs`x^ns#J1BN;VrsLOD8Q5wq&s1fK+z!@C=^1k>;?
zc1i1TU?YspurFdqp*s1Y@
z)}*O9_QPYtfS(V|?bAGUosPCe1v&!$#;b5xWeNke9r{hp`hs1$_}sA$J0=e%^;={-
zr0nvY0awM0o~FU-r5cuIL(C?_SOcqH;!iQPQ0Dc7`qUWzXN7GYr|yi7^A!cUdJZCfed>n)>Z_-@#9W8d_Kh#1v9vvUq>JD7?5ZGxMp>K13>~#XI<*s(IM+so7N8
z;yr+L;WOJE7D%phL-@Csjf0QjcQOa^XvSrdtQuLM5BP#8#dB
zHpN~q{jmWin|}@Sb?!`3h|uI$sQ#?iL0pSOC+r8X{)(2OBio#YEOK@wS^l2At&!@}
ztPVb|+_^QkiTktUA(KZE`>qFZ(y%38xUy}ddQ?jv==M0rQi&z{i;LsnqrE#oe;P#J
zajWSI)}Hx;#OtI+<>e$s@~)>N(x$+mdlSCEW55WF9gY{|w@$YUX4q`9g!3r~JO3LK|0EefcHg3#DA^*U|fdj44$~zO^sm>9AWj-Ir7zm$2-ai54?k1URH(
z5@rayQtIDb)#ORfEFl-S=79N;XlrrzXCh~^Y9kIVkrfZ-Bj1)I$|KQk7fYZ!gbJkX
zV7&&mbZ5sgg3@_^Z;5OnNV~QY+C=tcUrUF(i0SS6cvGUxS>fl}cDphuVn{w1KKSxw
zPow7Zg3B~pB&E7Mi@AP!>eh{$(c^fQ(C~w=&^9vev%Dvr+RY^&_oVO)BIFEco9h8=
zEvnP5soy0bKhD)h2T;QTtI5!dzXC~L0L~39mqaOUEQx#i&;kTK8eky=8jC+@s-ctB
z$d(n06n{Vt`T!mbm{^Y?2-ON3xBOxmf<#^zZ$m~mJhf*Wa%X&ZewUIev}7;qBl??n
zGLDOZeL^ZcOYW?p6F5!^gylz%%4A#({V_l0dSfVed=F0zD#a_q)}dY71md8!VUR)+
z-t8>!el!8Xe*Snyq2`bkLYC+3J}tQPai;0_hS|mV+cBVwMLxFOF}=`y(HSr_1)9$d
z!-l5YD(E+Cl>}30iC^9OpHN|X4|p7RU0`W5%z`5+#^zDSI2N}@q(2Yn+Z}7V_Z5$xvcwI*M%Hjp9YwVO=kwD?U
zVTv<)ATzl`V-77j=MK|pM-|7`V69?(1=>0q^2M1K2=5gyjb?nQ
z3+jBz*LS@Mf+s%NQ%+&5!{!kO3OQ@qG;&I<*;4)0(d-Z~^TnGW-i`~Im?bLPx_%+C
zqa46R{~Z*|dT(vB--Dsujqf(GX*V9jGbtK3nT|7u*hMd;s=yDYH^UIyXEkO$dlx_D
z&rVkHmY6rd?EHuOvFniLPH}eCJQw5y2@C3>
zn#U`thaZ?jW!aW@XDR(4ku;LY$Yy+U1E=hzCf&53eFZkHLR(>>hH|^Ox$%-eLa-{;
zYt@&vFVI79q4n}D^T
zZK|>b4@r^z;ZXhRjX%BNd|P|Fh3^S4rFK(>
zFwwU19i}w?6iC`ic~Yse^(a!R-L5!hU5>Qjc%MD33s3S?S$ZjL1jP8yXb-?eo>qw>
zsJ%Wy2joA>55&3i1m@NvOS0%7OMxNrmZ$+i)T3;|C<3HKRexTk6}ds!*E`lW*tI8
zfj_Im#dlOf#|}P5_IcYR*>*5;SFlY
zUzI0TmJqk*EHD#qeoeIWzUJMN{pnvlUw{&nFW{P4M>I=)1De*U1g
z=oK9bUW>%0s;jLcy5RZV>F^p%^YY5&B4OT5deO5$!n3mJT1rsaQv~pPI!HV`c;La|
zFJj27Z}YzIx16kO@fGSl=53IEd9I3jGT!iaYNNPJG}|q-D^4pyG5^R_tQxJu{#vly
z_4stqbn)W$PE+E)RgP`ZW^0_`lfXVZ8`cZs+GKfcfl$=SULGcNIQ!$Arympk*Sw7M
z5~Bc9RXE*Fle4)=73|C&_<6o=arX0~P4RbdcOdd;!b7Hj~8ij)<<{^-~qvoy`z)puxm=!)kNO7I!uSYqR~
zv(4)nl8|rlJ1HV;0qhTG$k5N@tyI#6N;1{yvfB?=(VBLaA5yY}v|Gd%N!#}rC_(xQ
ze2pkb1n6f)x}&)xJ1G?r%N{PNI&r_RiTLM(V><>>vGL8GY%%m4%BKEcJ2GyKwBlQn
zb0JYBt8cfxR^!p2Oh0j=n`UO(CNaLSon4TJSsF&WwMGVqYJBM(4NgS
zOtZVh%$VnLLWS4aVep5UvIV65Grl~$23~!jm8EjGF(YD;_%BJOfr1Uj4i3x?*qmMU
z@wr9Mm(q)h)Q;Rzjb}oBI3^uwY%~eGLrF`08mFemw$fe_L>|`cmf+?N=vOQI$T|~v
zZ>A$X3fUus{SNaM?9sWP3jlm?8t>edxYkNIlafgs)O8^axqV%6!(Z%4#w40RL*$OH
zzHc*ijUGC$IX;;U>SUFX_`uhueg#=rj4`j5nm`PE?jy`h{(qcnii%xiqWgA!>f~@WW|_m0tEd$W##-{
zPnm|D6eg6A^{rNsDMiYctO%}ziwK7V*3D~Y`y5`Qo)gBH-A|z^nz5pJZgGu$PHXvj
z626`>@#_tN1wYekRZkjIEI$3{6@F7R_UTSjv+T4Ja_S>x?DfevCC{?F+
zmF=S&RCs9wd&EVOFuE|rk&5Vw?CF)5XvbqPAHa69pnU_T^3VmA#z_RK6zat;Sg&GjZ-jRv&^QnQ>Ua21`Un@7aO=Mim2
zzlk5hY`;lzwKiJsI&A$DV~maDUWjy{llv(|FoK0$ob3%u76Ag6RncoNvUPkjDicp*
zC`u;<7JO
zv{GO962Tt#K(64-6P`eZ68V}uORLWoIo;tV<6df}Dv)&_y|h>&vqDo7AjQc47vJ>t
z&i&8-J1pb>*tqK?*RNF;8dot)F#b{s`0GY#(7E7V=2Q1z&ckS00b5U?>!Gyd05s9%1FxQPVi*W~
zes~Dot*@@!m78W98~NK2m!C9_5KH{+3!=I3`0<=5z^X#8FN6VVFMWmZ?mr$6DP*1(
zwDP-qLH#&+JYii@o8uK&Uz10IIg9+JYxnjiQ^pi^id14g3?bn1M9Mo=K}#&gidJ@R
z412Q;2bk3J$ON{=jNH%bdMn4RC6h;O+C@s;qMpnjvdq)b3VzJ6_Ku(c33`d1
zKYneEY3